text stringlengths 0 1.05M | meta dict |
|---|---|
from functools import partial
import click
from notifiers import __version__, get_notifier
from notifiers.core import all_providers
from notifiers.exceptions import NotifierException
from notifiers_cli.utils.dynamic_click import schema_to_command, CORE_COMMANDS
from notifiers_cli.utils.callbacks import func_factory, _notify, _resource, _resources
def provider_group_factory():
    """Dynamically generate a click group per provider with all basic commands.

    For every registered provider this builds a :class:`click.Group` named
    after the provider and attaches:
      * a ``notify`` command generated from the provider's schema,
      * a ``resources`` command listing the provider's resources,
      * one command per provider resource (each with a ``--pretty`` flag),
      * one command per ``CORE_COMMANDS`` entry (each with a ``--pretty`` flag).

    Each group is then registered on the top-level ``notifiers_cli`` group.
    """
    for provider in all_providers():
        p = get_notifier(provider)
        provider_name = p.name
        # Renamed from ``help`` to avoid shadowing the builtin.
        help_text = f"Options for '{provider_name}'"
        group = click.Group(name=provider_name, help=help_text)
        # Notify command
        notify = partial(_notify, p=p)
        group.add_command(schema_to_command(p, "notify", notify, add_message=True))
        # Resources command
        resources_callback = partial(_resources, p=p)
        resources_cmd = click.Command(
            "resources",
            callback=resources_callback,
            help="Show provider resources list",
        )
        group.add_command(resources_cmd)
        # A single Option instance is shared by the commands below; click
        # options carry no per-command state, so sharing is safe.
        pretty_opt = click.Option(
            ["--pretty/--not-pretty"], help="Output a pretty version of the JSON"
        )
        # Add any provider resources
        for resource in p.resources:
            rsc = getattr(p, resource)
            rsrc_callback = partial(_resource, rsc)
            rsrc_command = schema_to_command(
                rsc, resource, rsrc_callback, add_message=False
            )
            rsrc_command.params.append(pretty_opt)
            group.add_command(rsrc_command)
        for name, description in CORE_COMMANDS.items():
            callback = func_factory(p, name)
            params = [pretty_opt]
            command = click.Command(
                name,
                callback=callback,
                help=description.format(provider_name),
                params=params,
            )
            group.add_command(command)
        notifiers_cli.add_command(group)
@click.group()
@click.version_option(
    version=__version__, prog_name="notifiers", message=("%(prog)s %(version)s")
)
@click.option("--env-prefix", help="Set a custom prefix for env vars usage")
@click.pass_context
def notifiers_cli(ctx, env_prefix):
    """Notifiers CLI operation"""
    # Store the prefix on the click context object so provider callbacks
    # can resolve environment variables under a custom prefix.
    ctx.obj["env_prefix"] = env_prefix
@notifiers_cli.command()
def providers():
    """Shows all available providers"""
    provider_names = all_providers()
    click.echo(", ".join(provider_names))
def entry_point():
    """The entry that CLI is executed from.

    Builds the dynamic provider groups and invokes the CLI.  Any
    :class:`NotifierException` is reported in red and converted into a
    non-zero exit status.
    """
    try:
        provider_group_factory()
        notifiers_cli(obj={})
    except NotifierException as e:
        click.secho(f"ERROR: {e.message}", bold=True, fg="red")
        # ``exit`` is injected by the ``site`` module and may be absent
        # (e.g. under ``python -S`` or frozen builds); SystemExit is the
        # reliable way to terminate with a status code.
        raise SystemExit(1)
if __name__ == "__main__":
    # Allow running this module directly as a script.
    entry_point()
| {
"repo_name": "liiight/notifiers",
"path": "notifiers_cli/core.py",
"copies": "1",
"size": "2810",
"license": "mit",
"hash": -8498828711229496000,
"line_mean": 30.5730337079,
"line_max": 97,
"alpha_frac": 0.6181494662,
"autogenerated": false,
"ratio": 3.968926553672316,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0006533022176914985,
"num_lines": 89
} |
from functools import partial
import colour
import numpy as np
from scipy import optimize
def delta_e(rgb1, rgb2):
    """Return the CIEDE2000 color difference between two sRGB colors.

    Both inputs are sRGB triplets in the 0-1 range.
    Reference: https://en.wikipedia.org/wiki/Color_difference#CIEDE2000.
    """
    def to_lab(rgb):
        # sRGB -> XYZ -> CIE Lab, the space CIEDE2000 is defined in.
        return colour.XYZ_to_Lab(colour.sRGB_to_XYZ(rgb))

    return colour.delta_E_CIE2000(to_lab(rgb1), to_lab(rgb2))
def opfunc_(x, loss, eps=1e-8):
    """Return ``(loss(x), grad)`` with a forward finite-difference gradient.

    Args:
        x: 1-D point at which to evaluate.
        loss: Scalar-valued function of ``x`` (e.g. ``delta_e`` partially
            applied to a reference color).
        eps: Finite-difference step size.

    Returns:
        Tuple ``(fx, grad)`` of the loss value at ``x`` and the gradient
        estimate, suitable for ``scipy.optimize.fmin_l_bfgs_b``.
    """
    # Evaluate the loss once.  The original recomputed ``fx = loss(x)`` on
    # every loop iteration (wasted evaluations) and raised NameError for
    # zero-length input because ``fx`` was only assigned inside the loop.
    fx = loss(x)
    grad = np.zeros_like(x)
    eye = np.eye(len(x))
    for i in range(len(x)):
        grad[i] = (loss(x + eps * eye[i]) - fx) / eps
    return fx, grad
def gamut_map(rgb):
    """Finds the nearest in-gamut color to an out-of-gamut color using delta_e() as its measure of
    difference."""
    # Clip to the valid sRGB cube first; if nothing changed, the color was
    # already in gamut and is returned untouched.
    x = np.clip(rgb, 0, 1)
    if (rgb == x).all():
        return x
    # Minimise the perceptual distance to the original color while staying
    # inside the cube, via L-BFGS-B with box bounds and the finite-difference
    # objective from opfunc_.
    loss = partial(delta_e, rgb)
    opfunc = partial(opfunc_, loss=loss)
    x, _, _ = optimize.fmin_l_bfgs_b(opfunc, x, bounds=[(0, 1)]*3)
    return x | {
"repo_name": "Harold2017/myfirstflasky",
"path": "app/cri/test1.py",
"copies": "1",
"size": "1153",
"license": "mit",
"hash": -1625847012439836700,
"line_mean": 31.0555555556,
"line_max": 98,
"alpha_frac": 0.6392020815,
"autogenerated": false,
"ratio": 2.994805194805195,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4134007276305195,
"avg_score": null,
"num_lines": null
} |
from functools import partial
import commonware.log
from piston.authentication.oauth import OAuthAuthentication, views
from rest_framework.authentication import BaseAuthentication
from django.contrib.auth.models import AnonymousUser
from django.shortcuts import render
from access.middleware import ACLMiddleware
from users.models import UserProfile
from zadmin import jinja_for_django
# This allows the views in piston.authentication.oauth to cope with
# Jinja2 templates as opposed to Django templates.
# Piston view passes: template, context, request_context
def jfd(a, b, c):
    """Adapter rendering piston's (template, context, request_context)
    arguments through Jinja2 instead of Django templates."""
    return jinja_for_django(a, b, context_instance=c)
# Monkey-patch piston's oauth views to use the Jinja2 renderer above.
views.render_to_response = jfd
log = commonware.log.getLogger('z.api')
class RestOAuthAuthentication(BaseAuthentication):
    """DRF authentication class that trusts the work already done by
    RestOAuthMiddleware on the underlying Django request."""
    www_authenticate_realm = ''

    def authenticate(self, request):
        # Most of the work here is in the RestOAuthMiddleware.
        # Return (user, None) when the middleware flagged the request as
        # authenticated via RestOAuth; fall through (implicit None)
        # otherwise so other authenticators get a chance.
        if (getattr(request._request, 'user', None) and
                'RestOAuth' in getattr(request._request, 'authed_from', [])):
            request.user = request._request.user
            return request.user, None

    def authenticate_header(self, request):
        # Realm used in the WWW-Authenticate header of 401 responses.
        return 'OAuth realm="%s"' % self.www_authenticate_realm
class AMOOAuthAuthentication(OAuthAuthentication):
    """^^^MOO!!! Adds amo_user to the request object."""

    def is_authenticated(self, request):
        """Authenticate the request via piston's OAuth machinery.

        Returns True/False and may replace ``request.user`` on the way
        (impersonation for two-legged OAuth, AnonymousUser on failure).
        """
        if request.user and request.user.is_authenticated():
            return True
        # To avoid patching django-piston, use a partial to cope with
        # piston not sending in request when called later.
        self.challenge = partial(self._challenge, request=request)
        # Authenticate the user using Piston, rv will be True or False
        # depending upon how it went.
        rv = super(AMOOAuthAuthentication, self).is_authenticated(request)
        if rv and request.user:
            # The user is there, but we need to alter the user to be
            # a user specified in the request. Specifically chose this
            # term to avoid conflict with user, which could be used elsewhere.
            if self.two_legged and 'authenticate_as' in request.REQUEST:
                pk = request.REQUEST.get('authenticate_as')
                try:
                    profile = UserProfile.objects.get(pk=pk)
                except UserProfile.DoesNotExist:
                    log.warning('Cannot find user: %s' % pk)
                    return False
                if profile.deleted or profile.confirmationcode:
                    # Deleted / unconfirmed accounts may not be impersonated.
                    log.warning('Tried to use deleted or unconfirmed user: %s'
                                % pk)
                    return False
                log.info('Authenticating as: %s' % pk)
                # NOTE(review): other copies of this module assign
                # ``profile.user`` here; confirm ``profile`` is intentional
                # in this version (i.e. UserProfile is the auth user model).
                request.user = profile
            # If that worked and request.user got set, setup AMO specific bits.
            ACLMiddleware().process_request(request)
        else:
            # The piston middleware could find a consumer, but no
            # user on that consumer. If it does it returns True, but
            # request.user is None, which then blows up other things.
            request.user = AnonymousUser()
            return False
        return rv

    def _challenge(self, request):
        # 401 challenge response carrying the OAuth realm header.
        response = render(request, 'piston/oauth/challenge.html',
                          status=401)
        response['WWW-Authenticate'] = 'OAuth realm="API"'
        return response
| {
"repo_name": "johancz/olympia",
"path": "apps/api/authentication.py",
"copies": "13",
"size": "3431",
"license": "bsd-3-clause",
"hash": -2115784855986168300,
"line_mean": 37.1222222222,
"line_max": 79,
"alpha_frac": 0.6423783154,
"autogenerated": false,
"ratio": 4.4908376963350785,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00035842293906810036,
"num_lines": 90
} |
from functools import partial
import commonware.log
from piston.authentication.oauth import OAuthAuthentication, views
from django.contrib.auth.models import AnonymousUser
from django.shortcuts import render
from access.middleware import ACLMiddleware
from users.models import UserProfile
from zadmin import jinja_for_django
# This allows the views in piston.authentication.oauth to cope with
# Jinja2 templates as opposed to Django templates.
# Piston view passes: template, context, request_context
# PEP 8 (E731): a named lambda gives a useless repr and no docstring —
# use a def with the same signature and behavior.
def jfd(a, b, c):
    """Render piston's (template, context, request_context) args via Jinja2."""
    return jinja_for_django(a, b, context_instance=c)
# Monkey-patch piston's oauth views to use the Jinja2 renderer above.
views.render_to_response = jfd
log = commonware.log.getLogger('z.api')
class AMOOAuthAuthentication(OAuthAuthentication):
    """^^^MOO!!! Adds amo_user to the request object."""

    def is_authenticated(self, request):
        """Authenticate via piston OAuth; returns True/False and may
        replace ``request.user`` (impersonation or AnonymousUser)."""
        if request.user and request.user.is_authenticated():
            return True
        # To avoid patching django-piston, use a partial to cope with
        # piston not sending in request when called later.
        self.challenge = partial(self._challenge, request=request)
        # Authenticate the user using Piston, rv will be True or False
        # depending upon how it went.
        rv = super(AMOOAuthAuthentication, self).is_authenticated(request)
        if rv and request.user:
            # The user is there, but we need to alter the user to be
            # a user specified in the request. Specifically chose this
            # term to avoid conflict with user, which could be used elsewhere.
            if self.two_legged and 'authenticate_as' in request.REQUEST:
                pk = request.REQUEST.get('authenticate_as')
                try:
                    profile = UserProfile.objects.get(pk=pk)
                except UserProfile.DoesNotExist:
                    log.warning('Cannot find user: %s' % pk)
                    return False
                if profile.deleted or profile.confirmationcode:
                    # Deleted / unconfirmed accounts may not be impersonated.
                    log.warning('Tried to use deleted or unconfirmed user: %s'
                                % pk)
                    return False
                log.info('Authenticating as: %s' % pk)
                request.user = profile.user
            # If that worked and request.user got set, setup AMO specific bits.
            ACLMiddleware().process_request(request)
        else:
            # The piston middleware could find a consumer, but no
            # user on that consumer. If it does it returns True, but
            # request.user is None, which then blows up other things.
            request.user = AnonymousUser()
            return False
        return rv

    def _challenge(self, request):
        # 401 challenge response carrying the OAuth realm header.
        response = render(request, 'piston/oauth/challenge.html',
                          status=401)
        response['WWW-Authenticate'] = 'OAuth realm="API"'
        return response
| {
"repo_name": "clouserw/olympia",
"path": "apps/api/authentication.py",
"copies": "2",
"size": "2851",
"license": "bsd-3-clause",
"hash": 6165866338761803000,
"line_mean": 38.0547945205,
"line_max": 79,
"alpha_frac": 0.6369694844,
"autogenerated": false,
"ratio": 4.482704402515723,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00021074815595363542,
"num_lines": 73
} |
from functools import partial
import commonware.log
import jingo
from piston.authentication.oauth import OAuthAuthentication, views
from django.contrib.auth.models import AnonymousUser
from access.middleware import ACLMiddleware
from users.models import UserProfile
from zadmin import jinja_for_django
# This allows the views in piston.authentication.oauth to cope with
# Jinja2 templates as opposed to Django templates.
# Piston view passes: template, context, request_context
# PEP 8 (E731): a named lambda gives a useless repr and no docstring —
# use a def with the same signature and behavior.
def jfd(a, b, c):
    """Render piston's (template, context, request_context) args via Jinja2."""
    return jinja_for_django(a, b, context_instance=c)
# Monkey-patch piston's oauth views to use the Jinja2 renderer above.
views.render_to_response = jfd
log = commonware.log.getLogger('z.api')
class AMOOAuthAuthentication(OAuthAuthentication):
    """^^^MOO!!! Adds amo_user to the request object."""

    def is_authenticated(self, request):
        """Authenticate via piston OAuth; returns True/False and may
        replace ``request.user`` (impersonation or AnonymousUser)."""
        if request.user and request.user.is_authenticated():
            return True
        # To avoid patching django-piston, use a partial to cope with
        # piston not sending in request when called later.
        self.challenge = partial(self._challenge, request=request)
        # Authenticate the user using Piston, rv will be True or False
        # depending upon how it went.
        rv = super(AMOOAuthAuthentication, self).is_authenticated(request)
        if rv and request.user:
            # The user is there, but we need to alter the user to be
            # a user specified in the request. Specifically chose this
            # term to avoid conflict with user, which could be used elsewhere.
            if self.two_legged and 'authenticate_as' in request.REQUEST:
                pk = request.REQUEST.get('authenticate_as')
                try:
                    profile = UserProfile.objects.get(pk=pk)
                except UserProfile.DoesNotExist:
                    log.warning('Cannot find user: %s' % pk)
                    return False
                if profile.deleted or profile.confirmationcode:
                    # Deleted / unconfirmed accounts may not be impersonated.
                    log.warning('Tried to use deleted or unconfirmed user: %s'
                                % pk)
                    return False
                log.info('Authenticating as: %s' % pk)
                request.user = profile.user
            # If that worked and request.user got set, setup AMO specific bits.
            ACLMiddleware().process_request(request)
        else:
            # The piston middleware could find a consumer, but no
            # user on that consumer. If it does it returns True, but
            # request.user is None, which then blows up other things.
            request.user = AnonymousUser()
            return False
        return rv

    def _challenge(self, request):
        # 401 challenge rendered through jingo (Jinja2) in this variant.
        response = jingo.render(request, 'piston/oauth/challenge.html',
                                status=401)
        response['WWW-Authenticate'] = 'OAuth realm="API"'
        return response
| {
"repo_name": "wagnerand/zamboni",
"path": "apps/api/authentication.py",
"copies": "4",
"size": "2840",
"license": "bsd-3-clause",
"hash": -3224747741424847000,
"line_mean": 37.904109589,
"line_max": 79,
"alpha_frac": 0.6341549296,
"autogenerated": false,
"ratio": 4.45141065830721,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.7085565587907211,
"avg_score": null,
"num_lines": null
} |
from functools import partial
import cv2
import matplotlib
import numpy as np
import pylab
from matplotlib import pyplot as plt
from matplotlib.collections import PatchCollection
from matplotlib.patches import Polygon
from scipy.spatial import ConvexHull
from skimage import measure
from sklearn import metrics
from sklearn.cluster import DBSCAN
from sklearn.cluster import MeanShift
from sklearn.cluster import AgglomerativeClustering
from sklearn.manifold import SpectralEmbedding
from sklearn.manifold import Isomap
from sklearn.decomposition import PCA
from sklearn import datasets
from math import sqrt
from dataflow import ports
class Clusters:
    """Per-cluster statistics: centroid, sample count, std deviation and
    axis-aligned bounding box.

    Coordinates are stored as (x, y); the ``[::-1]`` flips numpy's
    (row, col) order.
    """

    def __init__(self, labels, n_clusters, data=None):
        """Compute statistics for ``n_clusters`` clusters.

        Args:
            labels: When ``data`` is None, a label image whose foreground
                labels are 1-based (0 = background).  Otherwise a per-sample
                label array (0-based) paired with ``data``.
            n_clusters: Number of clusters to summarise.
            data: Optional (n_samples, >=2) array whose first two columns
                are sample coordinates.
        """
        self.centroids = np.zeros((n_clusters, 2))
        self.counts = np.zeros(n_clusters)
        self.std = np.zeros((n_clusters, 2))
        self.bounds = np.zeros((n_clusters, 4))
        for i in range(n_clusters):
            # Label images use 1-based labels; data arrays use 0-based ones.
            selection = \
                np.stack(np.where(labels == i + 1), axis=1) if data is None else data[labels == i, :2]
            self.counts[i] = selection.shape[0]
            self.centroids[i] = np.mean(selection, axis=0)[::-1]
            self.std[i, :] = np.std(selection, axis=0)[::-1]
            # bounds = (min_x, min_y, max_x, max_y)
            self.bounds[i, :2] = np.amin(selection, axis=0)[::-1]
            self.bounds[i, 2:] = np.amax(selection, axis=0)[::-1]
def _row_condition(val, cond):
out = np.ones(val.shape[0], dtype=np.bool)
if cond is not None:
for i in range(val.shape[0]):
if not cond(val[i, ...]):
out[i] = False
return out
def cluster_filter(clusters, condition_centroids=None, condition_counts=None, condition_std=None,
                   condition_bounds=None):
    """Filter ``clusters`` in place, keeping rows that pass all conditions.

    Each condition is a per-row predicate (None accepts everything) applied
    to the matching attribute of ``clusters``.  Returns the same, mutated
    ``clusters`` object, or None when ``clusters`` is None.
    """
    if clusters is None:
        return None
    # A cluster survives only if it passes all four predicates.
    selection = np.bitwise_and(
        np.bitwise_and(_row_condition(clusters.centroids, condition_centroids),
                       _row_condition(clusters.counts, condition_counts)),
        np.bitwise_and(_row_condition(clusters.std, condition_std),
                       _row_condition(clusters.bounds, condition_bounds)))
    clusters.centroids = clusters.centroids[selection]
    clusters.counts = clusters.counts[selection]
    clusters.std = clusters.std[selection]
    clusters.bounds = clusters.bounds[selection]
    return clusters
def cluster_filter_op(condition_centroids=None, condition_counts=None, condition_std=None, condition_bounds=None):
    """Return a one-argument callable applying :func:`cluster_filter` with
    the given conditions pre-bound."""
    return partial(cluster_filter, condition_centroids=condition_centroids, condition_counts=condition_counts,
                   condition_std=condition_std, condition_bounds=condition_bounds)
def _cluster_scaler(clusters, scale):
if clusters is None:
return None
clusters.centroids *= scale
clusters.std *= scale
clusters.counts *= scale
clusters.bounds *= scale
return clusters
def cluster_scaler(scale):
    """Return a one-argument callable scaling a Clusters object by ``scale``."""
    return partial(_cluster_scaler, scale=scale)
class BlobClusteringConnectivity:
    """Cluster bright blobs in an image via connected-component labelling."""

    def __init__(self, thresh_intensity=50, debug=False):
        # Image source (dataflow sink); pixels >= thresh_intensity are kept.
        self.sink_image = ports.StateSink()
        self.thresh_intensity = thresh_intensity
        self.debug = debug
        # Fires a colorised label image when debug is enabled.
        self.out_debug_image = ports.EventSource()

    def make_clusters(self):
        """Threshold the current image and return its connected components
        as a :class:`Clusters`, or None when there is no image / no blob."""
        img = self.sink_image.get()
        if img is None:
            return None
        img = cv2.threshold(img, thresh=self.thresh_intensity, maxval=255, type=cv2.THRESH_BINARY)[1]
        # connectivity=2 -> 8-connected neighbourhood.
        labels, num = measure.label(img, connectivity=2, return_num=True)
        if num == 0:
            return None
        if self.debug:
            tmp = np.zeros((img.shape[0], img.shape[1], 3), dtype=np.uint8)
            cv2.applyColorMap(np.array(labels * 50, dtype=np.uint8), cv2.COLORMAP_AUTUMN, dst=tmp)
            tmp[labels == 0] = 0
            self.out_debug_image.fire(tmp)
        # NOTE(review): measure.label returns ``num`` foreground components
        # (labels 1..num), yet ``n_clusters=num - 1`` drops the last one —
        # confirm whether this off-by-one is intentional.
        return Clusters(labels, n_clusters=num - 1)
class BlobClusteringDBSCAN:
    """Cluster bright pixels of an image with DBSCAN over
    (row, col, scaled intensity) samples."""

    def __init__(self, dist, min_neighborhood=1, thresh_intensity=50, scale_intensity=1, debug=False):
        self.algo = DBSCAN(eps=dist, min_samples=min_neighborhood, n_jobs=-1)
        self.sink_image = ports.StateSink()
        self.thresh_intensity = thresh_intensity
        # Weight of the intensity channel relative to pixel coordinates.
        self.scale_intensity = scale_intensity
        self.debug = debug
        self.out_debug_image = ports.EventSource()
        # Reusable sample buffer (grown on demand) and debug image buffer.
        self.data = None
        self.debug_buf = None

    def make_clusters(self):
        """Run DBSCAN on all pixels >= thresh_intensity.

        Returns a :class:`Clusters`, or None when there is no image or no
        cluster was found (everything labelled noise).
        """
        img = self.sink_image.get()
        if img is None:
            return None
        selection = np.where(img >= self.thresh_intensity)
        n_data = selection[0].shape[0]
        if self.data is None or self.data.shape[0] < n_data:
            # NOTE(review): uint8 rows/cols overflow for images larger than
            # 255 pixels in either dimension — confirm expected input size.
            self.data = np.zeros(shape=(n_data, 3), dtype=np.uint8)
        self.data[:n_data, :2] = np.stack(selection, axis=1)
        self.data[:n_data, 2] = img[selection] * self.scale_intensity
        clustering = self.algo.fit(self.data[:n_data, :])
        labels = clustering.labels_
        # -1 is DBSCAN's noise label; it does not count as a cluster.
        n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)
        if n_clusters_ == 0:
            return None
        if self.debug:
            if self.debug_buf is None or self.debug_buf.shape[:2] != img.shape[:2]:
                self.debug_buf = np.zeros((img.shape[0], img.shape[1], 3), dtype=np.uint8)
            offset = 50
            for i in range(n_clusters_):
                cluster = labels == i
                # Paint each cluster with a distinct grey level, then colorise.
                self.debug_buf[:, :, 0][self.data[:n_data, :][cluster, 0], self.data[:n_data, :][cluster, 1]] = int(
                    i * offset + offset)
            cv2.applyColorMap(self.debug_buf[:, :, 0], cv2.COLORMAP_AUTUMN, dst=self.debug_buf)
            self.debug_buf[img < self.thresh_intensity, :] = 0
            self.out_debug_image.fire(self.debug_buf)
        return Clusters(labels, n_clusters_, self.data[:n_data, :])
class _simple_clustering:
    """Thin wrapper running a scikit-learn clustering estimator on sink data."""

    def __init__(self, algo):
        # Data source (dataflow sink) and the wrapped estimator.
        self.sink_data = ports.StateSink()
        self.algo = algo

    def do_clustering(self):
        """Fit the estimator on the current sink data.

        Returns the fitted estimator, or None when there is no data or no
        cluster was found (all points labelled noise).
        """
        data = self.sink_data.get()
        if data is None:
            return None
        clustering = self.algo.fit(data)
        labels = clustering.labels_
        # -1 is the noise label; it does not count as a cluster.
        n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)
        if n_clusters_ == 0:
            return None
        return clustering
class SimpleDBSCAN(_simple_clustering):
    """DBSCAN wrapper: ``dist`` is the eps radius, ``min_neighborhood`` the
    minimum samples per core point."""

    def __init__(self, dist, min_neighborhood=1):
        _simple_clustering.__init__(self, DBSCAN(eps=dist, min_samples=min_neighborhood, n_jobs=-1))
class SimpleMeanShift(_simple_clustering):
    """MeanShift wrapper; bandwidth=None lets sklearn estimate it."""

    def __init__(self, bandwidth=None):
        _simple_clustering.__init__(self, MeanShift(bandwidth=bandwidth, n_jobs=-1))
class SimpleAgglometrative(_simple_clustering):
    """AgglomerativeClustering wrapper (class name spelling kept for
    backwards compatibility); extra args go to the sklearn estimator."""

    def __init__(self, n_clusters=2, *args, **kwargs):
        _simple_clustering.__init__(self, AgglomerativeClustering(n_clusters=n_clusters, *args, **kwargs))
class SilhouetteScore:
    """Callable computing the silhouette score of a clustering result
    against the data currently available on ``sink_data``."""

    def __init__(self, sample_size=5000, *args, **kwargs):
        # Number of samples drawn when estimating the score; remaining
        # args/kwargs are forwarded to metrics.silhouette_score.
        self.sample_size = sample_size
        self.sink_data = ports.StateSink()
        self.score_args = args
        self.score_kwargs = kwargs

    def __call__(self, clusters):
        """Return the silhouette score for ``clusters``, or None when data
        or clusters are missing, or fewer than 2 clusters were found."""
        data = self.sink_data.get()
        if clusters is None or data is None:
            return None
        labels = clusters.labels_
        # Number of clusters in labels, ignoring noise if present.
        n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)
        if n_clusters_ >= 2:
            # BUG FIX: the sample size was hard-coded to 2000, silently
            # ignoring the ``sample_size`` configured in __init__.
            return metrics.silhouette_score(data, labels, sample_size=self.sample_size,
                                            *self.score_args, **self.score_kwargs)
        else:
            return None
class ClusterPrinter:
    """Render a 2-D embedding of clustered features to an image file, with
    a convex hull per cluster and a few sample thumbnails."""

    def __init__(self, num_images=20):
        # self.reducer = SpectralEmbedding()
        self.reducer = Isomap()
        self.sink_features = ports.StateSink()
        self.sink_filename = ports.StateSink()
        self.sink_image = ports.StateSink()
        # Total number of thumbnails to place on the figure.
        self.num_images = num_images

    def __call__(self, clusters):
        """Plot ``clusters`` over the sink features and save the figure to
        the path provided by ``sink_filename``; None when inputs missing."""
        features = self.sink_features.get()
        if clusters is None or features is None:
            return None
        valid = clusters.labels_ != -1
        view_data = features[valid]
        labels = clusters.labels_
        valid_labels = labels[valid]
        if len(valid_labels) == 0:
            return None
        # Fit the reducer on a random subsample (Isomap is expensive), then
        # project all features into the learned 2-D space.
        choice = np.random.choice(range(len(valid_labels)), size=min(2000, len(valid_labels)), replace=False)
        view_data = self.reducer.fit(view_data[choice, :]).transform(features)
        # BUG FIX: was a Python 2 ``print`` statement — the rest of this
        # file already uses the function form (see ClusterSamplePrinter).
        print(view_data.shape)
        fig, ax = plt.subplots(figsize=(15, 15), dpi=300)
        num_clusters = len(set(valid_labels))
        patches = []
        for l in range(num_clusters):
            cluster = view_data[labels == l, :]
            try:
                hull = ConvexHull(cluster)
                patches.append(Polygon(cluster[hull.vertices, :]))
            except Exception:
                # Degenerate clusters (too few or collinear points) have no
                # hull; skip them.  (Was a bare ``except:`` which would also
                # swallow KeyboardInterrupt/SystemExit.)
                pass
        p = PatchCollection(patches, cmap=matplotlib.cm.rainbow, alpha=0.4)
        ax.add_collection(p)
        invalid = np.invert(valid)
        # Noise points in white, clustered points colored by label.
        plt.scatter(view_data[invalid, 0], view_data[invalid, 1], c='w', s=0.1)
        ax.set_facecolor('black')
        plt.scatter(view_data[valid, 0], view_data[valid, 1], c=valid_labels, s=0.1, cmap='rainbow')
        # Add a few images to the figure
        choices = []
        imgs_per_label = max(1, int(self.num_images / num_clusters))
        for l in range(num_clusters):
            cluster_ind = np.where(labels == l)[0]
            choices += np.random.choice(cluster_ind, size=min(imgs_per_label, len(cluster_ind)), replace=False).tolist()
        plt.scatter(view_data[choices, 0], view_data[choices, 1], c=labels[choices], s=180, marker='s',
                    cmap='rainbow')
        # Get the x and y data and transform it into pixel coordinates
        xy_pixels = ax.transData.transform(np.vstack([view_data[choices, 0], view_data[choices, 1]]).T)
        xpix, ypix = xy_pixels.T
        for i, c in enumerate(choices):
            img = self.sink_image.get(c)
            if img is None:
                continue
            scale = 50.0 / np.max(img.shape)
            img = cv2.cvtColor(cv2.resize(img, dsize=(0, 0), fx=scale, fy=scale), code=cv2.COLOR_BGR2RGB).astype(
                np.float32) / 255
            plt.figimage(img, xo=int(xpix[i]) - 25, yo=int(ypix[i]) - 25, zorder=10)
        pylab.savefig(self.sink_filename.get(), dpi=fig.dpi)
        plt.close('all')
class ClusterSamplePrinter:
    """Write a grid image of random sample thumbnails per cluster."""

    def __init__(self, num_images=20, num_rows=1):
        self.sink_filename = ports.StateSink()
        self.sink_image = ports.StateSink()
        # Thumbnails per row and rows per cluster.
        self.num_images = num_images
        self.num_rows = num_rows

    def __call__(self, clusters):
        """Assemble and save the sample grid; None when ``clusters`` is None."""
        if clusters is None:
            return None
        valid_labels = clusters.labels_[clusters.labels_ != -1]
        num_clusters = len(set(valid_labels))
        # Pick up to num_images * num_rows random members of each cluster.
        choices = []
        for l in range(num_clusters):
            cluster_ind = np.where(clusters.labels_ == l)[0]
            choices.append(
                np.random.choice(cluster_ind, size=min(self.num_images*self.num_rows, len(cluster_ind)), replace=False).tolist())
        edge_length = 50
        spacing = 25
        # Output canvas: one band of rows per cluster, separated by spacing.
        out_img = np.zeros(shape=[edge_length * num_clusters * self.num_rows + (num_clusters - 1) * spacing,
                                  edge_length * self.num_images, 3],
                           dtype=np.uint8)
        print(out_img.shape)
        for i_cluster, cluster_choice in enumerate(choices):
            for i, c in enumerate(cluster_choice):
                img = self.sink_image.get(c)
                if img is None:
                    continue
                scale = float(edge_length) / np.max(img.shape)
                img = cv2.resize(img, dsize=(0, 0), fx=scale, fy=scale, interpolation=cv2.INTER_LANCZOS4)
                y_start = (i_cluster * self.num_rows + int(i / self.num_images)) * edge_length + i_cluster * spacing
                x_start = (i % self.num_images) * edge_length
                # FIX: removed stray unary plus (was ``+ +img.shape[1]``).
                out_img[y_start:y_start + img.shape[0], x_start:x_start + img.shape[1], :] = img
        cv2.imwrite(self.sink_filename.get(), out_img)
if __name__ == '__main__':
    # Ad-hoc smoke test: render a sample grid for the sklearn digits set.
    cp = ClusterSamplePrinter(20, 2)
    digits = datasets.load_digits(n_class=6)
    X = digits.data

    class mock_clusters:
        # Minimal stand-in exposing the ``labels_`` attribute the printers
        # expect from a fitted estimator.
        def __init__(self, labels):
            self.labels_ = labels

    # cp.sink_features << (lambda: X)
    # NOTE: hard-coded Windows paths; adjust before running locally.
    cp.sink_filename << (lambda: 'D:/tmp.png')
    cp.sink_image << (lambda _: cv2.imread('D:/Master Thesis/Other data/dlibfacepoints.png'))
    labels_ = digits.target
    # Mark one sample as noise to exercise the -1 handling.
    labels_[10] = -1
    cp(mock_clusters(labels_))
| {
"repo_name": "JustusSchwan/MasterThesis",
"path": "operators/clustering.py",
"copies": "1",
"size": "12635",
"license": "mit",
"hash": 8686867123039234000,
"line_mean": 36.7164179104,
"line_max": 129,
"alpha_frac": 0.6041946973,
"autogenerated": false,
"ratio": 3.5864320181663354,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46906267154663356,
"avg_score": null,
"num_lines": null
} |
from functools import partial
import django
from django.core.exceptions import ImproperlyConfigured
from django.core.management.base import BaseCommand
from django.db import connections
from django.db.transaction import atomic
from concurrency.triggers import create_triggers, drop_triggers, get_triggers
def _add_subparser(subparsers, parser, name, help):
    """Register a sub-command parser, passing ``cmd`` only on Django < 2.1
    where management-command subparsers still require it."""
    kwargs = {'help': help}
    if django.VERSION < (2, 1):
        kwargs['cmd'] = parser
    subparsers.add_parser(name, **kwargs)
class Command(BaseCommand):
    """Management command to list, create or drop concurrency DB triggers."""
    args = ''
    help = 'register Report classes and create one ReportConfiguration per each'
    requires_system_checks = False

    def add_arguments(self, parser):
        """
        Entry point for subclassed commands to add custom arguments.
        """
        subparsers = parser.add_subparsers(help='sub-command help',
                                           dest='command')
        # _add_subparser hides the Django-version-specific signature.
        add_parser = partial(_add_subparser, subparsers, parser)
        add_parser('list', help="list concurrency triggers")
        add_parser('drop', help="drop concurrency triggers")
        add_parser('create', help="create concurrency triggers")
        parser.add_argument('-d', '--database',
                            action='store',
                            dest='database',
                            default=None,
                            help='limit to this database')
        parser.add_argument('-t', '--trigger',
                            action='store',
                            dest='trigger',
                            default=None,
                            help='limit to this trigger name')

    def _list(self, databases):
        # Print every trigger grouped by database alias.
        for alias, triggers in get_triggers(databases).items():
            self.stdout.write("Database: {}".format(alias))
            for trigger in triggers:
                self.stdout.write(" {}".format(trigger))
            self.stdout.write('')

    def handle(self, *args, **options):
        """Dispatch to the requested sub-command inside one transaction."""
        cmd = options['command']
        database = options['database']
        if database is None:
            # No -d flag: operate on every configured database alias.
            databases = [alias for alias in connections]
        else:
            databases = [database]
        with atomic():
            try:
                if cmd == 'list':
                    self._list(databases)
                elif cmd == 'create':
                    for alias, triggers in create_triggers(databases).items():
                        self.stdout.write("Database: {}".format(alias))
                        for trigger in triggers:
                            self.stdout.write(" Created {0[2]} for {0[1]}".format(trigger))
                        self.stdout.write('')
                elif cmd == 'drop':
                    # NOTE(review): drop_triggers is called with *databases
                    # (unpacked) while get/create take the list directly —
                    # confirm against the concurrency.triggers signatures.
                    for alias, triggers in drop_triggers(*databases).items():
                        self.stdout.write("Database: {}".format(alias))
                        for trigger in triggers:
                            self.stdout.write(" Dropped {0[2]}".format(trigger))
                        self.stdout.write('')
                else:  # pragma: no cover
                    raise Exception()
            except ImproperlyConfigured as e:  # pragma: no cover
                self.stdout.write(self.style.ERROR(e))
| {
"repo_name": "saxix/django-concurrency",
"path": "src/concurrency/management/commands/triggers.py",
"copies": "1",
"size": "3347",
"license": "mit",
"hash": 7362631122529806000,
"line_mean": 37.0340909091,
"line_max": 95,
"alpha_frac": 0.5219599641,
"autogenerated": false,
"ratio": 5.078907435508346,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6100867399608347,
"avg_score": null,
"num_lines": null
} |
from functools import partial
import django
from django.db import models
from django.contrib.auth.models import User
from django.contrib.contenttypes.models import ContentType
from django.db.models.signals import post_init, post_save
from cbe.party.models import PartyRole, Organisation
# Valid lifecycle states for a customer and for account credit standing.
customer_status_choices = (('new', 'new'), ('active', 'active'),
                           ('inactive', 'inactive'), ('prospective', 'prospective'))
credit_status_choices = (('active', 'active'), ('stop', 'stop'),)
class Customer(PartyRole):
    """A party playing the 'Customer' role, keyed by customer number."""
    customer_number = models.CharField(primary_key=True, max_length=200)
    customer_status = models.CharField(max_length=100, choices=customer_status_choices)
    # Organisation responsible for managing this customer (optional).
    managed_by = models.ForeignKey(Organisation, on_delete=django.db.models.deletion.CASCADE, null=True, blank=True, related_name='manages_customers')

    class Meta:
        ordering = ['customer_number']

    def save(self, *args, **kwargs):
        # Force the PartyRole name so every Customer row shares one role name.
        self.name = "Customer"
        super(Customer, self).save(*args, **kwargs)

    def __str__(self):
        return "%s:%s" % (self.customer_number, self.party)
class CustomerAccountContact(PartyRole):
    """Party role acting as a contact person for customer accounts."""

    def save(self, *args, **kwargs):
        # Force the PartyRole name for all contact rows.
        self.name = "CustomerAccountContact"
        super(CustomerAccountContact, self).save(*args, **kwargs)
class CustomerAccount(models.Model):
    """An account held by a customer, keyed by account number."""
    created = models.DateField(auto_now_add=True)
    # Optional validity window for the account.
    valid_from = models.DateField(null=True, blank=True)
    valid_to = models.DateField(null=True, blank=True)
    account_number = models.CharField(primary_key=True, max_length=200)
    customer = models.ForeignKey(Customer, on_delete=django.db.models.deletion.CASCADE, related_name="customer_accounts")
    account_status = models.CharField(max_length=100)
    account_type = models.CharField(max_length=200)
    name = models.CharField(max_length=300)
    # Optional PIN used to verify the account holder.
    pin = models.CharField(max_length=100, null=True, blank=True)
    customer_account_contact = models.ManyToManyField(CustomerAccountContact, blank=True, related_name="customer_accounts")
    # Organisation responsible for managing this account (optional).
    managed_by = models.ForeignKey(Organisation, on_delete=django.db.models.deletion.CASCADE, null=True, blank=True, related_name="accounts_managed")

    class Meta:
        ordering = ['created']

    def __str__(self):
        return "%s:%s" % (self.account_number, self.name)
class CustomerAccountRelationship(models.Model):
    """Typed, optionally time-bound link between two customer accounts."""
    valid_from = models.DateField(null=True, blank=True)
    valid_to = models.DateField(null=True, blank=True)
    # Free-form relationship kind between the two accounts.
    relationship_type = models.CharField(max_length=200)
    from_account = models.ForeignKey( CustomerAccount, on_delete=django.db.models.deletion.CASCADE, related_name='related_from_account')
    to_account = models.ForeignKey( CustomerAccount, on_delete=django.db.models.deletion.CASCADE, related_name='related_to_account')
| {
"repo_name": "cdaf/cbe",
"path": "cbe/cbe/customer/models.py",
"copies": "2",
"size": "2843",
"license": "apache-2.0",
"hash": 560227725328211100,
"line_mean": 40.2028985507,
"line_max": 151,
"alpha_frac": 0.7087583539,
"autogenerated": false,
"ratio": 3.7556142668428003,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.54643726207428,
"avg_score": null,
"num_lines": null
} |
from functools import partial
import django
from django.utils import timezone
from django.db import models
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.db.models.signals import post_init, post_save
ACTION_CHOICES = (('add', 'add'), ('update', 'update'), ('delete', 'delete'))
class BusinessInteraction(models.Model):
    """Abstract base for dated business interactions that may occur at a
    generic 'place' (any model, via a generic foreign key)."""
    interaction_date = models.DateField(default=timezone.now)
    description = models.CharField(max_length=500, null=True, blank=True)
    interaction_status = models.CharField(
        max_length=100, null=True, blank=True)
    # NOTE(review): mutable class attribute shared by ALL instances of every
    # subclass — confirm it is intentionally class-level, not per-instance.
    previous_state = {}
    # Generic FK to whatever model represents the interaction's place.
    place_content_type = models.ForeignKey(
        ContentType, on_delete=django.db.models.deletion.CASCADE, null=True, blank=True, related_name="%(app_label)s_%(class)s_place_ownership")
    place_object_id = models.PositiveIntegerField(null=True, blank=True)
    place = GenericForeignKey('place_content_type', 'place_object_id')

    #def __str__(self):
    #    return "%s:%s at %s" % (self.interaction_date, self.description, self.place)

    class Meta:
        abstract = True
class BusinessInteractionItem(models.Model):
    """Abstract line item of a business interaction (quantity + action),
    linked generically to its interaction and to an optional place."""
    business_interaction_content_type = models.ForeignKey(
        ContentType, on_delete=django.db.models.deletion.CASCADE, null=True, blank=True, related_name="%(app_label)s_%(class)s_interaction_ownership")
    business_interaction_object_id = models.PositiveIntegerField(null=True, blank=True)
    business_interaction = GenericForeignKey('business_interaction_content_type', 'business_interaction_object_id')
    quantity = models.IntegerField(null=True, blank=True)
    # One of ACTION_CHOICES: add / update / delete.
    action = models.CharField(
        null=True, blank=True, max_length=50, choices=ACTION_CHOICES)
    # Generic FK to the place this item applies to.
    place_content_type = models.ForeignKey(
        ContentType, on_delete=django.db.models.deletion.CASCADE, null=True, blank=True, related_name="%(app_label)s_%(class)s_ownership")
    place_object_id = models.PositiveIntegerField(null=True, blank=True)
    place = GenericForeignKey('place_content_type', 'place_object_id')

    #def __str__(self):
    #    return "%s:%s" % (self.business_interaction, self.action)

    class Meta:
        abstract = True
class BusinessInteractionRole(models.Model):
    """Abstract named role played by a party/resource in a business interaction."""
    # Generic FK to the owning interaction.
    # Fixed: `django.db.models.deletion.CASCADE` referenced the unimported
    # `django` name (only `models` is imported), raising NameError on import.
    business_interaction_content_type = models.ForeignKey(
        ContentType, on_delete=models.CASCADE, null=True, blank=True, related_name="%(app_label)s_%(class)s_interaction_ownership")
    business_interaction_object_id = models.PositiveIntegerField(null=True, blank=True)
    business_interaction = GenericForeignKey('business_interaction_content_type', 'business_interaction_object_id')
    name = models.CharField(max_length=100, null=True, blank=True)
    # This can be resource or party role
    interaction_role_content_type = models.ForeignKey(
        ContentType, on_delete=models.CASCADE, null=True, blank=True, related_name="%(app_label)s_%(class)s_role_ownership")
    interaction_role_object_id = models.PositiveIntegerField(
        null=True, blank=True)
    interaction_role = GenericForeignKey(
        'interaction_role_content_type', 'interaction_role_object_id')

    class Meta:
        abstract = True
| {
"repo_name": "cdaf/cbe",
"path": "cbe/cbe/business_interaction/models.py",
"copies": "2",
"size": "3403",
"license": "apache-2.0",
"hash": 4857259541592233000,
"line_mean": 45,
"line_max": 150,
"alpha_frac": 0.7184836909,
"autogenerated": false,
"ratio": 3.8714448236632535,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5589928514563254,
"avg_score": null,
"num_lines": null
} |
from functools import partial
import falcon
from graceful.parameters import IntParam
from graceful.resources.base import BaseResource
class BaseMixin:
    """Common request-handling flow shared by all HTTP-method mixins."""

    def handle(self, handler, req, resp, **kwargs):
        """Run the standard resource manipulation flow for *handler*.

        The flow is the same regardless of the resource manipulation
        semantics (retrieve, list, delete, ...):

        1. Decode and validate all query-string parameters via
           ``self.require_params()``.
        2. Build the ``meta`` and ``content`` dictionaries via
           ``self.require_meta_and_content()``.
        3. Serialize the response body via ``self.make_body()``.

        Args:
            handler (method): resource manipulation method handler.
            req (falcon.Request): request object instance.
            resp (falcon.Response): response object instance to be modified.
            **kwargs: additional keyword arguments retrieved from the url
                template.

        Returns:
            Content dictionary (preferably resource representation).
        """
        decoded_params = self.require_params(req)
        # Deprecated context-passing shim -- scheduled for removal in 1.x.
        wants_context = getattr(self, '_with_context', False)
        if wants_context:
            handler = partial(handler, context=req.context)
        meta, content = self.require_meta_and_content(
            handler, decoded_params, **kwargs)
        self.make_body(resp, decoded_params, meta, content)
        return content
class RetrieveMixin(BaseMixin):
    """Add the default "retrieve flow on GET" to any resource class."""

    def retrieve(self, params, meta, **kwargs):
        """Return the representation of a single existing resource instance.

        The return value is included in the response 'content' section.

        Args:
            params (dict): parameters parsed according to the resource
                class attribute definitions.
            meta (dict): meta parameters; anything added here is later
                included in the response 'meta' section (it may already
                be pre-populated by the caller).
            **kwargs: values extracted from the route url template by
                falcon -- the suggested way of passing resource identifiers.

        Returns:
            value to be included in response 'content' section
        """
        raise NotImplementedError("retrieve method not implemented")

    def on_get(self, req, resp, handler=None, **kwargs):
        """Respond to GET assuming single-resource retrieval flow.

        Args:
            req (falcon.Request): incoming request.
            resp (falcon.Response): response to populate.
            handler (method): retrieval handler to call; defaults to
                ``self.retrieve``.
            **kwargs: extra keyword arguments taken from the url template.
        """
        chosen = self.retrieve if handler is None else handler
        self.handle(chosen, req, resp, **kwargs)
class ListMixin(BaseMixin):
    """Add the default "list flow on GET" to any resource class."""

    def list(self, params, meta, **kwargs):
        """Return representations of existing resource instances.

        The return value is included in the response 'content' section.

        Args:
            params (dict): parameters parsed according to the resource
                class attribute definitions.
            meta (dict): meta parameters; anything added here is later
                included in the response 'meta' section (it may already
                be pre-populated by the caller).
            **kwargs: values extracted from the route url template by
                falcon -- the suggested way of passing resource identifiers.

        Returns:
            value to be included in response 'content' section
        """
        raise NotImplementedError("list method not implemented")

    def on_get(self, req, resp, handler=None, **kwargs):
        """Respond to GET assuming resource-list retrieval flow.

        Args:
            req (falcon.Request): incoming request.
            resp (falcon.Response): response to populate.
            handler (method): list handler to call; defaults to
                ``self.list``.
            **kwargs: extra keyword arguments taken from the url template.
        """
        chosen = self.list if handler is None else handler
        self.handle(chosen, req, resp, **kwargs)
class DeleteMixin(BaseMixin):
    """Add the default "delete flow on DELETE" to any resource class."""

    def delete(self, params, meta, **kwargs):
        """Delete an existing resource instance.

        Args:
            params (dict): parameters parsed according to the resource
                class attribute definitions.
            meta (dict): meta parameters; anything added here is later
                included in the response 'meta' section (it may already
                be pre-populated by the caller).
            **kwargs: values extracted from the route url template by
                falcon -- the suggested way of passing resource identifiers.

        Returns:
            value to be included in response 'content' section
        """
        raise NotImplementedError("delete method not implemented")

    def on_delete(self, req, resp, handler=None, **kwargs):
        """Respond to DELETE assuming resource deletion flow.

        Deletes the resource instance, then sets the response status to
        ``202 Accepted``.

        Args:
            req (falcon.Request): incoming request.
            resp (falcon.Response): response to populate.
            handler (method): deletion handler to call; defaults to
                ``self.delete``.
            **kwargs: extra keyword arguments taken from the url template.
        """
        chosen = self.delete if handler is None else handler
        self.handle(chosen, req, resp, **kwargs)
        resp.status = falcon.HTTP_ACCEPTED
class UpdateMixin(BaseMixin):
    """Add the default "update flow on PUT" to any resource class."""

    def update(self, params, meta, **kwargs):
        """Update an existing resource instance and return its representation.

        The return value is included in the response 'content' section.

        Args:
            params (dict): parameters parsed according to the resource
                class attribute definitions.
            meta (dict): meta parameters; anything added here is later
                included in the response 'meta' section (it may already
                be pre-populated by the caller).
            **kwargs: values extracted from the route url template by
                falcon -- the suggested way of passing resource identifiers.

        Returns:
            value to be included in response 'content' section
        """
        raise NotImplementedError("update method not implemented")

    def on_put(self, req, resp, handler=None, **kwargs):
        """Respond to PUT assuming resource update flow.

        Updates the resource instance, then sets the response status to
        ``202 Accepted``.

        Args:
            req (falcon.Request): incoming request.
            resp (falcon.Response): response to populate.
            handler (method): update handler to call; defaults to
                ``self.update``.
            **kwargs: extra keyword arguments taken from the url template.
        """
        chosen = self.update if handler is None else handler
        self.handle(chosen, req, resp, **kwargs)
        resp.status = falcon.HTTP_ACCEPTED
class CreateMixin(BaseMixin):
    """Add default "creation flow on POST" to any resource class."""

    def create(self, params, meta, **kwargs):
        """Create new resource instance and return its representation.

        This is the default resource instance creation method. The value
        returned is the representation of a single resource instance; it is
        included in the 'content' section of the response body.

        Args:
            params (dict): dictionary of parsed parameters according
                to definitions provided as resource class attributes.
            meta (dict): dictionary of meta parameters; anything added
                to this dict will later be included in the response
                'meta' section. It may already be prepopulated by the
                method that calls this handler.
            kwargs (dict): dictionary of values retrieved from the route url
                template by falcon. This is the suggested way of providing
                resource identifiers.

        Returns:
            value to be included in response 'content' section
        """
        raise NotImplementedError("create method not implemented")

    def get_object_location(self, obj):
        """Return location URI associated with given resource representation.

        This handler is optional. The returned URI is used as the value of
        the ``Location`` header on POST responses.
        """
        # Fixed: the message previously read "update method not implemented",
        # a copy-paste error that pointed callers at the wrong handler.
        raise NotImplementedError(
            "get_object_location method not implemented")

    def on_post(self, req, resp, handler=None, **kwargs):
        """Respond on POST HTTP request assuming resource creation flow.

        Default flow:

        * Create a new resource instance and prepare its representation by
          calling the creation method handler.
        * Try to retrieve the URI of the newly created object using
          ``self.get_object_location()``; on success use it as the
          ``Location`` header value.
        * Set the response status code to ``201 Created``.

        Args:
            req (falcon.Request): request object instance.
            resp (falcon.Response): response object instance to be modified.
            handler (method): creation method handler to be called. Defaults
                to ``self.create``.
            **kwargs: additional keyword arguments retrieved from url template.
        """
        obj = self.handle(
            handler or self.create, req, resp, **kwargs
        )
        # Location header is optional: subclasses that do not implement
        # get_object_location() simply produce no Location header.
        try:
            resp.location = self.get_object_location(obj)
        except NotImplementedError:
            pass
        resp.status = falcon.HTTP_CREATED
class CreateBulkMixin(BaseMixin):
    """Add default "bulk creation flow on PATCH" to any resource class."""

    def create_bulk(self, params, meta, **kwargs):
        """Create multiple resource instances and return their representation.

        This is the default multiple-resource creation method. The value
        returned is the representation of multiple resource instances; it is
        included in the 'content' section of the response body.

        Args:
            params (dict): dictionary of parsed parameters according
                to definitions provided as resource class attributes.
            meta (dict): dictionary of meta parameters; anything added
                to this dict will later be included in the response
                'meta' section. It may already be prepopulated by the
                method that calls this handler.
            kwargs (dict): dictionary of values retrieved from the route url
                template by falcon. This is the suggested way of providing
                resource identifiers.

        Returns:
            value to be included in response 'content' section
        """
        # Fixed: the message previously read "create method not implemented",
        # pointing at the wrong handler name.
        raise NotImplementedError("create_bulk method not implemented")  # pragma: nocover # noqa

    def on_patch(self, req, resp, handler=None, **kwargs):
        """Respond on PATCH HTTP request assuming bulk resource creation flow.

        (Docstring fixed: this handler serves PATCH, not POST, and defaults
        to ``self.create_bulk``, not ``self.create``.)

        Default flow:

        * Create new resource instances and prepare their representation by
          calling the bulk creation method handler.
        * Set the response status code to ``201 Created``.

        **Note:** this handler does not set the ``Location`` header by
        default, as that would be valid only for single resource creation.

        Args:
            req (falcon.Request): request object instance.
            resp (falcon.Response): response object instance to be modified.
            handler (method): bulk creation method handler to be called.
                Defaults to ``self.create_bulk``.
            **kwargs: additional keyword arguments retrieved from url template.
        """
        self.handle(
            handler or self.create_bulk, req, resp, **kwargs
        )
        resp.status = falcon.HTTP_CREATED
class PaginatedMixin(BaseResource):
    """Add simple pagination capabilities to a resource.

    Contributes ``page`` and ``page_size`` parameters (with default
    descriptions) plus an ``add_pagination_meta`` helper that enriches the
    response meta with pagination hints.

    Example usage:

    .. code-block:: python

        from graceful.resources.mixins import PaginatedMixin
        from graceful.resources.generic import ListResource

        class SomeResource(PaginatedMixin, ListResource):

            def list(self, params, meta):
                # params has now 'page' and 'page_size' params that
                # can be used for offset&limit-like operations
                self.add_pagination_meta(params, meta)
                # ...
    """

    page_size = IntParam(
        details="""Specifies number of result entries in single response""",
        default='10'
    )
    page = IntParam(
        details="""Specifies number of results page for response.
        Page count starts from 0""",
        default='0',
    )

    def add_pagination_meta(self, params, meta):
        """Extend the ``meta`` dictionary in place with pagination hints.

        Note:
            Values are attached to ``meta`` without changing its reference:
            never replace the ``meta`` dictionary with another dict instance,
            only modify its content.

        Args:
            params (dict): dictionary of decoded parameter values.
            meta (dict): dictionary of meta values attached to the response.
        """
        page = params['page']
        page_size = params['page_size']
        meta['page_size'] = page_size
        meta['page'] = page
        if page > 0:
            meta['prev'] = "page={0}&page_size={1}".format(
                page - 1, page_size)
        else:
            # No previous page before page 0.
            meta['prev'] = None
        if meta.get('has_more', True):
            meta['next'] = "page={0}&page_size={1}".format(
                page + 1, page_size)
        else:
            meta['next'] = None
| {
"repo_name": "swistakm/graceful",
"path": "src/graceful/resources/mixins.py",
"copies": "1",
"size": "16487",
"license": "bsd-3-clause",
"hash": 6244467661431480000,
"line_mean": 38.4425837321,
"line_max": 92,
"alpha_frac": 0.637714563,
"autogenerated": false,
"ratio": 5.3183870967741935,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 418
} |
from functools import partial
import graphene
from django.db.models.query import QuerySet
from django_measurement.models import MeasurementField
from django_prices.models import MoneyField, TaxedMoneyField
from graphene.relay import PageInfo
from graphene_django.converter import convert_django_field
from graphene_django.fields import DjangoConnectionField
from graphql_relay.connection.arrayconnection import connection_from_list_slice
from promise import Promise
from .types.common import Weight
from .types.money import Money, TaxedMoney
@convert_django_field.register(TaxedMoneyField)
def convert_field_taxed_money(*_args):
    """Map django-prices TaxedMoneyField onto the graphene TaxedMoney type."""
    return graphene.Field(TaxedMoney)
@convert_django_field.register(MoneyField)
def convert_field_money(*_args):
    """Map django-prices MoneyField onto the graphene Money type."""
    return graphene.Field(Money)
@convert_django_field.register(MeasurementField)
def convert_field_measurements(*_args):
    """Map django-measurement MeasurementField onto the graphene Weight type."""
    return graphene.Field(Weight)
class PrefetchingConnectionField(DjangoConnectionField):
    """Relay connection field that relaxes pagination enforcement.

    When the query does not select ``edges``, first/last enforcement is
    skipped, and QuerySet lengths are taken via COUNT instead of len().
    """

    @classmethod
    def connection_resolver(
            cls, resolver, connection, default_manager, max_limit,
            enforce_first_or_last, root, info, **args):
        # Disable `enforce_first_or_last` if not querying for `edges`.
        values = [
            field.name.value
            for field in info.field_asts[0].selection_set.selections]
        if 'edges' not in values:
            enforce_first_or_last = False
        return super().connection_resolver(
            resolver, connection, default_manager, max_limit,
            enforce_first_or_last, root, info, **args)

    @classmethod
    def resolve_connection(cls, connection, default_manager, args, iterable):
        """Build a relay connection object from *iterable*.

        Falls back to *default_manager* when the resolver returned None.
        """
        if iterable is None:
            iterable = default_manager
        if isinstance(iterable, QuerySet):
            # Use a COUNT query rather than len() to avoid fetching all rows.
            _len = iterable.count()
        else:
            _len = len(iterable)
        connection = connection_from_list_slice(
            iterable,
            args,
            slice_start=0,
            list_length=_len,
            list_slice_length=_len,
            connection_type=connection,
            edge_type=connection.Edge,
            pageinfo_type=PageInfo,
        )
        # Keep the source iterable and its length on the connection for
        # downstream consumers.
        connection.iterable = iterable
        connection.length = _len
        return connection
class FilterInputConnectionField(DjangoConnectionField):
    """Connection field that applies a FilterSet taken from a filter input arg."""

    def __init__(self, *args, **kwargs):
        # Name of the GraphQL argument carrying the filter input
        # (defaults to 'filter').
        self.filter_field_name = kwargs.pop('filter_field_name', 'filter')
        self.filter_input = kwargs.get(self.filter_field_name)
        self.filterset_class = None
        if self.filter_input:
            self.filterset_class = self.filter_input.filterset_class
        super().__init__(*args, **kwargs)

    @classmethod
    def connection_resolver(
            cls, resolver, connection, default_manager, max_limit,
            enforce_first_or_last, filterset_class, filters_name, root, info,
            **args):
        # Disable `enforce_first_or_last` if not querying for `edges`.
        values = [
            field.name.value
            for field in info.field_asts[0].selection_set.selections]
        if 'edges' not in values:
            enforce_first_or_last = False
        first = args.get('first')
        last = args.get('last')
        if enforce_first_or_last:
            assert first or last, (
                'You must provide a `first` or `last` value to properly '
                'paginate the `{}` connection.'
            ).format(info.field_name)
        # Clamp first/last to max_limit (and reject values above it).
        if max_limit:
            if first:
                assert first <= max_limit, (
                    'Requesting {} records on the `{}` connection exceeds the '
                    '`first` limit of {} records.'
                ).format(first, info.field_name, max_limit)
                args['first'] = min(first, max_limit)
            if last:
                assert last <= max_limit, (
                    'Requesting {} records on the `{}` connection exceeds the '
                    '`last` limit of {} records.'
                ).format(last, info.field_name, max_limit)
                args['last'] = min(last, max_limit)
        iterable = resolver(root, info, **args)
        on_resolve = partial(cls.resolve_connection, connection,
                             default_manager, args)
        # Apply the FilterSet to the resolved queryset before pagination.
        filter_input = args.get(filters_name)
        if filter_input and filterset_class:
            iterable = filterset_class(
                data=dict(filter_input),
                queryset=iterable,
                request=info.context).qs
        if Promise.is_thenable(iterable):
            return Promise.resolve(iterable).then(on_resolve)
        return on_resolve(iterable)

    def get_resolver(self, parent_resolver):
        # Bind the filterset class and the filter argument name as leading
        # arguments so connection_resolver receives them.
        return partial(
            super().get_resolver(parent_resolver),
            self.filterset_class,
            self.filter_field_name
        )
| {
"repo_name": "UITools/saleor",
"path": "saleor/graphql/core/fields.py",
"copies": "1",
"size": "4852",
"license": "bsd-3-clause",
"hash": 4732193709261805000,
"line_mean": 33.4113475177,
"line_max": 79,
"alpha_frac": 0.6117065128,
"autogenerated": false,
"ratio": 4.2081526452732,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.53198591580732,
"avg_score": null,
"num_lines": null
} |
from functools import partial
import graphene
from graphql_to_rest import ExternalRESTField
# Base URL of the fake REST backend used by this test schema.
HOST = 'http://test'
class Faction(graphene.ObjectType):
    """GraphQL type backed by the REST endpoint ``/factions``."""
    base_url = '{}/factions'.format(HOST)
    id = graphene.ID()
    name = graphene.String(name='name')
    # `partial(lambda: Hero)` defers evaluation: Hero is defined later in
    # this module, so it cannot be referenced directly at class-body time.
    heroes = ExternalRESTField(
        partial(lambda: Hero),
        source_field_name='id',
        filter_field_name='faction_id',
        many=True
    )
class Hero(graphene.ObjectType):
    """GraphQL type backed by the REST endpoint ``/heroes``."""
    base_url = '{}/heroes'.format(HOST)
    id = graphene.ID()
    name = graphene.String(name='name')
    faction_id = graphene.Int()
    faction = ExternalRESTField(
        Faction,
        source_field_name='faction_id',
    )
    friend_ids = graphene.List(graphene.Int)
    # Self-referential field: the lambda defers the reference to Hero, which
    # is still being defined at class-body execution time.
    friends = ExternalRESTField(
        partial(lambda: Hero),
        source_field_name='friend_ids',
        filter_field_name='id',
        many=True
    )
class Query(graphene.ObjectType):
    """Root query exposing top-level faction and hero collections."""
    factions = ExternalRESTField(
        Faction,
        id=graphene.Argument(graphene.ID),
        is_top_level=True,
        many=True
    )
    heroes = ExternalRESTField(
        Hero,
        id=graphene.Argument(graphene.ID),
        is_top_level=True,
        many=True
    )
# Executable schema used by the tests.
schema = graphene.Schema(query=Query)
| {
"repo_name": "curiousest/graphql-to-rest",
"path": "tests/compressed_schema.py",
"copies": "1",
"size": "1251",
"license": "apache-2.0",
"hash": -6852152027046630000,
"line_mean": 21.3392857143,
"line_max": 45,
"alpha_frac": 0.620303757,
"autogenerated": false,
"ratio": 3.446280991735537,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9564235124675387,
"avg_score": 0.00046992481203007516,
"num_lines": 56
} |
from functools import partial
import hypothesis.strategies as st
import numpy as np
from astropy import units as u
from astropy.tests.helper import assert_quantity_allclose
from hypothesis import example, given, settings
from poliastro.twobody.sampling import sample_closed
# Raw hypothesis strategies: angles in [-2*pi, 2*pi], eccentricities in [0, 1).
angles = partial(st.floats, min_value=-2 * np.pi, max_value=2 * np.pi)
eccentricities = partial(st.floats, min_value=0, max_value=1, exclude_max=True)
@st.composite
def with_units(draw, elements, unit):
    """Composite strategy: draw a value from *elements* and attach *unit*."""
    angle = draw(elements)
    return angle * unit
# Quantity-valued versions of the strategies above (radians / dimensionless).
angles_q = partial(with_units, elements=angles(), unit=u.rad)
eccentricities_q = partial(with_units, elements=eccentricities(), unit=u.one)
@settings(deadline=None)
@given(
    min_nu=angles_q(), ecc=eccentricities_q(), max_nu=st.one_of(angles_q(), st.none()),
)
def test_sample_closed_is_always_between_minus_pi_and_pi(min_nu, ecc, max_nu):
    """Property: every sampled anomaly lies within [-pi, pi] rad."""
    result = sample_closed(min_nu, ecc, max_nu)
    assert ((-np.pi * u.rad <= result) & (result <= np.pi * u.rad)).all()
@settings(deadline=None)
@given(
    min_nu=with_units(
        elements=st.floats(min_value=-np.pi, max_value=np.pi), unit=u.rad
    ),
    ecc=eccentricities_q(),
    max_nu=st.one_of(angles_q(), st.none()),
)
def test_sample_closed_starts_at_min_anomaly_if_in_range(min_nu, ecc, max_nu):
    """Property: when min_nu is within [-pi, pi], sampling starts at min_nu."""
    result = sample_closed(min_nu, ecc, max_nu)
    assert_quantity_allclose(result[0], min_nu, atol=1e-15 * u.rad)
@settings(deadline=None)
@given(
    min_nu=with_units(
        elements=st.floats(min_value=-np.pi, max_value=np.pi), unit=u.rad
    ),
    ecc=eccentricities_q(),
)
# Deterministic examples always run in addition to the random draws.
@example(0 * u.rad, 0 * u.one)
@example(0 * u.rad, 0.88680956 * u.one)
def test_sample_closed_starts_and_ends_at_min_anomaly_if_in_range_and_no_max_given(
    min_nu, ecc
):
    """Property: with no max_nu, the closed sample starts and ends at min_nu."""
    result = sample_closed(min_nu, ecc)
    assert_quantity_allclose(result[0], min_nu)
    assert_quantity_allclose(result[-1], min_nu, atol=1e-14 * u.rad)
| {
"repo_name": "Juanlu001/poliastro",
"path": "tests/tests_twobody/test_sampling.py",
"copies": "1",
"size": "1913",
"license": "mit",
"hash": 7131775722595903000,
"line_mean": 28.890625,
"line_max": 87,
"alpha_frac": 0.6884474647,
"autogenerated": false,
"ratio": 2.7406876790830945,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.39291351437830946,
"avg_score": null,
"num_lines": null
} |
from functools import partial
import matplotlib.pyplot as plt
import matplotlib.tri as tri
import matplotlib.colors as colors
import numpy as np
from scipy.interpolate import griddata
import pandas as pd
import seaborn as sns
sns.set_style("white")
from uintahtools.udaframe import UdaFrame, TerzaghiFrame, PorePressureMomentumFrame, BeamDeflectionFrame, Beam, BeamContourFrame
from uintahtools.uda import Variable
from uintahtools.terzaghi.terzaghi import terzaghi
#from uintahtools.elastica.large_deflection_fdm import small_deflection, large_deflection
class UdaPlot:
    """Base plotter for uda result files; concrete plots come from `create`."""

    def __init__(self, uda):
        self.uda = uda
        self.FIGSIZE = (5, 3.8)
        # NOTE(review): subclasses are expected to assign self.df (a *Frame
        # instance) before plot() is called.

    @staticmethod
    def create(type, uda):
        """Factory: return the UdaPlot subclass matching the *type* key."""
        if type == "terzaghi":
            return TerzaghiPlot(uda)
        elif type == "porepressure_momentum":
            return PorePressureMomentumPlot(uda)
        elif type == "beam.deflection":
            return BeamDeflectionPlot(uda)
        elif type == "beam.contour":
            print("Creating beam contour plot")
            return BeamContourPlot(uda)
        # NOTE(review): assert is stripped under -O; kept for interface
        # compatibility with existing callers.
        assert 0, "Bad shape creation: " + type

    def plot(self):
        """Plot the dataframe and overlay the subclass's analytical solution.

        Fixed: the previous version inlined a beam-specific call to
        ``small_deflection``, whose module-level import is commented out,
        so every plot type raised NameError.  The analytical overlay is now
        delegated to ``self.plot_analytical`` (a no-op by default), matching
        the template-method design the subclasses already follow.
        """
        fig = plt.figure(figsize=self.FIGSIZE)
        fig.subplots_adjust(left=0.15)
        ax = fig.add_subplot(111)
        ax = self.df.plot_df(ax)
        self.plot_analytical(ax)
        ax.legend(fancybox=True, framealpha=0.8, frameon=True)
        ax.yaxis.grid(True, linestyle="--")
        ax.set_xbound(lower=0, upper=1)
        self.add_labels(ax)
        self.add_legend()

    def add_legend(self):
        # Default: no extra legend handling; subclasses override.
        pass

    def add_labels(self, ax):
        """Apply the subclass-provided axis labels."""
        xlabel, ylabel = self.labels()
        ax.set_xlabel(xlabel)
        ax.set_ylabel(ylabel)

    def plot_analytical(self, ax):
        """Overlay an analytical solution; default is no overlay."""
        pass

    def labels(self):
        """Default axis labels; subclasses override."""
        xlabel, ylabel = "X", "Y"
        return xlabel, ylabel

    def annotate(self):
        raise NotImplementedError

    def display_plot(self, output):
        """Save the figure ('std' auto-names a pdf) or show it interactively."""
        if (output):
            outfile = self.uda.swap_extension(
                "pdf", number=True) if output == "std" else output
            plt.savefig(outfile, dpi=300)
        else:
            plt.show()
class BeamContourPlot(UdaPlot):
    """Per-timestep contour plots of pore pressure over the beam."""
    def __init__(self, uda):
        self.df = BeamContourFrame(uda)
        super().__init__(uda)
    def plot(self):
        """Draw one contour figure per timestep (the initial step is skipped)."""
        groups = self.df.groupby(by="time", as_index=False)
        # Number of contour levels.
        N = 6
        for i, (name, group) in enumerate(groups):
            if i == 0:
                continue
            plt.figure(num=i + 1, dpi=300)
            ax = plt.gca()
            ax.set_xlim(0, 1.2)
            ax.set_ylim(-0.9, 0.1)
            ax.set_xticks(np.arange(0, 1.2, 0.1))
            ax.set_yticks(np.arange(-0.9, 0.1, 0.1))
            ax.set_aspect('equal')
            # Plot grid.
            ax.grid(c='k', ls='-', alpha=0.3)
            triang = tri.Triangulation(group.x, group.y)
            print(triang.x)
            # NOTE(review): triangles wider than `treshold` in x are detected
            # (and reported via print) but never actually masked -- the
            # set_mask call below is still commented out.  Debug prints in
            # this loop should probably be removed for production use.
            treshold = 0.5
            for [v1, v2, v3] in triang.triangles:
                print(v1, v2, v3, end="\t")
                print(triang.x[v1], triang.x[v2], triang.x[v3])
                if abs(triang.x[v1] - triang.x[v2]) > treshold or abs(triang.x[v2] - triang.x[v3]) > treshold or abs(triang.x[v1] - triang.x[v3]) > treshold:
                    print("\t\t THIS TRIANGLE SHOULD BE MASKED!!!")
                    # triang.set_mask()
            ax.triplot(triang, 'bo-', markersize=0.5, lw=0.1)
            cs = plt.tricontour(group.x, group.y,
                                group["p.porepressure"], N, colors='k', linewidths=0.7, extend="neither")
            plt.clabel(cs, fmt="%1.2g", fontsize=7.5) # manual=True
            plt.tricontourf(group.x, group.y,
                            group["p.porepressure"], N, cmap=plt.get_cmap("binary"), alpha=0.5, extend="neither")
            # plt.scatter(group.x, group.y,
            #    c=group["p.porepressure"], s=0.6, cmap=plt.get_cmap("binary"))
class BeamDeflectionPlot(UdaPlot):
    """Deflection-vs-position plot for the beam benchmark."""
    def __init__(self, uda):
        self.df = BeamDeflectionFrame(uda)
        super().__init__(uda)
        self.FIGSIZE = (5, 3.8)
    def labels(self):
        xlabel = "Position from fixed end $x$"
        ylabel = "Beam deflection $y$"
        return xlabel, ylabel
    def plot_analytical(self, ax):
        """Overlay the small-deflection analytical beam solution.

        NOTE(review): `small_deflection` comes from a module-level import
        that is currently commented out, so calling this raises NameError --
        restore that import before use.
        """
        # Common styling for the analytical curve.
        add_to_plot = partial(
            ax.plot, color="gray", alpha=0.8,
            linestyle="solid", linewidth=2, zorder=1)
        load = 54e3
        number_of_cells = 100
        beam = Beam(b=0.1, l=1, h=0.3, E=10e6)
        xs, ys = small_deflection(load * beam.b, number_of_cells, beam)
        add_to_plot(xs, ys, label="analytical")
class PorePressureMomentumPlot(UdaPlot):
    """Normalized pore pressure momentum along the beam."""
    def __init__(self, uda):
        self.df = PorePressureMomentumFrame(uda)
        super(PorePressureMomentumPlot, self).__init__(uda)
        # Taller figure than the base-class default.
        self.FIGSIZE = (5, 6)
    def plot_analytical(self, ax):
        # No analytical overlay for this plot type.
        pass
    def plot(self):
        """Build the figure from the dataframe; overrides the base plot()."""
        fig = plt.figure(figsize=self.FIGSIZE)
        ax = fig.add_subplot(111)
        # self.plot_analytical(ax)
        self.df.plot_df(ax)
        # self.df.plot_df()
        # Removing plot frame
        # for side in ('right', 'top'):
        #     ax.spines[side].set_visible(False)
        ax.set_xbound(lower=0, upper=1)
        # ax.set_ybound(lower=0, upper=1)
        self.add_labels(ax)
        # self.annotate()
        self.add_legend()
    def add_legend(self):
        plt.legend(loc=0)
    def labels(self):
        xlabel = "Position along beam $x/L$"
        ylabel = "Normalized pore pressure momentum $M_p^*$"
        return xlabel, ylabel
    def annotate(self):
        pass
class TerzaghiPlot(UdaPlot):
    """Pore pressure isochrones for the Terzaghi consolidation benchmark."""
    def __init__(self, uda):
        super().__init__(uda)
        self.df = TerzaghiFrame(uda)
    def add_legend(self):
        plt.legend(bbox_to_anchor=(0.7, 0), loc=4)
    def labels(self):
        xlabel = "Normalized pore pressure $p/p_0$"
        ylabel = "Normalized depth $z/H$"
        return xlabel, ylabel
    def annotate(self):
        """Annotate the isochrones."""
        # Creating labels
        # Hand-tuned figure coordinates, one per isochrone; the order matches
        # reversed(self.uda.timeseries) in the loop below.
        pos = [(0.22, 0.15),
               (0.27, 0.25),
               (0.51, 0.33),
               (0.655, 0.34),
               (0.87, 0.35),
               (0.87, 0.5),
               (0.87, 0.6),
               (0.87, 0.7),
               (0.8, 0.85)
               ]
        for i, time in enumerate(reversed(self.uda.timeseries)):
            label = "$T = " + str(time) + "$"
            plt.figtext(*pos[i], label, horizontalalignment="left")
    def plot_analytical(self,
                        ax,
                        zs=[],
                        samples=40,
                        maxj=25,
                        time=False):
        """Compute and plot analytical solution.

        Two options:
        1. porepressure vs depth (z)
        2. porepressure vs time (t)

        NOTE(review): the mutable default ``zs=[]`` is safe only because it
        is never mutated -- a fresh list is assigned when it is empty.
        """
        func = terzaghi
        timeseries = self.uda.timeseries
        # Common styling for every analytical curve.
        add_to_plot = partial(
            ax.plot, color="gray", alpha=0.8,
            linestyle="solid", linewidth=2, zorder=1)
        if not zs:
            zs = [z / samples for z in range(samples + 1)]
        if not timeseries:
            timeseries = np.logspace(-5, 1, num=samples)
        if time:
            # Option 2: pore pressure vs time at each fixed depth.
            for z in zs:
                pores = [func(t, z, maxj) for t in timeseries]
                add_to_plot(timeseries, pores)
        else:
            # Option 1: isochrones (pore pressure vs depth at fixed times);
            # only the first curve receives a legend entry.
            legend_entry = False
            for timefactor in timeseries:
                pores = [func(timefactor, z, maxj) for z in zs]
                if legend_entry:
                    add_to_plot(pores, zs)
                else:
                    add_to_plot(pores, zs, label="analytical")
                    legend_entry = True
| {
"repo_name": "hildenost/uintahtools",
"path": "uintahtools/udaplot.py",
"copies": "1",
"size": "8283",
"license": "mit",
"hash": 1501587209170649000,
"line_mean": 29.2299270073,
"line_max": 157,
"alpha_frac": 0.532777979,
"autogenerated": false,
"ratio": 3.3345410628019323,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43673190418019325,
"avg_score": null,
"num_lines": null
} |
from functools import partial
import matplotlib.pyplot as plt
import numpy as np
from scipy import stats
def my_kde_bandwidth(obj, fac=1. / 5):
    """Scott's Rule bandwidth multiplied by the constant factor *fac*."""
    scott_factor = np.power(obj.n, -1. / (obj.d + 4))
    return scott_factor * fac
# Bimodal sample: 175 points from N(-2, 1) plus 50 points from N(2, 0.2).
loc1, scale1, size1 = (-2, 1, 175)
loc2, scale2, size2 = (2, 0.2, 50)
x2 = np.concatenate([np.random.normal(loc=loc1, scale=scale1, size=size1),
                     np.random.normal(loc=loc2, scale=scale2, size=size2)])
# Evaluation grid extending one unit beyond the sample range.
x_eval = np.linspace(x2.min() - 1, x2.max() + 1, 500)
# KDEs with four bandwidth choices: Scott, Silverman, and two scaled-Scott.
kde = stats.gaussian_kde(x2)
kde2 = stats.gaussian_kde(x2, bw_method='silverman')
kde3 = stats.gaussian_kde(x2, bw_method=partial(my_kde_bandwidth, fac=0.2))
kde4 = stats.gaussian_kde(x2, bw_method=partial(my_kde_bandwidth, fac=0.5))
# Exact mixture density for comparison, weighted by the sample proportions.
pdf = stats.norm.pdf
bimodal_pdf = pdf(x_eval, loc=loc1, scale=scale1) * float(size1) / x2.size + \
              pdf(x_eval, loc=loc2, scale=scale2) * float(size2) / x2.size
fig = plt.figure(figsize=(8, 6))
ax = fig.add_subplot(111)
# Rug plot of the raw samples along the x-axis.
ax.plot(x2, np.zeros(x2.shape), 'b+', ms=12)
ax.plot(x_eval, kde(x_eval), 'k-', label="Scott's Rule")
ax.plot(x_eval, kde2(x_eval), 'b-', label="Silverman's Rule")
ax.plot(x_eval, kde3(x_eval), 'g-', label="Scott * 0.2")
ax.plot(x_eval, kde4(x_eval), 'c-', label="Scott * 0.5")
ax.plot(x_eval, bimodal_pdf, 'r--', label="Actual PDF")
ax.set_xlim([x_eval.min(), x_eval.max()])
ax.legend(loc=2)
ax.set_xlabel('x')
ax.set_ylabel('Density')
plt.show()
| {
"repo_name": "DailyActie/Surrogate-Model",
"path": "01-codes/scipy-master/doc/source/tutorial/stats/plots/kde_plot4.py",
"copies": "1",
"size": "1463",
"license": "mit",
"hash": 965169777738911700,
"line_mean": 32.25,
"line_max": 78,
"alpha_frac": 0.6466165414,
"autogenerated": false,
"ratio": 2.398360655737705,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8544977197137705,
"avg_score": 0,
"num_lines": 44
} |
from functools import partial
import mock
from mock import call
from pytest import raises as assert_raises
from rhino.errors import NotFound
from rhino.mapper import Mapper
from rhino.resource import Resource, get
from rhino.response import ok
from rhino.test import TestClient
class CallbackError(Exception):
    """Raised by Wrapper.error to simulate a failing context callback."""


class HandlerError(Exception):
    """Raised from inside a test request handler."""


class MapperError(Exception):
    """Raised from a Mapper.handle_error override."""
class Wrapper(object):
    """Wrap a resource and register a spy callback for each context phase.

    Phases listed in `exceptions` get three callbacks instead: a pre-marker,
    one that raises CallbackError, and a post-marker -- so tests can check
    that callbacks registered after a failing one still run.
    """

    def __init__(self, wrapped, exceptions=None):
        self.wrapped = wrapped
        self.request = None
        self.response = None
        self.exceptions = exceptions or []
        # Autospec spy so calls (with their arguments) can be asserted later.
        self.cb = mock.create_autospec(lambda *args, **kw: 1)

    def error(self, msg, *args, **kw):
        """Callback stand-in that always fails."""
        raise CallbackError(msg)

    def __call__(self, request, ctx):
        self.request = request
        for phase in ('enter', 'leave', 'finalize', 'teardown', 'close'):
            if phase not in self.exceptions:
                ctx.add_callback(phase, partial(self.cb, phase))
            else:
                ctx.add_callback(phase, partial(self.cb, phase + '-pre'))
                ctx.add_callback(phase, partial(self.error, phase))
                ctx.add_callback(phase, partial(self.cb, phase + '-post'))
        self.response = self.wrapped(request, ctx)
        return self.response
def test_callbacks():
    """All five phases fire, in order, for a successful request."""
    @get
    def handler(request):
        return ok('test')
    wrapper = Wrapper(Resource(handler))
    mapper = Mapper()
    mapper.add('/', wrapper)
    response = TestClient(mapper.wsgi).get('/')
    assert response.code == 200
    wrapper.cb.assert_has_calls([
        call('enter', wrapper.request),
        call('leave', wrapper.request, wrapper.response),
        call('finalize', wrapper.request, wrapper.response),
        call('teardown'),
        call('close'),
    ])
def test_callbacks_exception():
    """When the handler raises, only enter/teardown/close callbacks fire."""
    not_found = NotFound()
    @get
    def handler(request):
        raise not_found
    wrapper = Wrapper(Resource(handler))
    mapper = Mapper()
    mapper.add('/', wrapper)
    response = TestClient(mapper.wsgi).get('/')
    assert response.code == 404
    wrapper.cb.assert_has_calls([
        call('enter', wrapper.request),
        call('teardown'),
        call('close'),
    ])
def test_teardown_callbacks_swallow_exceptions():
    """A failing teardown callback must not break the response, and the
    callbacks registered after the failing one must still run."""
    @get
    def handler(request):
        return ok('test')
    # BUG FIX: was ``exceptions=('teardown,')`` -- the comma was *inside* the
    # string, so this passed the plain string 'teardown,' rather than a
    # 1-tuple. It only worked by accident via substring containment in
    # Wrapper.__call__. A one-element tuple needs the trailing comma inside
    # the parentheses.
    wrapper = Wrapper(Resource(handler), exceptions=('teardown',))
    app = Mapper()
    app.add('/', wrapper)
    client = TestClient(app.wsgi)
    res = client.get('/')
    assert res.code == 200
    wrapper.cb.assert_has_calls([
        call('enter', wrapper.request),
        call('leave', wrapper.request, wrapper.response),
        call('finalize', wrapper.request, wrapper.response),
        call('teardown-pre'),
        call('teardown-post'),
        call('close'),
    ])
def test_teardown_callbacks_run_after_wsgi_response_error():
    """Teardown callbacks still run when building the WSGI response fails.

    Note that 'close' is not expected here -- the response never completed.
    """
    @get
    def handler(request):
        return 1  # Not a valid response
    wrapper = Wrapper(Resource(handler))
    app = Mapper()
    app.add('/', wrapper)
    client = TestClient(app.wsgi)
    # Returning a non-response object surfaces as a TypeError from the call.
    assert_raises(TypeError, client.get, '/')
    wrapper.cb.assert_has_calls([
        call('enter', wrapper.request),
        call('leave', wrapper.request, wrapper.response),
        call('finalize', wrapper.request, wrapper.response),
        call('teardown'),
    ])
def test_teardown_callbacks_run_after_handle_error_exception():
    """Teardown callbacks run even when the mapper's error handler raises."""
    class TestMapper(Mapper):
        def handle_error(self, request, ctx):
            raise MapperError
    @get
    def handler(request):
        raise HandlerError
    wrapper = Wrapper(Resource(handler))
    app = TestMapper()
    app.add('/', wrapper)
    client = TestClient(app.wsgi)
    # HandlerError triggers handle_error, which re-raises as MapperError.
    assert_raises(MapperError, client.get, '/')
    wrapper.cb.assert_has_calls([
        call('enter', wrapper.request),
        call('teardown'),
    ])
def test_finalize_callbacks_before_conditional_response():
    """Headers added during 'finalize' participate in conditional requests."""
    def add_etag(req, res):
        res.headers.add('ETag', '"1"')
    @get
    def handler(request, ctx):
        ctx.add_callback('finalize', add_etag)
        return ok('test')
    mapper = Mapper()
    mapper.add('/', handler)
    response = TestClient(mapper.wsgi).get('/', if_none_match='"1"')
    assert response.code == 304
| {
"repo_name": "trendels/rhino",
"path": "test/test_callbacks.py",
"copies": "1",
"size": "4315",
"license": "mit",
"hash": 9016096632409556000,
"line_mean": 24.5325443787,
"line_max": 80,
"alpha_frac": 0.6164542294,
"autogenerated": false,
"ratio": 3.958715596330275,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0009835929010956882,
"num_lines": 169
} |
from functools import partial
import nltk
from knx.text.postagger.base import map_paren, reverse_map_paren
from BS.knx.text.tokenizer import default_tokenizer as tokenizer
try:
    from textblob_aptagger import PerceptronTagger
    perceptron_tagger = PerceptronTagger()
    # Characters that are always tagged as symbols ('SYM'), overriding the
    # model's prediction.
    SYMBOLS = {'@', '#', '%', '^', '*', '+', '=', '~'}
    # Replace the original tag method to support tokenized text
    def _tag(self, corpus, tokenize=True):
        """Tags a string `corpus`."""
        # Assume untokenized corpus has \n between sentences and ' ' between words
        s_split = nltk.sent_tokenize if tokenize else lambda text: [text]
        w_split = tokenizer.tokenize if tokenize else lambda sent: sent
        def split_sents(corpus):
            for s in s_split(corpus):
                yield map(map_paren, w_split(s))
        prev, prev2 = self.START
        has_open_left_single_quote = False
        tokens = []
        for words in split_sents(corpus):
            context = self.START + [self._normalize(w) for w in words] + self.END
            for i, word in enumerate(words):
                tag = self.tagdict.get(word)
                if not tag:
                    features = self._get_features(i, word, context, prev, prev2)
                    tag = self.model.predict(features)
                pos = tag
                if word in SYMBOLS:
                    pos = 'SYM'
                elif word == "'" and pos == 'POS' and has_open_left_single_quote:
                    # A "possessive" apostrophe after an open single quote is
                    # really a closing quote.
                    pos = "''"
                    has_open_left_single_quote = False
                elif word == "'" and pos == "''":
                    has_open_left_single_quote = False
                elif word == '`' and pos == '``':
                    has_open_left_single_quote = True
                word = reverse_map_paren(word)
                tokens.append((word, pos))
                prev2 = prev
                prev = pos
        return tokens
    perceptron_tagger.tag = partial(_tag, perceptron_tagger)
except ImportError:  # pragma: no cover
    # BUG FIX: was a bare ``except:``, which swallowed *any* error raised in
    # the block above (including bugs in _tag or tagger construction) and
    # misreported it as a missing dependency. Only an ImportError means the
    # package is actually absent; anything else now propagates with its real
    # traceback.
    raise NotImplementedError('PerceptronTagger from textblob_aptagger does not exist!')
def tag(text):
    """POS-tag `text` with the patched PerceptronTagger.

    Parameters
    ----------
    text : str or iterable
        If a plain string, it is sentence-tokenized and word-tokenized
        first; any other iterable is treated as a pre-tokenized list of
        tokens.

    Returns
    -------
    tags : list
        List of (word, pos) tuples.
    """
    needs_tokenizing = type(text) in {str, unicode}
    return perceptron_tagger.tag(text, tokenize=needs_tokenizing)
if __name__ == '__main__':
    # Manual smoke test / interactive demo (Python 2 syntax: print
    # statements and raw_input).
    import time
    start_time = time.time()
    print tag('The horse raced past the barn fell.')
    print 'Done tagging in %.3fs' % (time.time() - start_time)
    start_time = time.time()
    # Pre-tokenized input skips sentence/word tokenization.
    print tag(['The', 'horse', 'raced', 'past', 'the', 'barn', 'fell', '.'])
    print 'Done tagging (tokenized) in %.3fs' % (time.time() - start_time)
    while True:
        sentence = raw_input('Enter a sentence: ')
        start_time = time.time()
        print tag(sentence)
        print 'Done in %.3fs' % (time.time() - start_time)
| {
"repo_name": "gofortargets/CNN_brandsafety",
"path": "knx/text/postagger/perceptron_tagger.py",
"copies": "1",
"size": "3257",
"license": "apache-2.0",
"hash": 5523235570889157000,
"line_mean": 36.4367816092,
"line_max": 88,
"alpha_frac": 0.5677003377,
"autogenerated": false,
"ratio": 3.8408018867924527,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49085022244924525,
"avg_score": null,
"num_lines": null
} |
from functools import partial
import numpy as np
from astropy import units as u
from astropy.tests.helper import assert_quantity_allclose
from hypothesis import example, given, settings, strategies as st
from poliastro.twobody.sampling import sample_closed
# Hypothesis strategy factories: raw floats for angles in [-2*pi, 2*pi]
# and eccentricities in the half-open interval [0, 1).
angles = partial(st.floats, min_value=-2 * np.pi, max_value=2 * np.pi)
eccentricities = partial(st.floats, min_value=0, max_value=1, exclude_max=True)
@st.composite
def with_units(draw, elements, unit):
    """Draw a value from `elements` and attach the astropy `unit` to it."""
    return draw(elements) * unit
# Quantity-valued strategies: the same draws, carrying astropy units.
angles_q = partial(with_units, elements=angles(), unit=u.rad)
eccentricities_q = partial(with_units, elements=eccentricities(), unit=u.one)
@settings(deadline=None)
@given(
    min_nu=angles_q(),
    ecc=eccentricities_q(),
    max_nu=st.one_of(angles_q(), st.none()),
)
def test_sample_closed_is_always_between_minus_pi_and_pi(min_nu, ecc, max_nu):
    """Every sampled anomaly lies within the closed interval [-pi, pi]."""
    samples = sample_closed(min_nu, ecc, max_nu)
    above_lower = -np.pi * u.rad <= samples
    below_upper = samples <= np.pi * u.rad
    assert (above_lower & below_upper).all()
@settings(deadline=None)
@given(
    min_nu=with_units(
        elements=st.floats(min_value=-np.pi, max_value=np.pi, exclude_max=True),
        unit=u.rad,
    ),
    ecc=eccentricities_q(),
    max_nu=st.one_of(angles_q(), st.none()),
)
@example(0 * u.rad, 0 * u.one, 0 * u.rad)
def test_sample_closed_starts_at_min_anomaly_if_in_range(min_nu, ecc, max_nu):
    """The first sample coincides with the requested minimum anomaly."""
    first_sample = sample_closed(min_nu, ecc, max_nu)[0]
    assert_quantity_allclose(first_sample, min_nu, atol=1e-15 * u.rad)
@settings(deadline=None)
@given(
    min_nu=with_units(
        elements=st.floats(min_value=-np.pi, max_value=np.pi), unit=u.rad
    ),
    ecc=eccentricities_q(),
)
@example(1e-16 * u.rad, 0 * u.one)
@example(0 * u.rad, 0 * u.one)
@example(0 * u.rad, 0.88680956 * u.one)
def test_sample_closed_starts_and_ends_at_min_anomaly_if_in_range_and_no_max_given(
    min_nu, ecc
):
    """With no max anomaly given, the sampled arc closes back on min_nu."""
    samples = sample_closed(min_nu, ecc)
    for endpoint in (samples[0], samples[-1]):
        assert_quantity_allclose(endpoint, min_nu, atol=1e-14 * u.rad)
| {
"repo_name": "poliastro/poliastro",
"path": "tests/tests_twobody/test_sampling.py",
"copies": "1",
"size": "2028",
"license": "mit",
"hash": 6542720281068257000,
"line_mean": 28.8235294118,
"line_max": 83,
"alpha_frac": 0.6750493097,
"autogenerated": false,
"ratio": 2.6825396825396823,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3857588992239682,
"avg_score": null,
"num_lines": null
} |
from functools import partial
import numpy as np
from .common import Benchmark, safe_import
with safe_import():
    # FIX: these names were previously taken from the deprecated (and since
    # removed) top-level `scipy` re-exports of NumPy functions, and
    # LinearOperator from the private `scipy.sparse.linalg.interface`
    # module. Import them from their canonical homes; the bound names are
    # unchanged, so all downstream code keeps working.
    from numpy import array, r_, ones, arange, sort, diag, cos, pi
    from numpy.random import rand
    from scipy.linalg import eigh, orth, cho_factor, cho_solve
    import scipy.sparse
    from scipy.sparse.linalg import lobpcg, LinearOperator
def _sakurai(n):
""" Example taken from
T. Sakurai, H. Tadano, Y. Inadomi and U. Nagashima
A moment-based method for large-scale generalized eigenvalue problems
Appl. Num. Anal. Comp. Math. Vol. 1 No. 2 (2004) """
A = scipy.sparse.eye(n, n)
d0 = array(r_[5, 6*ones(n-2), 5])
d1 = -4*ones(n)
d2 = ones(n)
B = scipy.sparse.spdiags([d2, d1, d0, d1, d2], [-2, -1, 0, 1, 2], n, n)
k = arange(1, n+1)
w_ex = sort(1. / (16.*pow(cos(0.5*k*pi/(n+1)), 4))) # exact eigenvalues
return A, B, w_ex
def _mikota_pair(n):
# Mikota pair acts as a nice test since the eigenvalues
# are the squares of the integers n, n=1,2,...
x = arange(1, n + 1)
B = diag(1. / x)
y = arange(n - 1, 0, -1)
z = arange(2 * n - 1, 0, -2)
A = diag(z) - diag(y, -1) - diag(y, 1)
return A.astype(float), B.astype(float)
def _as2d(ar):
if ar.ndim == 2:
return ar
else: # Assume 1!
aux = np.array(ar, copy=False)
aux.shape = (ar.shape[0], 1)
return aux
def _precond(LorU, lower, x):
    """Cholesky-based preconditioner: solve with the given factor, 2-D out."""
    return _as2d(cho_solve((LorU, lower), x))
class Bench(Benchmark):
    """asv benchmark comparing lobpcg against dense eigh on two classic
    generalized eigenproblems (Mikota pair and Sakurai matrices)."""
    # First axis (problem size n) is filled in per timing method below.
    params = [
        [],
        ['lobpcg', 'eigh']
    ]
    param_names = ['n', 'solver']
    def __init__(self):
        # asv reads `params`/`setup` off the function objects, so patch them
        # via __func__ to give each timing method its own sizes and setup.
        self.time_mikota.__func__.params = list(self.params)
        self.time_mikota.__func__.params[0] = [128, 256, 512, 1024, 2048]
        self.time_mikota.__func__.setup = self.setup_mikota
        self.time_sakurai.__func__.params = list(self.params)
        self.time_sakurai.__func__.params[0] = [50, 400]
        self.time_sakurai.__func__.setup = self.setup_sakurai
    def setup_mikota(self, n, solver):
        self.shape = (n, n)
        self.A, self.B = _mikota_pair(n)
        if solver == 'eigh' and n >= 512:
            # skip: slow, and not useful to benchmark
            raise NotImplementedError()
    def setup_sakurai(self, n, solver):
        self.shape = (n, n)
        self.A, self.B, all_eigenvalues = _sakurai(n)
        # Dense copies for the eigh reference solver.
        self.A_dense = self.A.A
        self.B_dense = self.B.A
    def time_mikota(self, n, solver):
        # Time the 10 smallest eigenpairs of the Mikota problem.
        m = 10
        if solver == 'lobpcg':
            X = rand(n, m)
            X = orth(X)
            # Preconditioner built from A's Cholesky factorization.
            LorU, lower = cho_factor(self.A, lower=0, overwrite_a=0)
            M = LinearOperator(self.shape,
                               matvec=partial(_precond, LorU, lower),
                               matmat=partial(_precond, LorU, lower))
            eigs, vecs = lobpcg(self.A, X, self.B, M, tol=1e-4, maxiter=40)
        else:
            eigh(self.A, self.B, eigvals_only=True, eigvals=(0, m - 1))
    def time_sakurai(self, n, solver):
        # Time the 3 smallest eigenpairs of the Sakurai problem.
        m = 3
        if solver == 'lobpcg':
            X = rand(n, m)
            eigs, vecs, resnh = lobpcg(self.A, X, self.B, tol=1e-6, maxiter=500,
                                       retResidualNormsHistory=1)
        else:
            eigh(self.A_dense, self.B_dense, eigvals_only=True, eigvals=(0, m - 1))
    # Retain old benchmark results (remove this if changing the benchmark)
    time_mikota.version = "a1fb679758f7e5cf79d18cc4930afdff999fccc142fe7a4f63e73b39ab1f58bb"
    time_sakurai.version = "7c38d449924fb71f777bd408072ecc883b8b05e53a6544e97da3887fbc10b235"
| {
"repo_name": "andyfaff/scipy",
"path": "benchmarks/benchmarks/sparse_linalg_lobpcg.py",
"copies": "13",
"size": "3645",
"license": "bsd-3-clause",
"hash": 7824599550654018000,
"line_mean": 31.8378378378,
"line_max": 93,
"alpha_frac": 0.5670781893,
"autogenerated": false,
"ratio": 2.8610675039246467,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9928145693224647,
"avg_score": null,
"num_lines": null
} |
from functools import partial
import numpy as np
from keras.preprocessing.image import img_to_array
from keras.preprocessing.image import load_img
from toolbox.image import bicubic_rescale
from toolbox.image import modcrop
from toolbox.paths import data_dir
def load_set(name, lr_sub_size=11, lr_sub_stride=5, scale=3):
    """Build training arrays for dataset `name`.

    Returns ``(x, y)``: stacked low-resolution and high-resolution
    sub-image arrays cropped from every image in ``data_dir / name``.
    """
    crop_lr = partial(generate_sub_images, size=lr_sub_size,
                      stride=lr_sub_stride)
    # HR patches are `scale` times larger, with a proportional stride.
    crop_hr = partial(generate_sub_images, size=lr_sub_size * scale,
                      stride=lr_sub_stride * scale)
    lr_patches = []
    hr_patches = []
    for path in (data_dir / name).glob('*'):
        lr_image, hr_image = load_image_pair(str(path), scale=scale)
        lr_patches.extend(img_to_array(img) for img in crop_lr(lr_image))
        hr_patches.extend(img_to_array(img) for img in crop_hr(hr_image))
    return np.stack(lr_patches), np.stack(hr_patches)
def load_image_pair(path, scale=3):
    """Return (lr, hr) YCbCr images: hr is modcropped to `scale`, lr is the
    bicubic downscale of hr by 1/scale."""
    hr_image = modcrop(load_img(path).convert('YCbCr'), scale)
    lr_image = bicubic_rescale(hr_image, 1 / scale)
    return lr_image, hr_image
def generate_sub_images(image, size, stride):
    """Yield `size`x`size` crops of `image`, stepping by `stride`.

    Crops are taken left-to-right in the outer loop and top-to-bottom in
    the inner loop; partial windows at the edges are skipped.
    """
    width, height = image.size
    for left in range(0, width - size + 1, stride):
        for top in range(0, height - size + 1, stride):
            yield image.crop([left, top, left + size, top + size])
| {
"repo_name": "qobilidop/srcnn",
"path": "toolbox/data.py",
"copies": "1",
"size": "1463",
"license": "mit",
"hash": -3030628448007750700,
"line_mean": 33.8333333333,
"line_max": 76,
"alpha_frac": 0.6384142174,
"autogenerated": false,
"ratio": 3.022727272727273,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9161141490127274,
"avg_score": 0,
"num_lines": 42
} |
from functools import partial
import numpy as np
from pathlib import Path
from menpo.base import LazyList
from menpo.image import Image, MaskedImage, BooleanImage
from menpo.image.base import normalize_pixels_range, channels_to_front
def _pil_to_numpy(pil_image, normalize, convert=None):
p = pil_image.convert(convert) if convert else pil_image
p = np.asarray(p)
if normalize:
return normalize_pixels_range(p)
else:
return p
def pillow_importer(filepath, asset=None, normalize=True, **kwargs):
    r"""
    Imports an image using PIL/pillow.

    Different image modes cause different importing strategies.

    RGB, L, I:
        Imported as either `float` or `uint8` depending on normalisation flag.
    RGBA:
        Imported as :map:`MaskedImage` if normalize is ``True`` else imported
        as a 4 channel `uint8` image.
    1:
        Imported as a :map:`BooleanImage`. Normalisation is ignored.
    F:
        Imported as a floating point image. Normalisation is ignored.

    Parameters
    ----------
    filepath : `Path`
        Absolute filepath of image
    asset : `object`, optional
        An optional asset that may help with loading. This is unused for this
        implementation.
    normalize : `bool`, optional
        If ``True``, normalize between 0.0 and 1.0 and convert to float. If
        ``False`` just pass whatever PIL imports back (according
        to types rules outlined in constructor).
    \**kwargs : `dict`, optional
        Any other keyword arguments.

    Returns
    -------
    image : :map:`Image` or subclass
        The imported image.
    """
    import PIL.Image as PILImage

    if isinstance(filepath, Path):
        filepath = str(filepath)
    pil_image = PILImage.open(filepath)
    mode = pil_image.mode
    # Dispatch on the PIL mode string (see the strategy table above).
    if mode == "RGBA":
        # If normalize is False, then we return the alpha as an extra
        # channel, which can be useful if the alpha channel has semantic
        # meanings!
        if normalize:
            alpha = np.array(pil_image)[..., 3].astype(bool)
            image_pixels = _pil_to_numpy(pil_image, True, convert="RGB")
            image = MaskedImage.init_from_channels_at_back(image_pixels, mask=alpha)
        else:
            # With no normalisation we just return the pixels
            image = Image.init_from_channels_at_back(_pil_to_numpy(pil_image, False))
    elif mode in ["L", "I", "RGB"]:
        # Greyscale, Integer and RGB images
        image = Image.init_from_channels_at_back(_pil_to_numpy(pil_image, normalize))
    elif mode == "1":
        # Convert to 'L' type (http://stackoverflow.com/a/4114122/1716869).
        # Can't normalize a binary image
        image = BooleanImage(_pil_to_numpy(pil_image, False, convert="L"), copy=True)
    elif mode == "P":
        # Convert pallete images to RGB
        image = Image.init_from_channels_at_back(
            _pil_to_numpy(pil_image, normalize, convert="RGB")
        )
    elif mode == "F":  # Floating point images
        # Don't normalize as we don't know the scale
        image = Image.init_from_channels_at_back(_pil_to_numpy(pil_image, False))
    else:
        raise ValueError("Unexpected mode for PIL: {}".format(mode))
    return image
def abs_importer(filepath, asset=None, **kwargs):
    r"""
    Allows importing the ABS file format from the FRGC dataset.

    The z-min value is stripped from the image to make it renderable.

    Parameters
    ----------
    filepath : `Path`
        Absolute filepath of the file.
    asset : `object`, optional
        An optional asset that may help with loading. This is unused for this
        implementation.
    \**kwargs : `dict`, optional
        Any other keyword arguments.

    Returns
    -------
    image : :map:`Image` or subclass
        The imported image.
    """
    import re

    with open(str(filepath), "r") as f:
        # The first two header lines hold the image dimensions; the rest of
        # the header content is discarded (take the first regex match).
        n_rows = int(re.findall("([0-9]+) rows", f.readline())[0])
        n_cols = int(re.findall("([0-9]+) columns", f.readline())[0])
    # Column 0 is the mask; columns 1-3 are the XYZ data.
    image_data = np.loadtxt(str(filepath), skiprows=3, unpack=True)
    xyz_view = image_data[:, 1:]
    # Replace the lowest value with nan so that we can render properly
    corrupt_value = np.min(xyz_view)
    xyz_view[np.any(np.isclose(xyz_view, corrupt_value), axis=1)] = np.nan
    return MaskedImage(
        np.rollaxis(np.reshape(xyz_view, [n_rows, n_cols, 3]), -1),
        np.reshape(image_data[:, 0], [n_rows, n_cols]).astype(bool),
        copy=False,
    )
def flo_importer(filepath, asset=None, **kwargs):
    r"""
    Allows importing the Middlebury FLO (optical flow) file format.

    Parameters
    ----------
    filepath : `Path`
        Absolute filepath of the file.
    asset : `object`, optional
        An optional asset that may help with loading. This is unused for this
        implementation.
    \**kwargs : `dict`, optional
        Any other keyword arguments.

    Returns
    -------
    image : :map:`Image` or subclass
        A 2-channel image holding the (u, v) flow components.
    """
    with open(str(filepath), "rb") as f:
        if f.read(4) != b"PIEH":
            raise ValueError("Invalid FLO file.")
        width, height = np.fromfile(f, dtype=np.uint32, count=2)
        # Flow samples are interleaved: u0, v0, u1, v1, u2, v2, ...
        raw_flow = np.fromfile(f, dtype=np.float32, count=width * height * 2)
    u_component = raw_flow[0::2].reshape(height, width)
    v_component = raw_flow[1::2].reshape(height, width)
    uv = np.concatenate([u_component[None, ...], v_component[None, ...]])
    return Image(uv, copy=False)
def imageio_importer(filepath, asset=None, normalize=True, **kwargs):
    r"""
    Import an image through the imageio library, which ships the plugins
    needed for many interesting formats (e.g. RAW images) beyond the
    common ones.

    Parameters
    ----------
    filepath : `Path`
        Absolute filepath of the image.
    asset : `object`, optional
        An optional asset that may help with loading. This is unused for this
        implementation.
    normalize : `bool`, optional
        If ``True``, normalize between 0.0 and 1.0 and convert to float. If
        ``False`` just return whatever imageio imports.
    \**kwargs : `dict`, optional
        Any other keyword arguments.

    Returns
    -------
    image : :map:`Image` or subclass
        The imported image.
    """
    import imageio

    pixels = channels_to_front(imageio.imread(str(filepath)))
    # Only suffixes that can actually carry transparency get alpha handling.
    if pixels.shape[0] == 4 and filepath.suffix in {".png"}:
        if normalize:
            rgb = normalize_pixels_range(pixels[:3])
            return MaskedImage(rgb, mask=pixels[-1].astype(bool), copy=False)
        # Without normalisation the alpha channel is returned as a fourth
        # channel, since it may carry semantic meaning.
        return Image(pixels, copy=False)
    # Assumed not to have an alpha channel.
    if normalize:
        return Image(normalize_pixels_range(pixels), copy=False)
    return Image(pixels, copy=False)
def imageio_gif_importer(filepath, asset=None, normalize=True, **kwargs):
    r"""
    Import GIF images using the freeimagemulti plugin of imageio.

    Returns a :map:`LazyList` giving lazy, per-frame access to the GIF.

    Parameters
    ----------
    filepath : `Path`
        Absolute filepath of the video.
    asset : `object`, optional
        An optional asset that may help with loading. This is unused for this
        implementation.
    normalize : `bool`, optional
        If ``True``, normalize between 0.0 and 1.0 and convert to float. If
        ``False`` just return whatever imageio imports.
    \**kwargs : `dict`, optional
        Any other keyword arguments.

    Returns
    -------
    image : :map:`LazyList`
        A :map:`LazyList` containing :map:`Image` or subclasses per frame
        of the GIF.
    """
    import imageio

    gif_reader = imageio.get_reader(str(filepath), format="gif", mode="I")

    def frame_to_menpo(reader, frame_index):
        pixels = channels_to_front(reader.get_data(frame_index))
        if pixels.shape[0] == 4:
            # 4-channel frame: alpha becomes a mask when normalizing, or is
            # kept as an extra channel (it may carry semantic meaning).
            if normalize:
                rgb = normalize_pixels_range(pixels[:3])
                return MaskedImage(rgb, mask=pixels[-1].astype(bool), copy=False)
            return Image(pixels, copy=False)
        if normalize:
            return Image(normalize_pixels_range(pixels), copy=False)
        return Image(pixels, copy=False)

    return LazyList.init_from_index_callable(
        partial(frame_to_menpo, gif_reader), gif_reader.get_length()
    )
| {
"repo_name": "menpo/menpo",
"path": "menpo/io/input/image.py",
"copies": "2",
"size": "9334",
"license": "bsd-3-clause",
"hash": 4886374634795868000,
"line_mean": 33.0656934307,
"line_max": 85,
"alpha_frac": 0.6222412685,
"autogenerated": false,
"ratio": 3.9234972677595628,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00021475747724016426,
"num_lines": 274
} |
from functools import partial
import numpy as np
from PIL import Image, ImageTk
import pygame
# Editor geometry: each logical pixel is drawn PIXEL_SIZE screen pixels
# wide; the photo canvas is PHOTO_SIZE logical pixels square.
PIXEL_SIZE = 6
PHOTO_SIZE = 64
DEFAULT_BRUSH_SIZE = 3
# UI palette used by the widget classes below.
BG_COLOR = (200, 200, 200)
LIGHT = (255, 255, 255)
DARK = (150, 150, 150)
class CenteredSurface(pygame.Surface):
    """Transparent surface with `content` blitted at its center."""

    def __init__(self, size, content):
        super(CenteredSurface, self).__init__(size, pygame.SRCALPHA, 32)
        self.convert_alpha()
        inner = np.array(content.get_rect().size)
        outer = np.array(self.get_rect().size)
        self.blit(content, (outer - inner) // 2)
class TextSurface(pygame.Surface):
    """Transparent surface containing `text` rendered in 15pt Arial."""

    def __init__(self, text):
        rendered = pygame.font.SysFont('Arial', 15).render(text, True, (0, 0, 0))
        super(TextSurface, self).__init__(rendered.get_rect().size,
                                          pygame.SRCALPHA, 32)
        self.convert_alpha()
        self.blit(rendered, (0, 0))
class ColorSurface(pygame.Surface):
    """Surface of the given `size` filled with a single solid `color`."""
    def __init__(self, color, size):
        super(ColorSurface, self).__init__(size)
        self.fill(color)
class ImageSurface(pygame.Surface):
    """Transparent surface showing the image at `fname`, scaled to `size`."""

    def __init__(self, fname, size):
        super(ImageSurface, self).__init__(size, pygame.SRCALPHA, 32)
        self.convert_alpha()
        scaled = pygame.transform.scale(pygame.image.load(fname), size)
        self.blit(scaled, (0, 0))
class BorderSurface(pygame.Surface):
    """Transparent surface with a rectangular outline of the given `color`
    and line `width`."""
    def __init__(self, color, size, width=1):
        super(BorderSurface, self).__init__(size, pygame.SRCALPHA, 32)
        self.convert_alpha()
        pygame.draw.rect(self, color, ((0, 0), size), width)
class BrushCursor(pygame.sprite.DirtySprite):
    """Square brush outline that snaps to the PIXEL_SIZE grid and stays
    clamped inside `lim_rect` (the drawable canvas area)."""

    def __init__(self, size=DEFAULT_BRUSH_SIZE, color=(0, 0, 0), lim_rect=None):
        super(BrushCursor, self).__init__()
        self.size = (size, size)
        self.color = color
        self.lim_rect = lim_rect
        # rect must exist (as None) before update_size, which checks it to
        # decide whether a previous position needs preserving.
        self.rect = None
        self.update_size()
        self.update_color()
        self.rect = self.image.get_rect()
        self.move()

    def move(self):
        """Snap the cursor to the grid cell under the mouse."""
        m_pos = pygame.mouse.get_pos()
        # Mouse position relative to the canvas origin.
        rel_pos = (m_pos[0]-self.lim_rect.x, m_pos[1]-self.lim_rect.y)
        grid_pos = (self._grid(rel_pos[0]), self._grid(rel_pos[1])+1)
        self.rect.topleft = (grid_pos[0]+self.lim_rect.x, grid_pos[1]+self.lim_rect.y-1)
        if self.lim_rect is not None:
            self.rect.clamp_ip(self.lim_rect)
        self.dirty = 1

    def update_color(self, color=None):
        """Redraw the outline, optionally changing its color first."""
        if color is not None:
            self.color = color
        scaled_size = np.array(self.size) * PIXEL_SIZE
        pygame.draw.rect(self.image, self.color, ((0, 0), scaled_size), 1)
        self.dirty = 1

    def update_size(self, size=None):
        """Rebuild the outline image, optionally changing the brush size.

        After construction (when self.rect exists) the previous top-left
        position is preserved and re-clamped to the canvas.
        """
        if size is not None:
            self.size = (size, size)
        if self.rect is not None:
            old_topleft = self.rect.topleft
        scaled_size = np.array(self.size) * PIXEL_SIZE
        self.image = BorderSurface(self.color, scaled_size)
        if self.rect is not None:
            self.rect = self.image.get_rect()
            self.rect.topleft = old_topleft
            if self.lim_rect is not None:
                self.rect.clamp_ip(self.lim_rect)
        self.dirty = 1

    def get_rel_brush_coords(self):
        """Return (left, top, right, bottom) in canvas pixel-grid units."""
        origin = np.array(self.lim_rect.topleft)
        rel_topleft = np.array(self.rect.topleft) - origin
        rel_botright = np.array(self.rect.bottomright) - origin
        return tuple(rel_topleft // PIXEL_SIZE) + tuple(rel_botright // PIXEL_SIZE)

    def _grid(self, coord):
        # Round down to the nearest PIXEL_SIZE multiple.
        return int(coord/float(PIXEL_SIZE))*PIXEL_SIZE
class PhotoCanvas(pygame.sprite.DirtySprite):
    """Sprite that displays a PIL image scaled up to the editor's grid."""

    def __init__(self, base_image):
        super(PhotoCanvas, self).__init__()
        self.base_image = self._convert_image(base_image)
        self.update_image()
        self.rect = self.image.get_rect()

    def update_image(self, img=None):
        """Refresh the pygame surface from `img` (or the stored image)."""
        if img is not None:
            self.base_image = self._convert_image(img)
        im = self.base_image
        self.image = pygame.image.fromstring(im.tobytes(), im.size, im.mode)
        self.dirty = 1

    def update_celeb_a(self, idx):
        # NOTE(review): `_load_celeb_a` is not defined anywhere on this
        # class in this file -- presumably patched in or defined on a
        # subclass. Verify; otherwise this raises AttributeError at runtime.
        img = self._load_celeb_a(idx)
        self.update_image(img)

    def _convert_image(self, img):
        # Scale to the on-screen canvas size (PHOTO_SIZE cells of
        # PIXEL_SIZE screen pixels each) and force RGB mode.
        full_size = PHOTO_SIZE*PIXEL_SIZE
        return img.convert('RGB').resize((full_size, full_size))

    def _gen_default_img(self):
        # NOTE(review): unused in this file; the (1, 3)-shaped array passed
        # with mode='RGB' looks suspect (PIL expects an (h, w, 3) array) --
        # confirm before relying on it.
        return Image.fromarray(np.array([(0, 0, 0)]), mode='RGB')
class Button(pygame.sprite.DirtySprite):
    """Clickable button with a 3D bevel that fires `action` on release."""

    def __init__(self, size, content, action=lambda: None):
        super(Button, self).__init__()
        if action is None:
            action = lambda: None
        self.action = action
        self.content = content
        # Tri-state: None until press() first runs, then True/False.
        self.pressed = None
        self.image = pygame.Surface(size)
        self.image.fill(BG_COLOR)
        self.rect = self.image.get_rect()
        self.update_content()
        self.press()

    def update_content(self, content=None):
        """Blit `content` (or the stored content) centered on the button."""
        if content is not None:
            self.content = content
        content_size = np.array(self.content.get_rect().size)
        button_size = np.array(self.rect.size)
        offset = (button_size - content_size) // 2
        self.image.blit(self.content, offset)
        self.dirty = 1

    def press(self):
        """Redraw the bevel for the current mouse state; fire on release.

        The 3D look comes from light lines on the top-left edges and dark
        lines on the bottom-right -- swapped while the button is held.
        """
        mouse_pos = pygame.mouse.get_pos()
        mouse_pressed = pygame.mouse.get_pressed()[0]
        btn_pressed = mouse_pressed and self.rect.collidepoint(mouse_pos)
        if btn_pressed == self.pressed:
            # No state change -- nothing to redraw.
            return
        # Polyline points for the two bevel edges.
        top_left = [
            (1, self.rect.size[1]-3),
            (1, 1),
            (self.rect.size[0]-3, 1),
        ]
        bottom_right = [
            (1, self.rect.size[1]-3),
            np.array(self.rect.size)-3,
            (self.rect.size[0]-3, 1),
        ]
        if btn_pressed:
            pygame.draw.lines(self.image, DARK, False, top_left, 2)
            pygame.draw.lines(self.image, LIGHT, False, bottom_right, 2)
        else:
            pygame.draw.lines(self.image, DARK, False, bottom_right, 2)
            pygame.draw.lines(self.image, LIGHT, False, top_left, 2)
        # Fire on the release edge: was pressed last frame, mouse now up.
        if self.pressed and not mouse_pressed:
            self.action()
        self.pressed = btn_pressed
        self.dirty = 1
class SliderLever(Button):
    """The draggable handle of a Slider, drawn with three grip lines."""

    SIZE = (15, 20)

    def __init__(self):
        super(SliderLever, self).__init__(self.SIZE, self._init_content())

    def _init_content(self):
        grip = pygame.Surface((8, 5), pygame.SRCALPHA, 32)
        grip.convert_alpha()
        for y in (0, 2, 4):
            pygame.draw.line(grip, DARK, (2, y), (6, y))
        return grip
class Slider(pygame.sprite.DirtySprite):
    """Horizontal slider mapping an integer value in [0, max_val] to a
    lever position, with optional icons on either end and an `action`
    callback invoked with the new value while dragging."""

    def __init__(self, width, max_val, init_val=0, left_icon=None, right_icon=None, action=None):
        super(Slider, self).__init__()
        self.max_value = max_val
        self.value = init_val
        self.action = action
        self.image = pygame.Surface((width, 20), pygame.SRCALPHA, 32)
        self.image.convert_alpha()
        self.rect = self.image.get_rect()
        self.lever = SliderLever()
        # Horizontal extent of the track, shrunk by the lever and any icons.
        self.line_start = 0
        self.line_width = width - self.lever.rect.width
        if left_icon:
            left_surf = CenteredSurface((20, 20), left_icon)
            self.image.blit(left_surf, (0, 0))
            self.line_width -= left_surf.get_rect().width
            self.line_start = left_surf.get_rect().right
        if right_icon:
            right_surf = CenteredSurface((20, 20), right_icon)
            pos = (self.rect.topright[0]-20, 0)
            self.image.blit(right_surf, pos)
            self.line_width -= right_surf.get_rect().width
        self._draw_line()
        self._move_lever()

    def slide(self):
        """Track a drag: update the value from the mouse and notify."""
        mouse_pos = pygame.mouse.get_pos()
        mouse_pressed = pygame.mouse.get_pressed()[0]
        if not mouse_pressed:
            return
        self.value = self._pos_to_val(mouse_pos)
        self._move_lever()
        if self.action is not None:
            self.action(self.value)

    def place(self, new_pos):
        """Position the slider (midbottom anchor) and realign the lever."""
        self.rect.midbottom = new_pos
        self._move_lever()

    def _val_to_pos(self, value):
        # BUG FIX: was `value * (self.line_width // self.max_value)`. The
        # inner integer division truncates the per-unit distance, so lever
        # positions were coarsely quantized -- and collapsed to 0 (lever
        # stuck at the left) whenever max_value > line_width. Use the
        # proportional mapping, the exact inverse of _pos_to_val.
        dist = int(round(float(value) * self.line_width / self.max_value))
        return self.line_start + dist

    def _pos_to_val(self, pos):
        # Clamp to the track, then map proportionally into [0, max_value].
        l_bound = self.rect.left + self.line_start
        r_bound = l_bound + self.line_width
        if pos[0] < l_bound:
            return 0
        if pos[0] > r_bound:
            return self.max_value
        return round(float(pos[0] - l_bound) * self.max_value / self.line_width)

    def _move_lever(self):
        # Anchor the lever at the track position for the current value.
        origin = self.rect.midleft
        shifted = np.array(origin) + np.array((self._val_to_pos(self.value), 0))
        self.lever.rect.midleft = shifted
        self.lever.dirty = 1

    def _draw_line(self):
        left = (self.line_start + self.lever.rect.width//2, 10)
        right = (left[0] + self.line_width, 10)
        pygame.draw.line(self.image, DARK, left, right)
| {
"repo_name": "spellrun/Neural-Photo-Editor",
"path": "npe_backprop/util/assets.py",
"copies": "1",
"size": "9063",
"license": "mit",
"hash": -173559884869679740,
"line_mean": 31.0247349823,
"line_max": 97,
"alpha_frac": 0.5787266909,
"autogenerated": false,
"ratio": 3.3504621072088723,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44291887981088723,
"avg_score": null,
"num_lines": null
} |
from functools import partial
import numpy as np
from PIL import Image, ImageTk
import pygame
# Editor geometry: each logical pixel is drawn PIXEL_SIZE screen pixels
# wide; the photo canvas is PHOTO_SIZE logical pixels square.
PIXEL_SIZE = 6
PHOTO_SIZE = 64
DEFAULT_BRUSH_SIZE = 3
def TextSurface(text):
    """Render `text` as black 15pt Arial on a pygame surface."""
    arial = pygame.font.SysFont('Arial', 15)
    return arial.render(text, True, (0, 0, 0))
def ColorSurface(color, size):
    """Return a pygame surface of `size` filled with a solid `color`."""
    surface = pygame.Surface(size)
    surface.fill(color)
    return surface
def ImageSurface(fname, size):
    """Load the image at `fname` and scale it to `size`."""
    loaded = pygame.image.load(fname)
    return pygame.transform.scale(loaded, size)
class BrushCursor(pygame.sprite.DirtySprite):
    """Square brush outline snapped to the PIXEL_SIZE grid and clamped
    inside `lim_rect` (the drawable canvas area)."""

    def __init__(self, size=DEFAULT_BRUSH_SIZE, color=(0, 0, 0), lim_rect=None):
        super(BrushCursor, self).__init__()
        self.size = (size, size)
        self.color = color
        self.lim_rect = lim_rect
        # update_size creates self.image; update_color then draws on it.
        self.update_size()
        self.update_color()
        self.rect = self.image.get_rect()
        self.move()

    def move(self):
        """Snap the cursor to the grid cell under the mouse."""
        m_pos = pygame.mouse.get_pos()
        # Mouse position relative to the canvas origin.
        rel_pos = (m_pos[0]-self.lim_rect.x, m_pos[1]-self.lim_rect.y)
        grid_pos = (self._grid(rel_pos[0]), self._grid(rel_pos[1])+1)
        self.rect.topleft = (grid_pos[0]+self.lim_rect.x, grid_pos[1]+self.lim_rect.y-1)
        if self.lim_rect is not None:
            self.rect.clamp_ip(self.lim_rect)
        self.dirty = 1

    def update_color(self, color=None):
        """Redraw the outline, optionally changing its color first."""
        if color is not None:
            self.color = color
        scaled_size = np.array(self.size) * PIXEL_SIZE
        pygame.draw.rect(self.image, self.color, ((0, 0), scaled_size), 1)
        self.dirty = 1

    def update_size(self, size=None):
        """Rebuild the outline image, optionally changing the brush size.

        NOTE(review): self.rect is not refreshed here, so after a size
        change the rect may no longer match the new image dimensions --
        confirm this is intended.
        """
        if size is not None:
            self.size = (size, size)
        scaled_size = np.array(self.size) * PIXEL_SIZE
        self.image = pygame.Surface(scaled_size, pygame.SRCALPHA, 32)
        self.image.convert_alpha()
        pygame.draw.rect(self.image, self.color, ((0, 0), scaled_size), 1)
        self.dirty = 1

    def get_rel_brush_coords(self):
        """Return (left, top, right, bottom) in canvas pixel-grid units."""
        origin = np.array(self.lim_rect.topleft)
        rel_topleft = np.array(self.rect.topleft) - origin
        rel_botright = np.array(self.rect.bottomright) - origin
        return tuple(rel_topleft // PIXEL_SIZE) + tuple(rel_botright // PIXEL_SIZE)

    def _grid(self, coord):
        # Round down to the nearest PIXEL_SIZE multiple.
        return int(coord/float(PIXEL_SIZE))*PIXEL_SIZE
class PhotoCanvas(pygame.sprite.DirtySprite):
    """Sprite showing the photo being edited, scaled to the on-screen grid."""
    def __init__(self, base_image):
        super(PhotoCanvas, self).__init__()
        # PIL image resized to the full on-screen resolution.
        self.base_image = self._convert_image(base_image)
        self.update_image()
        self.rect = self.image.get_rect()
    def update_image(self, img=None):
        """Refresh the pygame surface from *img* (or the current image)."""
        if img is not None:
            self.base_image = self._convert_image(img)
        im = self.base_image
        self.image = pygame.image.fromstring(im.tobytes(), im.size, im.mode)
        self.dirty = 1
    def update_celeb_a(self, idx):
        """Display CelebA sample *idx*.

        NOTE(review): ``_load_celeb_a`` is not defined on this class — it is
        presumably provided by a subclass or attached elsewhere; confirm
        before calling this on a bare PhotoCanvas.
        """
        img = self._load_celeb_a(idx)
        self.update_image(img)
    def _convert_image(self, img):
        """Return *img* converted to RGB and resized to screen size."""
        full_size = PHOTO_SIZE*PIXEL_SIZE
        return img.convert('RGB').resize((full_size, full_size))
    def _gen_default_img(self):
        # NOTE(review): a (1, 3) integer array is an unusual input for an
        # 'RGB' Image.fromarray call — verify this yields the intended
        # placeholder image.
        return Image.fromarray(np.array([(0, 0, 0)]), mode='RGB')
class Button(pygame.sprite.DirtySprite):
    """Clickable button sprite with a raised/sunken 3D border effect."""
    LIGHT = (255, 255, 255)
    DARK = (150, 150, 150)
    def __init__(self, size, content, action=lambda: None):
        super(Button, self).__init__()
        # Guard against an explicit ``action=None`` from the caller.
        self.action = action if action is not None else (lambda: None)
        self.content = content
        self.pressed = None
        self.image = pygame.Surface(size)
        self.image.fill((200, 200, 200))
        self.rect = self.image.get_rect()
        self.update_content()
        self.press()
    def update_content(self, content=None):
        """Blit *content* (or the current content) centered on the button."""
        if content is not None:
            self.content = content
        content_size = np.array(self.content.get_rect().size)
        button_size = np.array(self.rect.size)
        self.image.blit(self.content, (button_size - content_size) // 2)
        self.dirty = 1
    def press(self):
        """Redraw the border for the current mouse state and fire the action.

        The action runs on every press-state transition after the initial
        draw (``self.pressed`` starts as None, so the first call only paints).
        """
        mouse_pos = pygame.mouse.get_pos()
        is_down = pygame.mouse.get_pressed()[0]
        if not self.rect.collidepoint(mouse_pos):
            is_down = False
        if is_down == self.pressed:
            return
        width, height = self.rect.size
        top_left = [
            (1, height-3),
            (1, 1),
            (width-3, 1),
        ]
        bottom_right = [
            (1, height-3),
            np.array(self.rect.size)-3,
            (width-3, 1),
        ]
        # Sunken look while held down, raised otherwise.
        if is_down:
            pygame.draw.lines(self.image, self.DARK, False, top_left, 2)
            pygame.draw.lines(self.image, self.LIGHT, False, bottom_right, 2)
        else:
            pygame.draw.lines(self.image, self.DARK, False, bottom_right, 2)
            pygame.draw.lines(self.image, self.LIGHT, False, top_left, 2)
        if self.pressed is not None:
            self.action()
        self.pressed = is_down
        self.dirty = 1
| {
"repo_name": "spellrun/Neural-Photo-Editor",
"path": "npe/util/assets.py",
"copies": "1",
"size": "5003",
"license": "mit",
"hash": 1585017009548520000,
"line_mean": 29.3212121212,
"line_max": 88,
"alpha_frac": 0.5848490905,
"autogenerated": false,
"ratio": 3.3554661301140176,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44403152206140173,
"avg_score": null,
"num_lines": null
} |
from functools import partial
import numpy as np
from scipy import stats
import matplotlib.pyplot as plt
def my_kde_bandwidth(obj, fac=1./5):
    """We use Scott's Rule, multiplied by a constant factor."""
    scott_factor = obj.n ** (-1. / (obj.d + 4))
    return scott_factor * fac
# Bimodal sample: a broad component at -2 and a narrow one at +2.
loc1, scale1, size1 = (-2, 1, 175)
loc2, scale2, size2 = (2, 0.2, 50)
x2 = np.concatenate([np.random.normal(loc=loc1, scale=scale1, size=size1),
                     np.random.normal(loc=loc2, scale=scale2, size=size2)])
# Evaluation grid extending one unit past the sample range.
x_eval = np.linspace(x2.min() - 1, x2.max() + 1, 500)
# KDEs with four bandwidth choices: Scott, Silverman, and two scaled Scott.
kde = stats.gaussian_kde(x2)
kde2 = stats.gaussian_kde(x2, bw_method='silverman')
kde3 = stats.gaussian_kde(x2, bw_method=partial(my_kde_bandwidth, fac=0.2))
kde4 = stats.gaussian_kde(x2, bw_method=partial(my_kde_bandwidth, fac=0.5))
pdf = stats.norm.pdf
# True mixture density, weighted by each component's sample fraction.
bimodal_pdf = pdf(x_eval, loc=loc1, scale=scale1) * float(size1) / x2.size + \
              pdf(x_eval, loc=loc2, scale=scale2) * float(size2) / x2.size
fig = plt.figure(figsize=(8, 6))
ax = fig.add_subplot(111)
# Rug plot of the raw samples along the x-axis.
ax.plot(x2, np.zeros(x2.shape), 'b+', ms=12)
ax.plot(x_eval, kde(x_eval), 'k-', label="Scott's Rule")
ax.plot(x_eval, kde2(x_eval), 'b-', label="Silverman's Rule")
ax.plot(x_eval, kde3(x_eval), 'g-', label="Scott * 0.2")
ax.plot(x_eval, kde4(x_eval), 'c-', label="Scott * 0.5")
ax.plot(x_eval, bimodal_pdf, 'r--', label="Actual PDF")
ax.set_xlim([x_eval.min(), x_eval.max()])
ax.legend(loc=2)
ax.set_xlabel('x')
ax.set_ylabel('Density')
plt.show()
| {
"repo_name": "lukauskas/scipy",
"path": "doc/source/tutorial/stats/plots/kde_plot4.py",
"copies": "142",
"size": "1457",
"license": "bsd-3-clause",
"hash": -5344178218315971000,
"line_mean": 32.1136363636,
"line_max": 78,
"alpha_frac": 0.6492793411,
"autogenerated": false,
"ratio": 2.3963815789473686,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
from functools import partial
import numpy as np
from scipy.optimize import minimize
# constants
DIM = 1            # spatial dimension of the lattice
INTERACTION = 1.   # on-site interaction U (sets the energy unit)
# data size
CUTOFF = 80        # boson-number truncation of the local Fock space
GRID_SIZE = 64     # resolution of the (hopping, mu) phase-diagram grid
def kinetic_energy(fs, hopping):
    """Mean-field kinetic energy.

    Parameters
    ----------
    fs : sequence of float
        Fock-state amplitudes f_n of the trial wave-function.
    hopping : float
        Hopping amplitude J (in units of U).

    Returns
    -------
    float
        -DIM * J * <b>^2 with <b> = sum_n sqrt(n+1) * f_n * f_{n+1}.
    """
    # Fix: np.sum over a generator is deprecated and slated to raise in
    # future NumPy releases; the builtin sum is the correct tool here.
    order_param = sum(np.sqrt(n + 1.) * fs[n] * fs[n + 1]
                      for n in range(len(fs) - 1))
    return -DIM * hopping * np.square(order_param)
def num_particles(fs):
    """Mean-field occupation <n> = sum_n n * f_n^2.

    Parameters
    ----------
    fs : sequence of float
        Fock-state amplitudes of the trial wave-function.
    """
    # Fix: builtin sum replaces the deprecated np.sum(generator) call.
    return sum(n * fs[n] * fs[n] for n in range(len(fs)))
def on_site_energy(fs, mu):
    """Mean-field on-site (chemical potential) energy, -mu * <n>."""
    occupation = num_particles(fs)
    return -(mu * occupation)
def interaction_energy(fs):
    """Mean-field Hubbard interaction energy, U/2 * sum_n n(n-1) f_n^2."""
    # Fix: builtin sum replaces the deprecated np.sum(generator) call.
    return INTERACTION / 2. * sum(n * (n - 1.) * fs[n] * fs[n]
                                  for n in range(len(fs)))
def energy_per_site(fs, hopping, mu):
    """Mean-field total energy per site."""
    total = (kinetic_energy(fs, hopping)
             + on_site_energy(fs, mu)
             + interaction_energy(fs))
    return total / DIM
def constraint_normalization(fs):
    """Normalization condition of wave-function (zero when normalized)."""
    squared_norm = np.square(fs).sum()
    return squared_norm - 1.
def init_fs(cutoff, kappa):
    """Normalized Gaussian ("kappa") trial wave-function as initial state.

    Parameters
    ----------
    cutoff : int
        Number of Fock states kept.
    kappa : float
        Width parameter of the Gaussian envelope.

    Returns
    -------
    numpy.ndarray
        Unit-norm amplitude vector of length *cutoff*.
    """
    # Fix: np.math.factorial relied on NumPy re-exporting the math module,
    # which was removed from NumPy's public namespace in recent releases.
    from math import factorial
    res = np.array([
        np.exp(-kappa * n * n / 2.) / np.sqrt(float(factorial(n)))
        for n in range(cutoff)
    ])
    res /= np.linalg.norm(res)
    return res
def optimize(p1, p2):
    """Find mean-field state for J/U=p1 and mu/U=p2."""
    initial_state = init_fs(cutoff=CUTOFF, kappa=1.)
    objective = partial(energy_per_site, hopping=p1, mu=p2)
    # the bound is crucial for convergence
    result = minimize(
        objective,
        initial_state,
        bounds=[[0., 1.]] * CUTOFF,
        constraints=[
            {'type': 'eq', 'fun': constraint_normalization},
        ])
    return result.x
def generate_data():
    """Generate grid of data for interpolation and save it to disk."""
    rows = []
    for hopping in np.linspace(0.0, 0.12, GRID_SIZE):
        for mu in np.linspace(2.0, 3.0, GRID_SIZE):
            print(hopping, mu)
            rows.append(np.concatenate([[hopping, mu], optimize(hopping, mu)]))
    rows = np.array(rows)
    np.save(r'data_%d' % GRID_SIZE, np.array(rows))
def load_data():
    """Load the saved grid and draw the Mott lobes."""
    # Fix: pyplot is only imported inside the __main__ guard at module
    # level, so referencing a global ``plt`` here raised NameError whenever
    # this function was called from an import.  Import locally instead.
    import matplotlib.pyplot as plt
    res = np.load(r'data_%d.npy' % GRID_SIZE)
    x = res[:, 0]
    y = res[:, 1]
    z = []
    for entry in res:
        # Kinetic term (with J=-1) serves as the superfluid order parameter.
        z.append(kinetic_energy(entry[2:], -1.))
    plt.pcolor(
        np.reshape(x, (GRID_SIZE, GRID_SIZE)),
        np.reshape(y, (GRID_SIZE, GRID_SIZE)),
        np.reshape(z, (GRID_SIZE, GRID_SIZE))
    )
    plt.xlabel('$dt/U$')
    # Raw string: '\m' is an invalid escape sequence; the raw literal has
    # the same runtime value without the warning.
    plt.ylabel(r'$\mu/U$')
    plt.show()
if __name__ == '__main__':
    # Quick visual check: plot the optimized amplitudes for nine hopping
    # values at fixed mu/U = 2.95.
    import matplotlib.pyplot as plt
    for i, J in enumerate(np.linspace(0, 0.12, 9)):
        plt.subplot(3, 3, i + 1)
        fs = optimize(J, 2.95)
        plt.plot(fs, '-o')
        plt.xlim([0, 10])
    plt.tight_layout()
    plt.show()
| {
"repo_name": "rhinech/snake",
"path": "bose_hubbard_mft.py",
"copies": "1",
"size": "2822",
"license": "mit",
"hash": -6431812747236697000,
"line_mean": 24.6545454545,
"line_max": 96,
"alpha_frac": 0.5754783841,
"autogenerated": false,
"ratio": 3.002127659574468,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9074889687865328,
"avg_score": 0.0005432711618278629,
"num_lines": 110
} |
from functools import partial
import numpy as np
from scipy.spatial.distance import cdist as distance
from scipy.sparse import vstack as sparse_vstack
from oddt.utils import is_molecule
from oddt.docking import autodock_vina
from oddt.docking.internal import vina_docking
from oddt.fingerprints import sparse_to_csr_matrix
# Public API of the descriptors module.
__all__ = ['close_contacts_descriptor',
           'fingerprints',
           'autodock_vina_descriptor',
           'oddt_vina_descriptor']
def atoms_by_type(atom_dict, types, mode='atomic_nums'):
    """Returns atom dictionaries based on given criteria.

    Currently we have 3 types of atom selection criteria:
        * atomic numbers ['atomic_nums']
        * Sybyl Atom Types ['atom_types_sybyl']
        * AutoDock4 atom types ['atom_types_ad4'] (http://autodock.scripps.edu/faqs-help/faq/where-do-i-set-the-autodock-4-force-field-parameters)

    Parameters
    ----------
    atom_dict: oddt.toolkit.Molecule.atom_dict
        Atom dictionary as implemented in oddt.toolkit.Molecule class

    types: array-like
        List of atom types/numbers wanted.

    mode: string (default='atomic_nums')
        Selection criteria: 'atomic_nums', 'atom_types_sybyl' or
        'atom_types_ad4'.

    Returns
    -------
    out: dictionary of shape=[len(types)]
        A dictionary of queried atom types (types are keys of the dictionary).
        Values are of oddt.toolkit.Molecule.atom_dict type.

    Raises
    ------
    ValueError
        If *mode* is unknown, or (in AD4 mode) an atom type is unknown.
    """
    # AutoDock4 atom type -> atomic number lookup table.
    ad4_to_atomicnum = {
        'HD': 1, 'C': 6, 'CD': 6, 'A': 6, 'N': 7, 'NA': 7, 'OA': 8, 'F': 9,
        'MG': 12, 'P': 15, 'SA': 16, 'S': 16, 'CL': 17, 'CA': 20, 'MN': 25,
        'FE': 26, 'CU': 29, 'ZN': 30, 'BR': 35, 'I': 53
    }
    if mode == 'atomic_nums':
        return {num: atom_dict[atom_dict['atomicnum'] == num]
                for num in set(types)}
    elif mode == 'atom_types_sybyl':
        return {t: atom_dict[atom_dict['atomtype'] == t]
                for t in set(types)}
    elif mode == 'atom_types_ad4':
        # all AD4 atom types are capitalized
        types = [t.upper() for t in types]
        out = {}
        for t in set(types):
            if t not in ad4_to_atomicnum:
                # Typo fixed: message previously read "Unsopported".
                raise ValueError('Unsupported atom type: %s' % t)
            constraints = (atom_dict['atomicnum'] == ad4_to_atomicnum[t])
            # additional constraints for more specific atom types (donors,
            # acceptors, aromatic etc)
            if t == 'HD':
                constraints &= atom_dict['isdonorh']
            elif t == 'C':
                constraints &= ~atom_dict['isaromatic']
            elif t == 'CD':
                # not canonical AD4 type, although used by NNscore, with no
                # description
                constraints &= ~atom_dict['isdonor']
            elif t == 'A':
                constraints &= atom_dict['isaromatic']
            elif t in ('N', 'S'):
                constraints &= ~atom_dict['isacceptor']
            elif t in ('NA', 'OA', 'SA'):
                constraints &= atom_dict['isacceptor']
            out[t] = atom_dict[constraints]
        return out
    # Typo fixed: message previously read "Unsopported".
    raise ValueError('Unsupported mode: %s' % mode)
class close_contacts_descriptor(object):
    """Counts protein-ligand atom pairs of given types within distance cutoffs."""
    def __init__(self,
                 protein=None,
                 cutoff=4,
                 mode='atomic_nums',
                 ligand_types=None,
                 protein_types=None,
                 aligned_pairs=False):
        """Close contacts descriptor which tallies atoms of type X in certain
        cutoff from atoms of type Y.

        Parameters
        ----------
        protein: oddt.toolkit.Molecule or None (default=None)
            Default protein to use as reference

        cutoff: int or list, shape=[n,] or shape=[n,2] (default=4)
            Cutoff for atoms in Angstroms given as an integer or a list of
            ranges, eg. [0, 4, 8, 12] or [[0,4],[4,8],[8,12]].
            Upper bound is always inclusive, lower exclusive.

        mode: string (default='atomic_nums')
            Method of atoms selection, as used in `atoms_by_type`

        ligand_types: array
            List of ligand atom types to use

        protein_types: array
            List of protein atom types to use

        aligned_pairs: bool (default=False)
            Flag indicating should permutation of types should be done,
            otherwise the atoms are treated as aligned pairs.
        """
        self.cutoff = np.atleast_1d(cutoff)
        # Cutoffs in form of continuous intervals (0,2,4,6,...) are split
        # into (lower, upper] pairs.
        if len(self.cutoff) > 1 and self.cutoff.ndim == 1:
            self.cutoff = np.vstack((self.cutoff[:-1],
                                     self.cutoff[1:])).T
        elif self.cutoff.ndim > 2:
            raise ValueError('Unsupported shape of cutoff: %s' % self.cutoff.shape)
        # for pickle save original value
        self.original_cutoff = cutoff
        self.protein = protein
        self.ligand_types = ligand_types
        # Fall back to ligand types when no protein types were supplied.
        self.protein_types = protein_types if protein_types else ligand_types
        self.aligned_pairs = aligned_pairs
        self.mode = mode
        # setup titles (one per type pair, and per interval if ranged)
        if len(self.cutoff) == 1:
            self.titles = ['%s.%s' % (str(p), str(l))
                           for p in self.protein_types
                           for l in self.ligand_types
                           ]
        else:
            self.titles = ['%s.%s_%s-%s' % (str(p), str(l), str(c1), str(c2))
                           for p in self.protein_types
                           for l in self.ligand_types
                           for c1, c2 in self.cutoff
                           ]
    def build(self, ligands, protein=None):
        """Builds descriptors for series of ligands

        Parameters
        ----------
        ligands: iterable of oddt.toolkit.Molecules or oddt.toolkit.Molecule
            A list or iterable of ligands to build the descriptor or a
            single molecule.

        protein: oddt.toolkit.Molecule or None (default=None)
            Default protein to use as reference
        """
        if protein:
            self.protein = protein
        if is_molecule(ligands):
            ligands = [ligands]
        out = []
        for mol in ligands:
            mol_dict = atoms_by_type(mol.atom_dict, self.ligand_types, self.mode)
            if self.aligned_pairs:
                pairs = zip(self.ligand_types, self.protein_types)
            else:
                pairs = [(mol_type, prot_type)
                         for mol_type in self.ligand_types
                         for prot_type in self.protein_types]
            # Restrict the protein to atoms within the largest cutoff of any
            # ligand atom, keeping the per-pair distance matrices small.
            dist = distance(self.protein.atom_dict['coords'],
                            mol.atom_dict['coords'])
            within_cutoff = (dist <= self.cutoff.max()).any(axis=1)
            local_protein_dict = self.protein.atom_dict[within_cutoff]
            prot_dict = atoms_by_type(local_protein_dict, self.protein_types,
                                      self.mode)
            desc = []
            for mol_type, prot_type in pairs:
                d = distance(prot_dict[prot_type]['coords'],
                             mol_dict[mol_type]['coords'])[..., np.newaxis]
                if len(self.cutoff) > 1:
                    # Tally counts per (lower, upper] distance interval.
                    count = ((d > self.cutoff[..., 0]) &
                             (d <= self.cutoff[..., 1])).sum(axis=(0, 1))
                else:
                    count = (d <= self.cutoff).sum()
                desc.append(count)
            desc = np.array(desc, dtype=int).flatten()
            out.append(desc)
        return np.vstack(out)
    def __len__(self):
        """ Returns the dimensions of descriptors """
        if self.aligned_pairs:
            return len(self.ligand_types) * self.cutoff.shape[0]
        else:
            return len(self.ligand_types) * len(self.protein_types) * len(self.cutoff)
    def __reduce__(self):
        # Pickle via the constructor arguments (see original_cutoff above).
        return close_contacts_descriptor, (self.protein,
                                           self.original_cutoff,
                                           self.mode,
                                           self.ligand_types,
                                           self.protein_types,
                                           self.aligned_pairs)
class universal_descriptor(object):
    """Adapter turning any callable into a descriptor generator."""
    def __init__(self,
                 func,
                 protein=None,
                 shape=None,
                 sparse=False):
        """An universal descriptor which converts a callable object (function)
        to a descriptor generator which can be used in scoring methods.

        .. versionadded:: 0.6

        Parameters
        ----------
        func: object
            A function to be mapped accross all ligands. Can be any callable
            object, which takes ligand as first argument and optionally
            protein key word argument. Additional arguments should be set
            using `functools.partial`.

        protein: oddt.toolkit.Molecule or None (default=None)
            Default protein to use as reference

        shape: int or tuple (default=None)
            The final shape of output

        sparse: bool (default=False)
            Flag to return sparse matrix
        """
        self.func = func
        self.protein = protein
        self.shape = shape
        self.sparse = sparse
        # Unwrap functools.partial to report the underlying function name.
        if isinstance(func, partial):
            self.titles = self.func.func.__name__
        else:
            self.titles = self.func.__name__
    def build(self, ligands, protein=None):
        """Builds descriptors for series of ligands

        Parameters
        ----------
        ligands: iterable of oddt.toolkit.Molecules or oddt.toolkit.Molecule
            A list or iterable of ligands to build the descriptor or a
            single molecule.

        protein: oddt.toolkit.Molecule or None (default=None)
            Default protein to use as reference
        """
        if protein:
            self.protein = protein
        if is_molecule(ligands):
            ligands = [ligands]
        out = []
        for mol in ligands:
            if self.protein is None:
                out.append(self.func(mol))
            else:
                out.append(self.func(mol, protein=self.protein))
        if self.sparse:
            # Stack sparse fingerprints into one CSR matrix.
            # out = list(map(partial(sparse_to_csr_matrix, size=self.shape), out))
            return sparse_vstack(map(partial(sparse_to_csr_matrix,
                                             size=self.shape), out),
                                 format='csr')
        else:
            return np.vstack(out)
    def __len__(self):
        """ Returns the dimensions of descriptors """
        if self.shape is None:
            raise NotImplementedError('The length of descriptor is not defined')
        else:
            return self.shape
    def __reduce__(self):
        return universal_descriptor, (self.func, self.protein, self.shape,
                                      self.sparse)
# TODO: we don't use toolkit. should we?
class fingerprints(object):
    """Molecular fingerprint descriptor builder."""
    def __init__(self, fp='fp2', toolkit='ob'):
        self.fp = fp
        self.exchange = False
        # if toolkit == oddt.toolkit.backend:
        #     self.exchange = False
        # else:
        #     self.exchange = True
        #     self.target_toolkit = __import__('toolkits.'+toolkit)
    def _get_fingerprint(self, mol):
        """Return the raw fingerprint bits of a single molecule."""
        if self.exchange:
            mol = self.target_toolkit.Molecule(mol)
        return mol.calcfp(self.fp).raw
    def build(self, mols):
        """Stack fingerprints of *mols* into a 2D array."""
        if is_molecule(mols):
            mols = [mols]
        rows = [self._get_fingerprint(mol) for mol in mols]
        return np.vstack(rows)
    def __reduce__(self):
        return fingerprints, ()
class autodock_vina_descriptor(object):
    """Descriptor generator based on AutoDock Vina scoring terms."""
    def __init__(self, protein=None, vina_scores=None):
        self.protein = protein
        self.vina = autodock_vina(protein)
        self.vina_scores = vina_scores or ['vina_affinity',
                                           'vina_gauss1',
                                           'vina_gauss2',
                                           'vina_repulsion',
                                           'vina_hydrophobic',
                                           'vina_hydrogen']
        self.titles = self.vina_scores
    def set_protein(self, protein):
        """Set the reference protein on this object and the Vina engine."""
        self.protein = protein
        self.vina.set_protein(protein)
    def build(self, ligands, protein=None):
        """Score each ligand with Vina and stack the selected terms."""
        if protein:
            self.set_protein(protein)
        else:
            protein = self.protein
        if is_molecule(ligands):
            ligands = [ligands]
        desc = None
        for mol in ligands:
            # Vina
            # TODO: Asynchronous output from vina, push command to score and retrieve at the end?
            # TODO: Check if ligand has vina scores
            mol_data = self.vina.score(mol)[0].data
            row = np.array(([mol_data[key] for key in self.vina_scores]),
                           dtype=np.float32).flatten()
            desc = row if desc is None else np.vstack((desc, row))
        return np.atleast_2d(desc)
    def __len__(self):
        """ Returns the dimensions of descriptors """
        return len(self.vina_scores)
    def __reduce__(self):
        return autodock_vina_descriptor, (self.protein, self.vina_scores)
class oddt_vina_descriptor(object):
    """Descriptor generator using ODDT's internal Vina implementation.

    Scores are cached on the molecule's data tags, so previously-scored
    ligands are not rescored.
    """
    def __init__(self, protein=None, vina_scores=None):
        self.protein = protein
        self.vina = vina_docking(protein)
        # All score tags this class can compute and cache on a molecule.
        self.all_vina_scores = ['vina_affinity',
                                # inter-molecular interactions
                                'vina_gauss1',
                                'vina_gauss2',
                                'vina_repulsion',
                                'vina_hydrophobic',
                                'vina_hydrogen',
                                # intra-molecular interactions
                                'vina_intra_gauss1',
                                'vina_intra_gauss2',
                                'vina_intra_repulsion',
                                'vina_intra_hydrophobic',
                                'vina_intra_hydrogen',
                                'vina_num_rotors']
        self.vina_scores = vina_scores or self.all_vina_scores
        self.titles = self.vina_scores
    def set_protein(self, protein):
        """Set the reference protein on this object and the Vina engine."""
        self.protein = protein
        self.vina.set_protein(protein)
    def build(self, ligands, protein=None):
        """Build descriptor rows for *ligands*; returns a 2D float32 array."""
        if protein:
            self.set_protein(protein)
        else:
            protein = self.protein
        if is_molecule(ligands):
            ligands = [ligands]
        desc = None
        for mol in ligands:
            mol_keys = mol.data.keys()
            # Rescore only if any requested tag is missing from the cache.
            if any(x not in mol_keys for x in self.vina_scores):
                self.vina.set_ligand(mol)
                inter = self.vina.score_inter()
                intra = self.vina.score_intra()
                num_rotors = self.vina.num_rotors
                # could use self.vina.score(), but better to reuse variables
                affinity = ((inter * self.vina.weights[:5]).sum() /
                            (1 + self.vina.weights[5] * num_rotors))
                assert len(self.all_vina_scores) == len(inter) + len(intra) + 2
                score = dict(zip(
                    self.all_vina_scores,
                    np.hstack((affinity, inter, intra, num_rotors)).flatten()
                ))
                mol.data.update(score)
            else:
                score = mol.data.to_dict()
            try:
                vec = np.array([score[s] for s in self.vina_scores],
                               dtype=np.float32).flatten()
            except Exception as e:
                # Bug fix: the original also printed affinity/inter/intra/
                # num_rotors, which are unbound on the cached-score branch
                # and raised NameError, masking the real error.
                print(score)
                print(mol.title)
                raise e
            if desc is None:
                desc = vec
            else:
                desc = np.vstack((desc, vec))
        return np.atleast_2d(desc)
    def __len__(self):
        """ Returns the dimensions of descriptors """
        return len(self.vina_scores)
    def __reduce__(self):
        return oddt_vina_descriptor, (self.protein, self.vina_scores)
| {
"repo_name": "oddt/oddt",
"path": "oddt/scoring/descriptors/__init__.py",
"copies": "1",
"size": "16319",
"license": "bsd-3-clause",
"hash": -8774605505998364000,
"line_mean": 36.3432494279,
"line_max": 146,
"alpha_frac": 0.51412464,
"autogenerated": false,
"ratio": 4.075674325674326,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00026094244455629315,
"num_lines": 437
} |
from functools import partial
import numpy as np
from scipy.stats import multivariate_normal, norm
import bayesian_changepoint_detection.online_changepoint_detection as online
def test_multivariate():
    """Detect mean shifts of a 10-d Gaussian stream at t=50, 100, 150."""
    np.random.seed(seed=34)
    # 10-dimensional multivariate normal, that shifts its mean at t=50, 100, and 150
    segments = [
        multivariate_normal.rvs([0] * 10, size=50),
        multivariate_normal.rvs([4] * 10, size=50),
        multivariate_normal.rvs([0] * 10, size=50),
        multivariate_normal.rvs([-4] * 10, size=50),
    ]
    dataset = np.vstack(segments)
    r, maxes = online.online_changepoint_detection(
        dataset,
        partial(online.constant_hazard, 50),
        online.MultivariateT(dims=10)
    )
    # Assert that we detected the mean shifts
    for brkpt in (50, 100, 150):
        assert maxes[brkpt + 1] < maxes[brkpt - 1]
def test_univariate():
    """Detect a mean shift from 0 to 2 at t=50 in a univariate stream."""
    np.random.seed(seed=34)
    # Univariate normal whose mean jumps from 0 to 2 at t=50.
    dataset = np.hstack((norm.rvs(0, size=50), norm.rvs(2, size=50)))
    run_lengths, maxes = online.online_changepoint_detection(
        dataset,
        partial(online.constant_hazard, 20),
        online.StudentT(0.1, .01, 1, 0)
    )
    assert maxes[50] - maxes[51] > 40
| {
"repo_name": "hildensia/bayesian_changepoint_detection",
"path": "test.py",
"copies": "1",
"size": "1207",
"license": "mit",
"hash": -3537339103070815000,
"line_mean": 31.6216216216,
"line_max": 84,
"alpha_frac": 0.6512013256,
"autogenerated": false,
"ratio": 3.343490304709141,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4494691630309141,
"avg_score": null,
"num_lines": null
} |
from functools import partial
import numpy as np
from sklearn import datasets, cross_validation, preprocessing
from neupy import algorithms, layers
from utils import compare_networks
from base import BaseTestCase
class QuickPropTestCase(BaseTestCase):
    """Regression tests for the Quickprop training algorithm."""
    def setUp(self):
        # Small synthetic regression problem; targets scaled to [0, 1].
        super(QuickPropTestCase, self).setUp()
        data, target = datasets.make_regression(n_samples=1500, n_features=5,
                                                n_informative=5, n_targets=1,
                                                random_state=33)
        target_scaler = preprocessing.MinMaxScaler()
        target = target_scaler.fit_transform(target.reshape(-1, 1))
        self.data = cross_validation.train_test_split(data, target,
                                                      train_size=0.75)
        self.connection = (5, 10, 1)
    def test_quickprop(self):
        # Quickprop should fit the held-out data to near-zero error.
        x_train, x_test, y_train, y_test = self.data
        qp = algorithms.Quickprop(
            (5, 10, 1),
            step=0.1,
            upper_bound=1,
            shuffle_data=True,
            verbose=False,
        )
        qp.train(x_train, y_train, epochs=50)
        error = qp.prediction_error(x_test, y_test)
        self.assertAlmostEqual(0, error, places=2)
    def test_compare_quickprop_and_bp(self):
        # Sanity comparison of Quickprop against plain gradient descent.
        x_train, _, y_train, _ = self.data
        compare_networks(
            # Test classes
            algorithms.GradientDescent,
            partial(algorithms.Quickprop, upper_bound=0.5),
            # Test data
            (x_train, y_train),
            # Network configurations
            connection=self.connection,
            step=0.1,
            shuffle_data=True,
            # Test configurations
            epochs=100,
            verbose=False,
            show_comparison_plot=False
        )
| {
"repo_name": "stczhc/neupy",
"path": "tests/algorithms/gd/test_quickprop.py",
"copies": "1",
"size": "1816",
"license": "mit",
"hash": 1291272918537926000,
"line_mean": 32.0181818182,
"line_max": 77,
"alpha_frac": 0.5545154185,
"autogenerated": false,
"ratio": 4.223255813953489,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 55
} |
from functools import partial
import numpy as np
import copy
# Alias: rollout "factories" are just functools.partial applications.
create_rollout_function = partial
def multitask_rollout(
        env,
        agent,
        max_path_length=np.inf,
        render=False,
        render_kwargs=None,
        observation_key=None,
        desired_goal_key=None,
        get_action_kwargs=None,
        return_dict_obs=False,
        full_o_postprocess_func=None,
):
    """Roll out a goal-conditioned policy.

    The policy input at every step is the concatenation of
    ``o[observation_key]`` and ``o[desired_goal_key]``.

    NOTE(review): ``return_dict_obs`` is not forwarded to ``rollout``, so
    ``paths['observations']`` arrives already stacked; confirm the
    ``[observation_key]`` indexing below behaves as intended when
    ``return_dict_obs`` is False.
    """
    if full_o_postprocess_func:
        def wrapped_fun(env, agent, o):
            # Inject the observation key before delegating to the caller's hook.
            full_o_postprocess_func(env, agent, observation_key, o)
    else:
        wrapped_fun = None
    def obs_processor(o):
        # Policy input = state observation concatenated with the goal.
        return np.hstack((o[observation_key], o[desired_goal_key]))
    paths = rollout(
        env,
        agent,
        max_path_length=max_path_length,
        render=render,
        render_kwargs=render_kwargs,
        get_action_kwargs=get_action_kwargs,
        preprocess_obs_for_policy_fn=obs_processor,
        full_o_postprocess_func=wrapped_fun,
    )
    if not return_dict_obs:
        paths['observations'] = paths['observations'][observation_key]
    return paths
def contextual_rollout(
        env,
        agent,
        observation_key=None,
        context_keys_for_policy=None,
        obs_processor=None,
        **kwargs
):
    """Roll out a policy conditioned on context keys in the observation."""
    if context_keys_for_policy is None:
        context_keys_for_policy = ['context']
    if not obs_processor:
        def obs_processor(o):
            # Policy input = observation followed by every context vector.
            parts = [o[observation_key]]
            parts.extend(o[k] for k in context_keys_for_policy)
            return np.concatenate(parts, axis=0)
    return rollout(
        env,
        agent,
        preprocess_obs_for_policy_fn=obs_processor,
        **kwargs
    )
def rollout(
        env,
        agent,
        max_path_length=np.inf,
        render=False,
        render_kwargs=None,
        preprocess_obs_for_policy_fn=None,
        get_action_kwargs=None,
        return_dict_obs=False,
        full_o_postprocess_func=None,
        reset_callback=None,
):
    """Roll out *agent* in *env* for at most *max_path_length* steps.

    Parameters
    ----------
    env : gym-style environment with ``reset()`` and ``step(action)``.
    agent : policy with ``reset()`` and ``get_action(obs, **kwargs)``.
    max_path_length : maximum number of environment steps.
    render / render_kwargs : forwarded to ``env.render``.
    preprocess_obs_for_policy_fn : maps a raw observation to policy input.
    get_action_kwargs : extra keyword arguments for ``agent.get_action``.
    return_dict_obs : if True, keep raw observations (e.g. dicts) instead
        of stacking them into arrays.
    full_o_postprocess_func : called as ``f(env, agent, o)`` after each
        action is chosen.
    reset_callback : called as ``f(env, agent, o)`` right after reset.

    Returns
    -------
    dict with keys ``observations``, ``actions``, ``rewards``,
    ``next_observations``, ``terminals``, ``agent_infos``, ``env_infos``,
    ``full_observations`` and ``full_next_observations``.
    """
    if render_kwargs is None:
        render_kwargs = {}
    if get_action_kwargs is None:
        get_action_kwargs = {}
    if preprocess_obs_for_policy_fn is None:
        preprocess_obs_for_policy_fn = lambda x: x
    raw_obs = []
    raw_next_obs = []
    observations = []
    actions = []
    rewards = []
    terminals = []
    agent_infos = []
    env_infos = []
    next_observations = []
    path_length = 0
    agent.reset()
    o = env.reset()
    if reset_callback:
        reset_callback(env, agent, o)
    if render:
        env.render(**render_kwargs)
    while path_length < max_path_length:
        raw_obs.append(o)
        o_for_agent = preprocess_obs_for_policy_fn(o)
        a, agent_info = agent.get_action(o_for_agent, **get_action_kwargs)
        if full_o_postprocess_func:
            full_o_postprocess_func(env, agent, o)
        # deepcopy so the environment cannot mutate the stored action
        next_o, r, d, env_info = env.step(copy.deepcopy(a))
        if render:
            env.render(**render_kwargs)
        observations.append(o)
        rewards.append(r)
        terminals.append(d)
        actions.append(a)
        next_observations.append(next_o)
        raw_next_obs.append(next_o)
        agent_infos.append(agent_info)
        env_infos.append(env_info)
        path_length += 1
        if d:
            break
        o = next_o
    actions = np.array(actions)
    if len(actions.shape) == 1:
        actions = np.expand_dims(actions, 1)
    observations = np.array(observations)
    next_observations = np.array(next_observations)
    if return_dict_obs:
        observations = raw_obs
        next_observations = raw_next_obs
    rewards = np.array(rewards)
    if len(rewards.shape) == 1:
        rewards = rewards.reshape(-1, 1)
    return dict(
        observations=observations,
        actions=actions,
        rewards=rewards,
        next_observations=next_observations,
        terminals=np.array(terminals).reshape(-1, 1),
        agent_infos=agent_infos,
        env_infos=env_infos,
        full_observations=raw_obs,
        # Bug fix: this previously returned raw_obs (the *current*
        # observations), leaving full_next_observations off by one step.
        full_next_observations=raw_next_obs,
    )
def deprecated_rollout(
        env,
        agent,
        max_path_length=np.inf,
        render=False,
        render_kwargs=None,
):
    """Legacy rollout kept for backwards compatibility.

    The values for the keys below are 2D arrays whose first dimension is
    time: observations, actions, rewards, next_observations, terminals.
    ``agent_infos`` and ``env_infos`` are lists of per-step dictionaries.
    """
    if render_kwargs is None:
        render_kwargs = {}
    observations, actions, rewards = [], [], []
    terminals, agent_infos, env_infos = [], [], []
    o = env.reset()
    agent.reset()
    next_o = None
    path_length = 0
    if render:
        env.render(**render_kwargs)
    while path_length < max_path_length:
        a, agent_info = agent.get_action(o)
        next_o, r, d, env_info = env.step(a)
        observations.append(o)
        rewards.append(r)
        terminals.append(d)
        actions.append(a)
        agent_infos.append(agent_info)
        env_infos.append(env_info)
        path_length += 1
        if d:
            break
        o = next_o
        if render:
            env.render(**render_kwargs)
    actions = np.array(actions)
    if actions.ndim == 1:
        actions = np.expand_dims(actions, 1)
    observations = np.array(observations)
    if observations.ndim == 1:
        observations = np.expand_dims(observations, 1)
    # Next observations are the observations shifted by one step, with the
    # final environment observation appended.
    next_observations = np.vstack((
        observations[1:, :],
        np.expand_dims(np.array([next_o]), 0),
    ))
    return dict(
        observations=observations,
        actions=actions,
        rewards=np.array(rewards).reshape(-1, 1),
        next_observations=next_observations,
        terminals=np.array(terminals).reshape(-1, 1),
        agent_infos=agent_infos,
        env_infos=env_infos,
    )
| {
"repo_name": "vitchyr/rlkit",
"path": "rlkit/samplers/rollout_functions.py",
"copies": "1",
"size": "6082",
"license": "mit",
"hash": -6450307428652556000,
"line_mean": 26.1517857143,
"line_max": 76,
"alpha_frac": 0.5808944426,
"autogenerated": false,
"ratio": 3.63755980861244,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9718016576142412,
"avg_score": 0.00008753501400560224,
"num_lines": 224
} |
from functools import partial
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
from neupy.utils import asfloat, tensorflow_session
from neupy import algorithms, layers, utils
# Fix seeds so the visualization is reproducible across runs.
utils.reproducible()
# Toy 2-class dataset in the unit square.
X_train = np.array([
    [0.9, 0.3],
    [0.5, 0.3],
    [0.2, 0.1],
    [0.7, 0.5],
    [0.1, 0.8],
    [0.1, 0.9],
])
y_train = np.array([
    [1],
    [1],
    [1],
    [0],
    [0],
    [0],
])
# Shared starting weights so every algorithm begins at the same point.
default_weight = np.array([[-4.], [-4.]])
# Globals mutated by the training signal handler below.
weights = None
current_epoch = 0
def draw_countour(xgrid, ygrid, target_function):
    """Draw a filled contour of *target_function* over the given grid."""
    values = np.zeros((xgrid.shape[0], ygrid.shape[0]))
    for col, x in enumerate(xgrid):
        for row, y in enumerate(ygrid):
            values[row, col] = target_function(x, y)
    X, Y = np.meshgrid(xgrid, ygrid)
    plt.contourf(X, Y, values, 20, alpha=1, cmap='Blues')
    plt.colorbar()
def weight_quiver(weights, color='c'):
    """Draw arrows tracing the weight trajectory between epochs."""
    xs, ys = weights[0], weights[1]
    plt.quiver(xs[:-1],
               ys[:-1],
               xs[1:] - xs[:-1],
               ys[1:] - ys[:-1],
               scale_units='xy', angles='xy', scale=1,
               color=color)
def save_epoch_weight(optimizer):
    """
    Signal processor which save weight update for every
    epoch.
    """
    global weights
    global current_epoch
    session = tensorflow_session()
    # Read the hidden layer's current weight matrix from the TF session.
    input_layer_weight = session.run(optimizer.network.layers[1].weight)
    # Record it in the column for the epoch that just finished.
    weights[:, current_epoch + 1:current_epoch + 2] = input_layer_weight
def create_network():
    """
    Generate new network every time when we call it.
    """
    input_layer = layers.Input(2)
    output_layer = layers.Sigmoid(1, weight=default_weight.copy(), bias=None)
    return layers.join(input_layer, output_layer)
def draw_quiver(network_class, name, color='r'):
    """
    Train algorithm and draw quiver for every epoch update
    for this algorithm.
    """
    global weights
    global current_epoch
    bpn = network_class(create_network(), signals=save_epoch_weight)
    # We don't know in advance number of epochs that network
    # need to reach the goal. For this reason we use 1000 as
    # an upper limit for all network epochs, later we
    # need to fix
    weights = np.zeros((2, 1000))
    weights[:, 0:1] = default_weight.copy()
    current_epoch = 0
    # Train one epoch at a time until the loss drops below 0.125.
    while bpn.score(X_train, y_train) > 0.125:
        bpn.train(X_train, y_train, epochs=1)
        current_epoch += 1
    # Trim the unused preallocated columns before plotting.
    weights = weights[:, :current_epoch + 1]
    weight_quiver(weights, color=color)
    label = "{name} ({n} steps)".format(name=name, n=current_epoch)
    return mpatches.Patch(color=color, label=label)
def target_function(optimizer, x, y):
    """Loss of *optimizer*'s network with its weights forced to (x, y)."""
    weight = optimizer.network.layers[1].weight
    new_weight = np.array([[x], [y]])
    session = tensorflow_session()
    # NOTE: this overwrites the live network weights in the session;
    # callers rely on re-creating or re-training the network afterwards.
    weight.load(asfloat(new_weight), session)
    return optimizer.score(X_train, y_train)
# Get data for countour plot
bp_network = algorithms.GradientDescent(
    create_network(),
    step=0.3,
    batch_size=None,
    signals=save_epoch_weight
)
network_target_function = partial(target_function, bp_network)
plt.figure()
plt.title("Approximation function contour plot")
plt.xlabel("First weight")
plt.ylabel("Second weight")
draw_countour(
    np.linspace(-5, 5, 50),
    np.linspace(-5, 5, 50),
    network_target_function
)
# NOTE(review): this assignment shadows the imported ``algorithms`` module.
# The partials on the right-hand side are evaluated before the rebinding,
# so it works, but the module is inaccessible from this line onward.
algorithms = (
    (partial(algorithms.GradientDescent, step=0.3), 'Gradient Descent', 'k'),
    (partial(algorithms.Momentum, batch_size=None, step=0.3), 'Momentum', 'g'),
    (partial(algorithms.RPROP, step=0.3), 'RPROP', 'm'),
    (partial(algorithms.IRPROPPlus, step=0.3), 'iRPROP+', 'r'),
    (partial(algorithms.Hessian, penalty_const=0.01), "Newton's method", 'y'),
)
patches = []
for algorithm, algorithm_name, color in algorithms:
    print("The '{}' network training".format(algorithm_name))
    quiver_patch = draw_quiver(algorithm, algorithm_name, color)
    patches.append(quiver_patch)
print("Plot training results")
plt.legend(handles=patches)
plt.show()
| {
"repo_name": "itdxer/neupy",
"path": "examples/mlp/gd_algorithms_visualization.py",
"copies": "1",
"size": "3965",
"license": "mit",
"hash": 4772559025576818000,
"line_mean": 24.5806451613,
"line_max": 79,
"alpha_frac": 0.6348045397,
"autogenerated": false,
"ratio": 3.2714521452145213,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4406256684914521,
"avg_score": null,
"num_lines": null
} |
from functools import partial
import numpy as np
import matplotlib.pyplot as plt
from mne.utils import _TempDir
from pactools.dar_model import AR, DAR, HAR, StableDAR
from pactools.utils.testing import assert_equal, assert_greater
from pactools.utils.testing import assert_raises, assert_array_equal
from pactools.utils.testing import assert_true, assert_array_almost_equal
from pactools.comodulogram import Comodulogram, read_comodulogram
from pactools.comodulogram import ALL_PAC_METRICS, BICOHERENCE_PAC_METRICS
from pactools.simulate_pac import simulate_pac
# Parameters used for the simulated signal in the test
low_fq_range = [1., 3., 5., 7.]
high_fq_range = [25., 50., 75.]
n_low = len(low_fq_range)
n_high = len(high_fq_range)
# Frequencies actually driving the simulated coupling (middle of each range)
high_fq = high_fq_range[1]
low_fq = low_fq_range[1]
n_points = 1024
fs = 200.  # sampling rate passed to simulate_pac
signal = simulate_pac(n_points=n_points, fs=fs, high_fq=high_fq, low_fq=low_fq,
                      low_fq_width=1., noise_level=0.1, random_state=0)
# Kept so test_signal_unchanged can verify the suite did not mutate `signal`
signal_copy = signal.copy()
class ComodTest(Comodulogram):
    """Comodulogram preconfigured with defaults suitable for these tests."""

    def __init__(self, fs=fs, low_fq_range=low_fq_range, low_fq_width=1.,
                 high_fq_range=high_fq_range, high_fq_width='auto',
                 method='tort', n_surrogates=0, vmin=None, vmax=None,
                 progress_bar=False, ax_special=None, minimum_shift=1.0,
                 random_state=0, coherence_params=dict(), low_fq_width_2=4.0):
        # Collect everything and forward to the real Comodulogram.
        params = dict(
            fs=fs, low_fq_range=low_fq_range, low_fq_width=low_fq_width,
            high_fq_range=high_fq_range, high_fq_width=high_fq_width,
            method=method, n_surrogates=n_surrogates, vmin=vmin, vmax=vmax,
            progress_bar=progress_bar, ax_special=ax_special,
            minimum_shift=minimum_shift, random_state=random_state,
            coherence_params=coherence_params, low_fq_width_2=low_fq_width_2)
        super(ComodTest, self).__init__(**params)
def fast_comod(low_sig=signal, high_sig=None, mask=None, *args, **kwargs):
    """Fit a ComodTest on the given signals and return the comodulogram."""
    estimator = ComodTest(*args, **kwargs)
    estimator.fit(low_sig=low_sig, high_sig=high_sig, mask=mask)
    return estimator.comod_
def test_input_checking():
    """Bad parameter values must raise ValueError."""
    bad_kwargs = (
        {'method': 'wrong'},
        {'fs': 'wrong'},
        {'low_sig': 'wrong'},
        {'high_sig': 'wrong'},
    )
    for kwargs in bad_kwargs:
        assert_raises(ValueError, partial(fast_comod, **kwargs))
def test_different_dimension_in_input():
    """1D and 2D signals are accepted; a 3D signal must raise."""
    valid_shapes = ((4, -1), (-1, ), (1, -1))
    for shape in valid_shapes:
        fast_comod(signal.reshape(*shape))
    # Three dimensions are not supported
    assert_raises(ValueError, fast_comod, signal.reshape(2, 2, -1))
def test_high_sig_identical():
    """Passing high_sig=low_sig must be equivalent to high_sig=None."""
    for method in ALL_PAC_METRICS:
        if method in BICOHERENCE_PAC_METRICS:
            continue
        comod_ref = fast_comod(method=method)
        comod_dup = fast_comod(high_sig=signal, method=method)
        assert_array_equal(comod_ref, comod_dup)
def test_comod_correct_maximum():
    """The PAC must peak at the simulated (low_fq, high_fq) location."""
    for method in ALL_PAC_METRICS:
        est = ComodTest(method=method, progress_bar=True).fit(signal)
        comod = est.comod_
        # shape check holds for every metric
        assert_array_equal(comod.shape, (n_low, n_high))
        # the bicoherence metrics and 'jiang' fail the location check here
        if method == 'jiang' or method in BICOHERENCE_PAC_METRICS:
            continue
        fq_low, fq_high, pac_max = est.get_maximum_pac()
        assert_equal(fq_low, low_fq)
        assert_equal(fq_high, high_fq)
        assert_equal(pac_max, comod.max())
        assert_true(np.all(comod > 0))
def test_empty_mask():
    """An all-False mask must be equivalent to applying no mask at all."""
    empty = np.zeros(n_points, dtype=bool)
    for method in ALL_PAC_METRICS:
        masked = fast_comod(mask=empty, method=method)
        unmasked = fast_comod(low_sig=signal[~empty], method=method)
        assert_array_almost_equal(masked, unmasked, decimal=7)
def test_surrogates():
    """Check the surrogate statistics (z-score, surrogate_max) per metric."""
    n_surrogates = 10
    for method in ALL_PAC_METRICS:
        msg = 'with method=%s' % method
        if method in BICOHERENCE_PAC_METRICS or method == 'jiang':
            continue
        est = ComodTest(method=method, n_surrogates=n_surrogates).fit(signal)
        assert_array_equal(est.comod_.shape, (n_low, n_high), err_msg=msg)
        assert_array_equal(est.surrogates_.shape, (n_surrogates, n_low,
                                                   n_high), err_msg=msg)
        # z-score: the driven cell must stand out from a far corner
        z_score = est.comod_z_score_
        assert_array_equal(z_score.shape, (n_low, n_high), err_msg=msg)
        # Fixed: the old `if method != 'jiang'` guard around the next assert
        # was dead code — 'jiang' is already skipped at the top of the loop.
        assert_greater(z_score[1, 1], z_score[-1, -1], msg=msg)
        # surrogate_max: one maximum per surrogate comodulogram
        surrogate_max = est.surrogate_max_
        assert_array_equal(surrogate_max.shape, (n_surrogates, ))
        assert_greater(est.comod_[1, 1], surrogate_max.max(), msg=msg)
        assert_greater(surrogate_max.max(), est.comod_[-1, -1], msg=msg)
        # Smoke test with contours in the plotting function
        est.plot(contour_level=0.01, contour_method='comod_max')
        est.plot(contour_level=3, contour_method='z_score')
        plt.close('all')
def test_no_surrogate():
    """With n_surrogates == 0, the surrogate statistics must raise."""
    for method in ALL_PAC_METRICS:
        est = ComodTest(method=method, n_surrogates=0).fit(signal)
        for attribute in ('comod_z_score_', 'surrogate_max_'):
            with assert_raises(ValueError):
                getattr(est, attribute)
        with assert_raises(ValueError):
            est.plot(contour_level=0.01)
        plt.close('all')
def test_comodulogram_dar_models():
    """Smoke test the comodulogram with the DAR-family driven models."""
    for klass in (AR, DAR, HAR, StableDAR):
        if klass is StableDAR:
            # StableDAR additionally needs the Newton iteration count
            model = klass(ordar=10, ordriv=2, iter_newton=10)
        else:
            model = klass(ordar=10, ordriv=2)
        comod = fast_comod(method=model)
        # `not` instead of bitwise `~`: on a plain Python bool, `~True`
        # is -2 (truthy), so `not` is the robust negation here.
        assert_true(not np.any(np.isnan(comod)))
def test_plot_comodulogram():
    """Smoke test the plotting functions."""
    # standard plotting function
    ComodTest().fit(signal).plot()
    # special (single-frequency) plotting for every metric
    ax = plt.figure().gca()
    for method in ALL_PAC_METRICS:
        ComodTest(low_fq_range=[low_fq], method=method,
                  ax_special=ax).fit(signal)
    # ax_special with more than one low frequency must raise
    assert_raises(ValueError, partial(fast_comod, ax_special=ax))
    plt.close('all')
def test_signal_unchanged():
    """The test suite must not have mutated the module-level signal."""
    assert_array_equal(signal_copy, signal)
def _compare_values(v, v2):
    """Recursively assert that two (possibly nested) values are equal.

    Handles numpy arrays, dicts, RandomState instances and plain scalars.
    """
    if isinstance(v, np.ndarray):
        assert_array_equal(v, v2)
    elif isinstance(v, dict):
        # Iterate keys only: the original looped over .items() but
        # never used the values (they were re-fetched by key anyway).
        for key in v:
            _compare_values(v[key], v2[key])
    elif isinstance(v, np.random.RandomState):
        # Compare the full RNG state tuples element by element
        for s, s2 in zip(v.get_state(), v2.get_state()):
            _compare_values(s, s2)
    else:
        assert_equal(v, v2)
def _compare_instance(inst1, inst2):
    """Assert that every attribute of inst1 equals its peer on inst2."""
    for attr, value in vars(inst1).items():
        _compare_values(value, getattr(inst2, attr))
def test_save():
    """Round-trip a Comodulogram through HDF5, before and after fitting."""
    fname = _TempDir() + '/test.hdf5'
    est = ComodTest()
    # unfitted round-trip
    est.save(fname)
    _compare_instance(est, read_comodulogram(fname))
    # fitted round-trip, overwriting the previous file
    est.fit(signal)
    est.save(fname, overwrite=True)
    _compare_instance(est, read_comodulogram(fname))
| {
"repo_name": "pactools/pactools",
"path": "pactools/tests/test_comodulogram.py",
"copies": "1",
"size": "7965",
"license": "bsd-3-clause",
"hash": 8933751557045328000,
"line_mean": 33.9342105263,
"line_max": 79,
"alpha_frac": 0.6360326428,
"autogenerated": false,
"ratio": 3.159460531535105,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4295493174335105,
"avg_score": null,
"num_lines": null
} |
from functools import partial
import numpy as np
import pandas as pd
import pyqtgraph as pg
from pyqtgraph.Qt import QtCore, QtGui
from graphysio import utils
from graphysio.algorithms import waveform
from graphysio.structures import CycleId
from graphysio.utils import estimateSampleRate
class CurveItem(pg.PlotDataItem):
    """Plot item backed by a pandas Series indexed by timestamp."""

    visible = QtCore.pyqtSignal()
    invisible = QtCore.pyqtSignal()

    def __init__(self, series, parent, pen=None):
        self.parent = parent
        # Drop NaNs and collapse duplicate timestamps to their mean, so
        # the rest of the class can assume a clean, unique index.
        cleaned = series.dropna().groupby(level=0).mean()
        self.series = cleaned
        self.samplerate = estimateSampleRate(self.series)
        if pen is None:
            pen = QtGui.QColor(QtCore.Qt.black)
        super().__init__(name=cleaned.name, pen=pen, antialias=True)
        self.render()

    def render(self):
        """Push the current series values to the underlying plot item."""
        self.setData(x=self.series.index.values, y=self.series.values)

    def extend(self, newseries):
        """Append new samples, merging duplicate timestamps by mean."""
        combined = self.series.append(newseries).sort_index()
        self.series = combined.groupby(combined.index).mean()
        self.render()

    def rename(self, newname: str):
        """Rename both the backing series and the plot item label."""
        self.series.name = newname
        self.opts['name'] = newname
class POIItem(pg.ScatterPlotItem):
    """Scatter overlay of named points of interest on a parent curve.

    X-locations are stored per POI type in ``self.indices`` (pandas
    Index objects); the y-values are looked up in the parent curve's
    series at render time.
    """
    # POI type -> pyqtgraph symbol used to draw it.
    sym = {
        'start': 'star',
        'stop': 's',
        'diastole': 't1',
        'systole': 't',
        'dicrotic': 'd',
        'point': 'o',
    }
    # Symbols = OrderedDict([(name, QtGui.QPainterPath()) for name in ['o', 's', 't', 't1', 't2', 't3','d', '+', 'x', 'p', 'h', 'star']])
    def __init__(self, parent, name, pen=None):
        super().__init__(pen=pen, name=name)
        self.parent = parent
        # POI type -> pandas Index of x-locations.
        self.indices = {}
        # Scatter points currently highlighted by the user.
        self.selected = []
        # Reverse mapping: symbol -> POI type, used in removePoints.
        self.resym = {value: key for key, value in self.sym.items()}
        self.render()
    def addPointsByLocation(self, key, locations):
        """Add POIs of type *key* at the given x-locations and redraw."""
        if key not in self.indices:
            self.indices[key] = pd.Index([])
        oldidx = self.indices[key]
        newidx = oldidx.append(pd.Index(locations))
        # Deduplicate and keep the index sorted.
        self.indices[key] = newidx.unique().sort_values()
        self.render()
    def removePointsByLocation(self, key, locations):
        """Remove, for each location, the nearest POI of type *key*."""
        if key not in self.indices:
            return
        oldidx = self.indices[key]
        dellocs = []
        for loc in locations:
            locidx = oldidx.get_loc(loc, method='nearest')
            dellocs.append(locidx)
        newidx = oldidx.delete(dellocs)
        self.indices[key] = newidx
        self.render()
    def removePoints(self, points):
        """Remove the given scatter points (pyqtgraph SpotItem objects)."""
        for point in points:
            try:
                sym = self.resym[point.symbol()]
                idx = self.indices[sym]
            except KeyError:
                # Should not happen
                continue
            # Delete the index entry nearest to the point's x position.
            nidx = idx.get_loc(point.pos().x(), method='nearest')
            self.indices[sym] = idx.delete(nidx)
        self.render()
    def render(self):
        """Redraw all POIs, looking up y-values in the parent series."""
        data = []
        for key, idx in self.indices.items():
            if len(idx) < 1:
                continue
            idxnona = idx.dropna()
            points = self.parent.series.loc[idxnona]
            tmp = pd.DataFrame({'points': points, 'sym': self.sym[key]}, index=idxnona)
            data.append(tmp)
        if len(data) < 1:
            # Nothing to draw: clear any previously shown points.
            self.clear()
            return
        feet = pd.concat(data)
        self.setData(
            x=feet.index.values, y=feet['points'].values, symbol=feet['sym'].values
        )
    def isPointSelected(self, point):
        """Return True if *point* is in the current selection."""
        return point in self.selected
    def selectPoint(self, point):
        """Add *point* to the selection and highlight it in red."""
        if not self.isPointSelected(point):
            self.selected.append(point)
            point.setPen('r')
            point.setBrush('r')
    def unselectPoint(self, point):
        """Drop *point* from the selection and restore its appearance."""
        if self.isPointSelected(point):
            self.selected.remove(point)
            point.resetPen()
            point.resetBrush()
    def removeSelection(self):
        """Remove every currently selected point and redraw."""
        self.removePoints(self.selected)
        self.selected = []
        self.render()
    def rename(self, newname: str):
        """Rename the scatter item label."""
        self.opts['name'] = newname
class CurveItemWithPOI(CurveItem):
    """CurveItem bundled with a POIItem overlay for detected cycle feet."""
    visible = QtCore.pyqtSignal()
    invisible = QtCore.pyqtSignal()
    @staticmethod
    def sigPointClicked(feetitem, points):
        """Toggle the selection state of the clicked POI point."""
        point = points[0]  # only one point per click
        if not feetitem.isPointSelected(point):
            feetitem.selectPoint(point)
        else:
            feetitem.unselectPoint(point)
    def __init__(self, series, parent, pen=None):
        super().__init__(series, parent, pen)
        feetname = f'{series.name}-feet'
        self.feetitem = POIItem(self, name=feetname, pen=pen)
        parent.addItem(self.feetitem)
        self.feetitem.sigClicked.connect(self.sigPointClicked)
        # Keep the overlay's visibility in sync with the curve's.
        self.visible.connect(self.__becameVisible)
        self.invisible.connect(self.__becameInvisible)
    def __becameVisible(self):
        # Re-add the overlay if it was removed while hidden, then redraw.
        if self.feetitem not in self.parent.listDataItems():
            self.parent.addItem(self.feetitem)
        self.render()
        self.feetitem.render()
    def __becameInvisible(self):
        self.parent.removeItem(self.feetitem)
    def addFeet(self, cycleid):
        """Detect cycle landmarks for *cycleid* and store them on the overlay.

        Raises ValueError for unknown cycle identifiers.
        """
        if cycleid is CycleId.none:
            return
        elif cycleid is CycleId.velocity:
            starts, stops = waveform.findFlowCycles(self)
            self.feetitem.indices['start'] = starts
            self.feetitem.indices['stop'] = stops
        elif cycleid is CycleId.foot:
            foot = waveform.findPressureFeet(self)
            self.feetitem.indices['start'] = foot
        elif cycleid is CycleId.pressure:
            # Pressure landmarks need the feet first; compute on demand.
            if 'start' not in self.feetitem.indices:
                self.addFeet(CycleId.foot)
            dia, sbp, dic = waveform.findPressureFull(self)
            self.feetitem.indices['diastole'] = dia
            self.feetitem.indices['systole'] = sbp
            self.feetitem.indices['dicrotic'] = dic
        else:
            raise ValueError(cycleid)
        self.feetitem.render()
    def getCycleIndices(self, vrange=None):
        """Return (begins, durations) arrays for each detected cycle.

        vrange optionally restricts cycles to an (xmin, xmax) window.
        """
        s = self.series
        clip = partial(utils.clip, vrange=vrange)
        hasstarts = ('start' in self.feetitem.indices) and self.feetitem.indices[
            'start'
        ].size > 0
        hasstops = ('stop' in self.feetitem.indices) and self.feetitem.indices[
            'stop'
        ].size > 0
        if vrange:
            xmin, xmax = vrange
        else:
            xmin = s.index[0]
            xmax = s.index[-1]
        if not hasstarts:
            # We have no feet, treat the whole signal as one cycle
            locs = (s.index.get_loc(i, method='nearest') for i in [xmin, xmax])
            indices = (s.index[l] for l in locs)
            begins, ends = [np.array([i]) for i in indices]
        elif not hasstops:
            # We have no stops, starts serve as stops for previous cycle
            begins = clip(self.feetitem.indices['start'].values)
            endloc = s.index.get_loc(xmax, method='nearest')
            end = s.index[endloc]
            ends = np.append(begins[1:], end)
        else:
            # We have starts and stops, use them
            begins = self.feetitem.indices['start'].values
            ends = self.feetitem.indices['stop'].values
            begins, ends = map(clip, [begins, ends])
        # Handle the case where we start in the middle of a cycle
        while ends[0] <= begins[0]:
            ends = ends[1:]
        # Pair up begins/ends; truncate to the shorter of the two.
        begins, ends = utils.truncatevecs([begins, ends])
        durations = ends - begins
        return (begins, durations)
    def getFeetPoints(self, feetname):
        """Return the series values at the non-null feet of *feetname*."""
        feetidx = self.feetitem.indices[feetname]
        feetnona = feetidx[pd.notnull(feetidx)]
        return self.series.loc[feetnona]
| {
"repo_name": "jaj42/GraPhysio",
"path": "graphysio/plotwidgets/curves.py",
"copies": "1",
"size": "7891",
"license": "isc",
"hash": 4886766655168575000,
"line_mean": 32.7222222222,
"line_max": 137,
"alpha_frac": 0.583956406,
"autogenerated": false,
"ratio": 3.754043767840152,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48380001738401524,
"avg_score": null,
"num_lines": null
} |
from functools import partial
import numpy as np
import pandas as pd
import pyqtgraph as pg
from pyqtgraph.Qt import QtGui, QtCore
from graphysio import utils
from graphysio.structures import CycleId
from graphysio.utils import estimateSampleRate
from graphysio.algorithms import waveform
class CurveItem(pg.PlotDataItem):
    """Plot item backed by a pandas Series indexed by timestamp."""

    visible = QtCore.pyqtSignal()
    invisible = QtCore.pyqtSignal()

    def __init__(self, series, parent, pen=None):
        self.parent = parent
        # Drop NaNs and collapse duplicate timestamps to their mean, so
        # the rest of the class can assume a clean, unique index.
        cleaned = series.dropna().groupby(level=0).mean()
        self.series = cleaned
        self.samplerate = estimateSampleRate(self.series)
        if pen is None:
            pen = QtGui.QColor(QtCore.Qt.black)
        super().__init__(name=cleaned.name, pen=pen, antialias=True)
        self.render()

    def render(self):
        """Push the current series values to the underlying plot item."""
        self.setData(x=self.series.index.values, y=self.series.values)

    def extend(self, newseries):
        """Append new samples, merging duplicate timestamps by mean."""
        combined = self.series.append(newseries).sort_index()
        self.series = combined.groupby(combined.index).mean()
        self.render()

    def rename(self, newname: str):
        """Rename both the backing series and the plot item label."""
        self.series.name = newname
        self.opts['name'] = newname
class POIItem(pg.ScatterPlotItem):
    """Scatter overlay of named points of interest on a parent curve.

    X-locations are stored per POI type in ``self.indices`` (pandas
    Index objects); the y-values are looked up in the parent curve's
    series at render time.
    """
    # POI type -> pyqtgraph symbol used to draw it.
    sym = {
        'start': 'star',
        'stop': 's',
        'diastole': 't1',
        'systole': 't',
        'dicrotic': 'd',
        'point': 'o',
    }
    # Symbols = OrderedDict([(name, QtGui.QPainterPath()) for name in ['o', 's', 't', 't1', 't2', 't3','d', '+', 'x', 'p', 'h', 'star']])
    def __init__(self, parent, name, pen=None):
        super().__init__(pen=pen, name=name)
        self.parent = parent
        # POI type -> pandas Index of x-locations.
        self.indices = {}
        # Scatter points currently highlighted by the user.
        self.selected = []
        # Reverse mapping: symbol -> POI type, used in removePoints.
        self.resym = {value: key for key, value in self.sym.items()}
        self.render()
    def addPointsByLocation(self, key, locations):
        """Add POIs of type *key* at the given x-locations and redraw."""
        if key not in self.indices:
            self.indices[key] = pd.Index([])
        oldidx = self.indices[key]
        newidx = oldidx.append(pd.Index(locations))
        # Deduplicate and keep the index sorted.
        self.indices[key] = newidx.unique().sort_values()
        self.render()
    def removePointsByLocation(self, key, locations):
        """Remove, for each location, the nearest POI of type *key*."""
        if key not in self.indices:
            return
        oldidx = self.indices[key]
        dellocs = []
        for loc in locations:
            locidx = oldidx.get_loc(loc, method='nearest')
            dellocs.append(locidx)
        newidx = oldidx.delete(dellocs)
        self.indices[key] = newidx
        self.render()
    def removePoints(self, points):
        """Remove the given scatter points (pyqtgraph SpotItem objects)."""
        for point in points:
            try:
                sym = self.resym[point.symbol()]
                idx = self.indices[sym]
            except KeyError:
                # Should not happen
                continue
            # Delete the index entry nearest to the point's x position.
            nidx = idx.get_loc(point.pos().x(), method='nearest')
            self.indices[sym] = idx.delete(nidx)
        self.render()
    def render(self):
        """Redraw all POIs, looking up y-values in the parent series."""
        data = []
        for key, idx in self.indices.items():
            if len(idx) < 1:
                continue
            idxnona = idx.dropna()
            points = self.parent.series.loc[idxnona]
            tmp = pd.DataFrame({'points': points, 'sym': self.sym[key]}, index=idxnona)
            data.append(tmp)
        if len(data) < 1:
            # Nothing to draw: clear any previously shown points.
            self.clear()
            return
        feet = pd.concat(data)
        self.setData(
            x=feet.index.values, y=feet['points'].values, symbol=feet['sym'].values
        )
    def isPointSelected(self, point):
        """Return True if *point* is in the current selection."""
        return point in self.selected
    def selectPoint(self, point):
        """Add *point* to the selection and highlight it in red."""
        if not self.isPointSelected(point):
            self.selected.append(point)
            point.setPen('r')
            point.setBrush('r')
    def unselectPoint(self, point):
        """Drop *point* from the selection and restore its appearance."""
        if self.isPointSelected(point):
            self.selected.remove(point)
            point.resetPen()
            point.resetBrush()
    def removeSelection(self):
        """Remove every currently selected point and redraw."""
        self.removePoints(self.selected)
        self.selected = []
        self.render()
    def rename(self, newname: str):
        """Rename the scatter item label."""
        self.opts['name'] = newname
class CurveItemWithPOI(CurveItem):
    """CurveItem bundled with a POIItem overlay for detected cycle feet."""
    visible = QtCore.pyqtSignal()
    invisible = QtCore.pyqtSignal()
    @staticmethod
    def sigPointClicked(feetitem, points):
        """Toggle the selection state of the clicked POI point."""
        point = points[0]  # only one point per click
        if not feetitem.isPointSelected(point):
            feetitem.selectPoint(point)
        else:
            feetitem.unselectPoint(point)
    def __init__(self, series, parent, pen=None):
        super().__init__(series, parent, pen)
        feetname = f'{series.name}-feet'
        self.feetitem = POIItem(self, name=feetname, pen=pen)
        parent.addItem(self.feetitem)
        self.feetitem.sigClicked.connect(self.sigPointClicked)
        # Keep the overlay's visibility in sync with the curve's.
        self.visible.connect(self.__becameVisible)
        self.invisible.connect(self.__becameInvisible)
    def __becameVisible(self):
        # Re-add the overlay if it was removed while hidden, then redraw.
        if self.feetitem not in self.parent.listDataItems():
            self.parent.addItem(self.feetitem)
        self.render()
        self.feetitem.render()
    def __becameInvisible(self):
        self.parent.removeItem(self.feetitem)
    def addFeet(self, cycleid):
        """Detect cycle landmarks for *cycleid* and store them on the overlay.

        Raises ValueError for unknown cycle identifiers.
        """
        if cycleid is CycleId.none:
            return
        elif cycleid is CycleId.velocity:
            starts, stops = waveform.findFlowCycles(self)
            self.feetitem.indices['start'] = starts
            self.feetitem.indices['stop'] = stops
        elif cycleid is CycleId.foot:
            foot = waveform.findPressureFeet(self)
            self.feetitem.indices['start'] = foot
        elif cycleid is CycleId.pressure:
            # Pressure landmarks need the feet first; compute on demand.
            if 'start' not in self.feetitem.indices:
                self.addFeet(CycleId.foot)
            dia, sbp, dic = waveform.findPressureFull(self)
            self.feetitem.indices['diastole'] = dia
            self.feetitem.indices['systole'] = sbp
            self.feetitem.indices['dicrotic'] = dic
        else:
            raise ValueError(cycleid)
        self.feetitem.render()
    def getCycleIndices(self, vrange=None):
        """Return (begins, durations) arrays for each detected cycle.

        vrange optionally restricts cycles to an (xmin, xmax) window.
        """
        s = self.series
        clip = partial(utils.clip, vrange=vrange)
        hasstarts = ('start' in self.feetitem.indices) and self.feetitem.indices[
            'start'
        ].size > 0
        hasstops = ('stop' in self.feetitem.indices) and self.feetitem.indices[
            'stop'
        ].size > 0
        if vrange:
            xmin, xmax = vrange
        else:
            xmin = s.index[0]
            xmax = s.index[-1]
        if not hasstarts:
            # We have no feet, treat the whole signal as one cycle
            locs = (s.index.get_loc(i, method='nearest') for i in [xmin, xmax])
            indices = (s.index[l] for l in locs)
            begins, ends = [np.array([i]) for i in indices]
        elif not hasstops:
            # We have no stops, starts serve as stops for previous cycle
            begins = clip(self.feetitem.indices['start'].values)
            endloc = s.index.get_loc(xmax, method='nearest')
            end = s.index[endloc]
            ends = np.append(begins[1:], end)
        else:
            # We have starts and stops, use them
            begins = self.feetitem.indices['start'].values
            ends = self.feetitem.indices['stop'].values
            begins, ends = map(clip, [begins, ends])
        # Handle the case where we start in the middle of a cycle
        while ends[0] <= begins[0]:
            ends = ends[1:]
        # Pair up begins/ends; truncate to the shorter of the two.
        begins, ends = utils.truncatevecs([begins, ends])
        durations = ends - begins
        return (begins, durations)
    def getFeetPoints(self, feetname):
        """Return the series values at the non-null feet of *feetname*."""
        feetidx = self.feetitem.indices[feetname]
        feetnona = feetidx[pd.notnull(feetidx)]
        return self.series.loc[feetnona]
| {
"repo_name": "jaj42/dyngraph",
"path": "graphysio/plotwidgets/curves.py",
"copies": "1",
"size": "7892",
"license": "isc",
"hash": 6289507152615582000,
"line_mean": 32.5829787234,
"line_max": 137,
"alpha_frac": 0.5838824126,
"autogenerated": false,
"ratio": 3.7527341892534474,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48366166018534473,
"avg_score": null,
"num_lines": null
} |
from functools import partial
import numpy as np
import pytest
from guacamol.score_modifier import LinearModifier, SquaredModifier, AbsoluteScoreModifier, GaussianModifier, \
MinGaussianModifier, MaxGaussianModifier, ThresholdedLinearModifier, ClippedScoreModifier, \
SmoothClippedScoreModifier, ChainedModifier
# Scalar and 2D-array inputs shared by all score-modifier tests below.
scalar_value = 8.343
value_array = np.array([[-3.3, 0, 5.5],
                        [0.011, 2.0, -33]])
def test_linear_function_default():
    # The default LinearModifier is the identity function
    modifier = LinearModifier()
    assert modifier(scalar_value) == scalar_value
    assert np.array_equal(modifier(value_array), value_array)
def test_linear_function_with_slope():
    # A custom slope scales scalars and arrays alike
    slope = 3.3
    modifier = LinearModifier(slope=slope)
    assert modifier(scalar_value) == slope * scalar_value
    assert np.array_equal(modifier(value_array), slope * value_array)
def test_squared_function():
    # score = 1 - coefficient * (target - x)^2
    target_value = 5.555
    coefficient = 0.123
    modifier = SquaredModifier(target_value=target_value, coefficient=coefficient)
    assert modifier(scalar_value) == 1.0 - coefficient * (target_value - scalar_value) ** 2
    assert np.array_equal(modifier(value_array),
                          1.0 - coefficient * np.square(target_value - value_array))
def test_absolute_function():
    # score = 1 - |target - x|
    target_value = 5.555
    modifier = AbsoluteScoreModifier(target_value=target_value)
    assert modifier(scalar_value) == 1.0 - abs(target_value - scalar_value)
    assert np.array_equal(modifier(value_array),
                          1.0 - np.abs(target_value - value_array))
def gaussian(x, mu, sig):
    """Unnormalized Gaussian with mean mu and standard deviation sig."""
    return np.exp(-((x - mu) ** 2) / (2.0 * sig ** 2))
def test_gaussian_function():
    # Unit score at the mean, gaussian decay everywhere else
    mu, sigma = -1.223, 0.334
    modifier = GaussianModifier(mu=mu, sigma=sigma)
    assert modifier(mu) == 1.0
    assert modifier(scalar_value) == gaussian(scalar_value, mu, sigma)
    assert np.allclose(modifier(value_array), gaussian(value_array, mu, sigma))
def test_min_gaussian_function():
    """MinGaussianModifier saturates at 1 below mu and decays above."""
    mu = -1.223
    sigma = 0.334
    f = MinGaussianModifier(mu=mu, sigma=sigma)
    assert f(mu) == 1.0
    # saturation at the extremes
    assert f(-np.inf) == 1.0
    assert f(np.inf) == 0.0
    full_gaussian = partial(gaussian, mu=mu, sig=sigma)

    # PEP 8 (E731): a named reference function instead of a lambda assignment.
    def min_gaussian_scalar(x):
        return 1.0 if x < mu else full_gaussian(x)

    min_gaussian = np.vectorize(min_gaussian_scalar)
    assert f(scalar_value) == min_gaussian(scalar_value)
    assert np.allclose(f(value_array), min_gaussian(value_array))
def test_max_gaussian_function():
    """MaxGaussianModifier saturates at 1 above mu and decays below."""
    mu = -1.223
    sigma = 0.334
    f = MaxGaussianModifier(mu=mu, sigma=sigma)
    assert f(mu) == 1.0
    # saturation at the extremes
    assert f(-np.inf) == 0.0
    assert f(np.inf) == 1.0
    full_gaussian = partial(gaussian, mu=mu, sig=sigma)

    # PEP 8 (E731): a named reference function instead of a lambda assignment.
    def max_gaussian_scalar(x):
        return 1.0 if x > mu else full_gaussian(x)

    max_gaussian = np.vectorize(max_gaussian_scalar)
    assert f(scalar_value) == max_gaussian(scalar_value)
    assert np.allclose(f(value_array), max_gaussian(value_array))
def test_tanimoto_threshold_function():
    # score = min(x, threshold) / threshold, i.e. linear then capped at 1
    threshold = 5.555
    modifier = ThresholdedLinearModifier(threshold=threshold)
    assert modifier(np.inf) == 1.0
    assert modifier(threshold) == 1.0
    assert np.array_equal(modifier(value_array),
                          np.minimum(value_array, threshold) / threshold)
def test_clipped_function():
    """ClippedScoreModifier: flat outside [lower_x, upper_x], linear inside."""
    min_x = 4.4
    max_x = 8.8
    min_score = -3.3
    max_score = 9.2
    modifier = ClippedScoreModifier(upper_x=max_x, lower_x=min_x, high_score=max_score, low_score=min_score)
    # values smaller than min_x should be assigned min_score
    for x in [-2, 0, 4, 4.4]:
        assert modifier(x) == min_score
    # values larger than max_x should be assigned max_score
    # (fixed: the original comment wrongly said min_score)
    for x in [8.8, 9.0, 1000]:
        assert modifier(x) == max_score
    # values in between are interpolated
    slope = (max_score - min_score) / (max_x - min_x)
    for x in [4.4, 4.8, 5.353, 8.034, 8.8]:
        dx = x - min_x
        dy = dx * slope
        assert modifier(x) == pytest.approx(min_score + dy)
def test_clipped_function_inverted():
    # Decreasing scores: upper_x < lower_x is also supported
    max_x, min_x = 4.4, 8.8
    min_score, max_score = -3.3, 9.2
    modifier = ClippedScoreModifier(upper_x=max_x, lower_x=min_x, high_score=max_score, low_score=min_score)
    # below max_x the score saturates at max_score
    for x in [-2, 0, 4, 4.4]:
        assert modifier(x) == max_score
    # above min_x the score saturates at min_score
    for x in [8.8, 9.0, 1000]:
        assert modifier(x) == min_score
    # in between, linear interpolation (negative slope here)
    slope = (max_score - min_score) / (max_x - min_x)
    for x in [4.4, 4.8, 5.353, 8.034, 8.8]:
        assert modifier(x) == pytest.approx(min_score + (x - min_x) * slope)
def test_thresholded_is_special_case_of_clipped_for_positive_input():
    # For non-negative inputs the two modifiers must agree
    threshold = 4.584
    values = np.array([0, 2.3, 8.545, 3.23, 0.12, 55.555])
    thresholded = ThresholdedLinearModifier(threshold=threshold)
    clipped = ClippedScoreModifier(upper_x=threshold)
    assert np.allclose(thresholded(values), clipped(values))
def test_smooth_clipped():
    # Sigmoid-like clipping: check midpoint slope and asymptotes
    min_x, max_x = 4.4, 8.8
    min_score, max_score = -3.3, 9.2
    modifier = SmoothClippedScoreModifier(upper_x=max_x, lower_x=min_x, high_score=max_score, low_score=min_score)
    # central finite difference at the midpoint matches the linear slope
    middle_x = (min_x + max_x) / 2
    delta = 1e-5
    slope = (modifier(middle_x + delta) - modifier(middle_x - delta)) / (2 * delta)
    assert slope == pytest.approx((max_score - min_score) / (max_x - min_x))
    # asymptotic behavior at +- infinity
    assert modifier(1e5) == pytest.approx(max_score)
    assert modifier(-1e5) == pytest.approx(min_score)
def test_smooth_clipped_inverted():
    # Decreasing scores: the smooth clip also supports upper_x < lower_x
    max_x, min_x = 4.4, 8.8
    min_score, max_score = -3.3, 9.2
    modifier = SmoothClippedScoreModifier(upper_x=max_x, lower_x=min_x, high_score=max_score, low_score=min_score)
    # central finite difference at the midpoint matches the (negative) slope
    middle_x = (min_x + max_x) / 2
    delta = 1e-5
    slope = (modifier(middle_x + delta) - modifier(middle_x - delta)) / (2 * delta)
    assert slope == pytest.approx((max_score - min_score) / (max_x - min_x))
    # asymptotes are swapped relative to the non-inverted case
    assert modifier(1e5) == pytest.approx(min_score)
    assert modifier(-1e5) == pytest.approx(max_score)
def test_chained_modifier():
    # Modifiers are applied in list order, so chaining is not commutative
    linear = LinearModifier(slope=2)
    squared = SquaredModifier(10.0)
    assert ChainedModifier([linear, squared])(scalar_value) == \
        1.0 - np.square(10.0 - (2 * scalar_value))
    assert ChainedModifier([squared, linear])(scalar_value) == \
        2 * (1.0 - np.square(10.0 - scalar_value))
| {
"repo_name": "BenevolentAI/guacamol",
"path": "tests/test_score_modifier.py",
"copies": "1",
"size": "7184",
"license": "mit",
"hash": -6061585357880673000,
"line_mean": 27.9677419355,
"line_max": 114,
"alpha_frac": 0.6513084633,
"autogenerated": false,
"ratio": 3.1412330564057718,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.42925415197057715,
"avg_score": null,
"num_lines": null
} |
from functools import partial
import numpy as np
import pytest
from pandas.core.dtypes.common import is_categorical_dtype
from pandas.core.dtypes.dtypes import IntervalDtype
from pandas import (
Categorical,
CategoricalIndex,
Float64Index,
Index,
Int64Index,
Interval,
IntervalIndex,
date_range,
notna,
period_range,
timedelta_range,
)
from pandas.core.arrays import IntervalArray
import pandas.core.common as com
import pandas.util.testing as tm
@pytest.fixture(params=[None, "foo"])
def name(request):
    """Fixture providing both an anonymous and a named index."""
    return request.param
class Base:
    """
    Common tests for all variations of IntervalIndex construction. Input data
    to be supplied in breaks format, then converted by the subclass method
    get_kwargs_from_breaks to the expected format.
    """
    @pytest.mark.parametrize(
        "breaks",
        [
            [3, 14, 15, 92, 653],
            np.arange(10, dtype="int64"),
            Int64Index(range(-10, 11)),
            Float64Index(np.arange(20, 30, 0.5)),
            date_range("20180101", periods=10),
            date_range("20180101", periods=10, tz="US/Eastern"),
            timedelta_range("1 day", periods=10),
        ],
    )
    def test_constructor(self, constructor, breaks, closed, name):
        # Round-trip: closed/name/dtype/left/right must all survive
        # construction from the subclass-specific kwargs format.
        result_kwargs = self.get_kwargs_from_breaks(breaks, closed)
        result = constructor(closed=closed, name=name, **result_kwargs)
        assert result.closed == closed
        assert result.name == name
        assert result.dtype.subtype == getattr(breaks, "dtype", "int64")
        tm.assert_index_equal(result.left, Index(breaks[:-1]))
        tm.assert_index_equal(result.right, Index(breaks[1:]))
    @pytest.mark.parametrize(
        "breaks, subtype",
        [
            (Int64Index([0, 1, 2, 3, 4]), "float64"),
            (Int64Index([0, 1, 2, 3, 4]), "datetime64[ns]"),
            (Int64Index([0, 1, 2, 3, 4]), "timedelta64[ns]"),
            (Float64Index([0, 1, 2, 3, 4]), "int64"),
            (date_range("2017-01-01", periods=5), "int64"),
            (timedelta_range("1 day", periods=5), "int64"),
        ],
    )
    def test_constructor_dtype(self, constructor, breaks, subtype):
        # GH 19262: conversion via dtype parameter
        # Passing dtype (as IntervalDtype or its string form) must be
        # equivalent to converting the breaks beforehand.
        expected_kwargs = self.get_kwargs_from_breaks(breaks.astype(subtype))
        expected = constructor(**expected_kwargs)
        result_kwargs = self.get_kwargs_from_breaks(breaks)
        iv_dtype = IntervalDtype(subtype)
        for dtype in (iv_dtype, str(iv_dtype)):
            result = constructor(dtype=dtype, **result_kwargs)
            tm.assert_index_equal(result, expected)
    @pytest.mark.parametrize("breaks", [[np.nan] * 2, [np.nan] * 4, [np.nan] * 50])
    def test_constructor_nan(self, constructor, breaks, closed):
        # GH 18421
        # All-NaN breaks must yield a float64 subtype with NaN values.
        result_kwargs = self.get_kwargs_from_breaks(breaks)
        result = constructor(closed=closed, **result_kwargs)
        expected_subtype = np.float64
        expected_values = np.array(breaks[:-1], dtype=object)
        assert result.closed == closed
        assert result.dtype.subtype == expected_subtype
        tm.assert_numpy_array_equal(result._ndarray_values, expected_values)
    @pytest.mark.parametrize(
        "breaks",
        [
            [],
            np.array([], dtype="int64"),
            np.array([], dtype="float64"),
            np.array([], dtype="datetime64[ns]"),
            np.array([], dtype="timedelta64[ns]"),
        ],
    )
    def test_constructor_empty(self, constructor, breaks, closed):
        # GH 18421
        # Empty breaks must produce an empty index keeping the input dtype.
        result_kwargs = self.get_kwargs_from_breaks(breaks)
        result = constructor(closed=closed, **result_kwargs)
        expected_values = np.array([], dtype=object)
        expected_subtype = getattr(breaks, "dtype", np.int64)
        assert result.empty
        assert result.closed == closed
        assert result.dtype.subtype == expected_subtype
        tm.assert_numpy_array_equal(result._ndarray_values, expected_values)
    @pytest.mark.parametrize(
        "breaks",
        [
            tuple("0123456789"),
            list("abcdefghij"),
            np.array(list("abcdefghij"), dtype=object),
            np.array(list("abcdefghij"), dtype="<U1"),
        ],
    )
    def test_constructor_string(self, constructor, breaks):
        # GH 19016
        # String-like breaks are rejected with a TypeError.
        msg = (
            "category, object, and string subtypes are not supported "
            "for IntervalIndex"
        )
        with pytest.raises(TypeError, match=msg):
            constructor(**self.get_kwargs_from_breaks(breaks))
    @pytest.mark.parametrize("cat_constructor", [Categorical, CategoricalIndex])
    def test_constructor_categorical_valid(self, constructor, cat_constructor):
        # GH 21243/21253
        # Categorical breaks with a valid underlying dtype must work.
        if isinstance(constructor, partial) and constructor.func is Index:
            # Index is defined to create CategoricalIndex from categorical data
            pytest.skip()
        breaks = np.arange(10, dtype="int64")
        expected = IntervalIndex.from_breaks(breaks)
        cat_breaks = cat_constructor(breaks)
        result_kwargs = self.get_kwargs_from_breaks(cat_breaks)
        result = constructor(**result_kwargs)
        tm.assert_index_equal(result, expected)
    def test_generic_errors(self, constructor):
        """Invalid kwargs must raise with the documented messages."""
        # filler input data to be used when supplying invalid kwargs
        filler = self.get_kwargs_from_breaks(range(10))
        # invalid closed
        msg = "invalid option for 'closed': invalid"
        with pytest.raises(ValueError, match=msg):
            constructor(closed="invalid", **filler)
        # unsupported dtype
        msg = "dtype must be an IntervalDtype, got int64"
        with pytest.raises(TypeError, match=msg):
            constructor(dtype="int64", **filler)
        # invalid dtype
        msg = "data type 'invalid' not understood"
        with pytest.raises(TypeError, match=msg):
            constructor(dtype="invalid", **filler)
        # no point in nesting periods in an IntervalIndex
        periods = period_range("2000-01-01", periods=10)
        periods_kwargs = self.get_kwargs_from_breaks(periods)
        msg = "Period dtypes are not supported, use a PeriodIndex instead"
        with pytest.raises(ValueError, match=msg):
            constructor(**periods_kwargs)
        # decreasing values
        decreasing_kwargs = self.get_kwargs_from_breaks(range(10, -1, -1))
        msg = "left side of interval must be <= right side"
        with pytest.raises(ValueError, match=msg):
            constructor(**decreasing_kwargs)
class TestFromArrays(Base):
    """Tests specific to IntervalIndex.from_arrays"""

    @pytest.fixture
    def constructor(self):
        return IntervalIndex.from_arrays

    def get_kwargs_from_breaks(self, breaks, closed="right"):
        """
        converts intervals in breaks format to a dictionary of kwargs
        specific to the format expected by IntervalIndex.from_arrays
        """
        # Consecutive breaks become the left/right endpoint arrays.
        return {"left": breaks[:-1], "right": breaks[1:]}

    def test_constructor_errors(self):
        # GH 19016: categorical data
        data = Categorical(list("01234abcde"), ordered=True)
        msg = (
            "category, object, and string subtypes are not supported "
            "for IntervalIndex"
        )
        with pytest.raises(TypeError, match=msg):
            IntervalIndex.from_arrays(data[:-1], data[1:])

        # unequal length
        left = [0, 1, 2]
        right = [2, 3]
        msg = "left and right must have the same length"
        with pytest.raises(ValueError, match=msg):
            IntervalIndex.from_arrays(left, right)

    @pytest.mark.parametrize(
        "left_subtype, right_subtype", [(np.int64, np.float64), (np.float64, np.int64)]
    )
    def test_mixed_float_int(self, left_subtype, right_subtype):
        """mixed int/float left/right results in float for both sides"""
        left = np.arange(9, dtype=left_subtype)
        right = np.arange(1, 10, dtype=right_subtype)
        result = IntervalIndex.from_arrays(left, right)

        expected_left = Float64Index(left)
        expected_right = Float64Index(right)
        expected_subtype = np.float64

        tm.assert_index_equal(result.left, expected_left)
        tm.assert_index_equal(result.right, expected_right)
        assert result.dtype.subtype == expected_subtype
class TestFromBreaks(Base):
    """Tests specific to IntervalIndex.from_breaks"""

    @pytest.fixture
    def constructor(self):
        return IntervalIndex.from_breaks

    def get_kwargs_from_breaks(self, breaks, closed="right"):
        """
        converts intervals in breaks format to a dictionary of kwargs
        specific to the format expected by IntervalIndex.from_breaks
        """
        # from_breaks takes the breaks directly.
        return {"breaks": breaks}

    def test_constructor_errors(self):
        # GH 19016: categorical data
        data = Categorical(list("01234abcde"), ordered=True)
        msg = (
            "category, object, and string subtypes are not supported "
            "for IntervalIndex"
        )
        with pytest.raises(TypeError, match=msg):
            IntervalIndex.from_breaks(data)

    def test_length_one(self):
        """breaks of length one produce an empty IntervalIndex"""
        breaks = [0]
        result = IntervalIndex.from_breaks(breaks)
        expected = IntervalIndex.from_breaks([])
        tm.assert_index_equal(result, expected)
class TestFromTuples(Base):
    """Tests specific to IntervalIndex.from_tuples"""

    @pytest.fixture
    def constructor(self):
        return IntervalIndex.from_tuples

    def get_kwargs_from_breaks(self, breaks, closed="right"):
        """
        converts intervals in breaks format to a dictionary of kwargs
        specific to the format expected by IntervalIndex.from_tuples
        """
        if len(breaks) == 0:
            return {"data": breaks}

        # Pair consecutive breaks into (left, right) tuples, keeping the
        # container type of the input where possible.
        tuples = list(zip(breaks[:-1], breaks[1:]))
        if isinstance(breaks, (list, tuple)):
            return {"data": tuples}
        elif is_categorical_dtype(breaks):
            return {"data": breaks._constructor(tuples)}
        return {"data": com.asarray_tuplesafe(tuples)}

    def test_constructor_errors(self):
        # non-tuple
        tuples = [(0, 1), 2, (3, 4)]
        msg = "IntervalIndex.from_tuples received an invalid item, 2"
        with pytest.raises(TypeError, match=msg.format(t=tuples)):
            IntervalIndex.from_tuples(tuples)

        # too few/many items
        tuples = [(0, 1), (2,), (3, 4)]
        msg = "IntervalIndex.from_tuples requires tuples of length 2, got {t}"
        with pytest.raises(ValueError, match=msg.format(t=tuples)):
            IntervalIndex.from_tuples(tuples)

        tuples = [(0, 1), (2, 3, 4), (5, 6)]
        with pytest.raises(ValueError, match=msg.format(t=tuples)):
            IntervalIndex.from_tuples(tuples)

    def test_na_tuples(self):
        # tuple (NA, NA) evaluates the same as NA as an element
        na_tuple = [(0, 1), (np.nan, np.nan), (2, 3)]
        idx_na_tuple = IntervalIndex.from_tuples(na_tuple)
        idx_na_element = IntervalIndex.from_tuples([(0, 1), np.nan, (2, 3)])
        tm.assert_index_equal(idx_na_tuple, idx_na_element)
class TestClassConstructors(Base):
    """Tests specific to the IntervalIndex/Index constructors"""

    @pytest.fixture(
        params=[IntervalIndex, partial(Index, dtype="interval")],
        ids=["IntervalIndex", "Index"],
    )
    def constructor(self, request):
        # Run every test against both public entry points.
        return request.param

    def get_kwargs_from_breaks(self, breaks, closed="right"):
        """
        converts intervals in breaks format to a dictionary of kwargs
        specific to the format expected by the IntervalIndex/Index constructors
        """
        if len(breaks) == 0:
            return {"data": breaks}

        # Build Interval objects from consecutive breaks; NA left
        # endpoints stay as scalar NA elements.
        ivs = [
            Interval(l, r, closed) if notna(l) else l
            for l, r in zip(breaks[:-1], breaks[1:])
        ]

        if isinstance(breaks, list):
            return {"data": ivs}
        elif is_categorical_dtype(breaks):
            return {"data": breaks._constructor(ivs)}
        return {"data": np.array(ivs, dtype=object)}

    def test_generic_errors(self, constructor):
        """
        override the base class implementation since errors are handled
        differently; checks unnecessary since caught at the Interval level
        """
        pass

    def test_constructor_string(self):
        # GH23013
        # When forming the interval from breaks,
        # the interval of strings is already forbidden.
        pass

    def test_constructor_errors(self, constructor):
        # mismatched closed within intervals with no constructor override
        ivs = [Interval(0, 1, closed="right"), Interval(2, 3, closed="left")]
        msg = "intervals must all be closed on the same side"
        with pytest.raises(ValueError, match=msg):
            constructor(ivs)

        # scalar
        msg = (
            r"IntervalIndex\(...\) must be called with a collection of "
            "some kind, 5 was passed"
        )
        with pytest.raises(TypeError, match=msg):
            constructor(5)

        # not an interval
        msg = "type <class 'numpy.int64'> with value 0 is not an interval"
        with pytest.raises(TypeError, match=msg):
            constructor([0, 1])

    @pytest.mark.parametrize(
        "data, closed",
        [
            ([], "both"),
            ([np.nan, np.nan], "neither"),
            (
                [Interval(0, 3, closed="neither"), Interval(2, 5, closed="neither")],
                "left",
            ),
            (
                [Interval(0, 3, closed="left"), Interval(2, 5, closed="right")],
                "neither",
            ),
            (IntervalIndex.from_breaks(range(5), closed="both"), "right"),
        ],
    )
    def test_override_inferred_closed(self, constructor, data, closed):
        # GH 19370
        # An explicit closed= overrides whatever is inferred from the data.
        if isinstance(data, IntervalIndex):
            tuples = data.to_tuples()
        else:
            tuples = [(iv.left, iv.right) if notna(iv) else iv for iv in data]
        expected = IntervalIndex.from_tuples(tuples, closed=closed)
        result = constructor(data, closed=closed)
        tm.assert_index_equal(result, expected)

    @pytest.mark.parametrize(
        "values_constructor", [list, np.array, IntervalIndex, IntervalArray]
    )
    def test_index_object_dtype(self, values_constructor):
        # Index(intervals, dtype=object) is an Index (not an IntervalIndex)
        intervals = [Interval(0, 1), Interval(1, 2), Interval(2, 3)]
        values = values_constructor(intervals)
        result = Index(values, dtype=object)

        assert type(result) is Index
        tm.assert_numpy_array_equal(result.values, np.array(values))

    def test_index_mixed_closed(self):
        # GH27172
        # Mixed closed sides cannot form an IntervalIndex; falls back to
        # an object-dtype Index.
        intervals = [
            Interval(0, 1, closed="left"),
            Interval(1, 2, closed="right"),
            Interval(2, 3, closed="neither"),
            Interval(3, 4, closed="both"),
        ]
        result = Index(intervals)
        expected = Index(intervals, dtype=object)
        tm.assert_index_equal(result, expected)
class TestFromIntervals(TestClassConstructors):
    """
    Tests for IntervalIndex.from_intervals, which is deprecated in favor of the
    IntervalIndex constructor.  Same tests as the IntervalIndex constructor,
    plus deprecation test.  Should only need to delete this class when removed.
    """

    @pytest.fixture
    def constructor(self):
        # Wrap the deprecated constructor so inherited tests do not fail
        # on the FutureWarning it emits.
        def from_intervals_ignore_warnings(*args, **kwargs):
            with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
                return IntervalIndex.from_intervals(*args, **kwargs)

        return from_intervals_ignore_warnings

    def test_deprecated(self):
        ivs = [Interval(0, 1), Interval(1, 2)]
        with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
            IntervalIndex.from_intervals(ivs)

    @pytest.mark.skip(reason="parent class test that is not applicable")
    def test_index_object_dtype(self):
        pass

    @pytest.mark.skip(reason="parent class test that is not applicable")
    def test_index_mixed_closed(self):
        pass
| {
"repo_name": "toobaz/pandas",
"path": "pandas/tests/indexes/interval/test_construction.py",
"copies": "2",
"size": "16294",
"license": "bsd-3-clause",
"hash": -272567250315981600,
"line_mean": 35.0486725664,
"line_max": 87,
"alpha_frac": 0.611820302,
"autogenerated": false,
"ratio": 4.069430569430569,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00019991227810375577,
"num_lines": 452
} |
from functools import partial
import numpy as np
import pytest
from pandas import (
DataFrame,
Series,
concat,
isna,
notna,
)
import pandas._testing as tm
import pandas.tseries.offsets as offsets
def scoreatpercentile(a, per):
    """Return the score at the fractional percentile *per* of *a*.

    Reference implementation mirroring ``scipy.stats.scoreatpercentile``
    with linear interpolation between the two bracketing order
    statistics, except that *per* is a fraction in ``[0.0, 1.0]`` rather
    than a percentage.

    Parameters
    ----------
    a : array-like
        Input values; sorted internally, so order does not matter.
    per : float
        Quantile to compute, in ``[0.0, 1.0]``.

    Returns
    -------
    scalar (or array for 2-D input, per column)
        The interpolated score at the requested quantile.
    """
    values = np.sort(a, axis=0)

    # Index of the order statistic at or just below the requested
    # quantile.  (The original multiplied by a no-op `per / 1.0`.)
    idx = int(per * (values.shape[0] - 1))
    if idx == values.shape[0] - 1:
        # per rounds to the last element: nothing to interpolate.
        retval = values[-1]
    else:
        # Linearly interpolate between the bracketing order statistics.
        qlow = idx / (values.shape[0] - 1)
        qhig = (idx + 1) / (values.shape[0] - 1)
        vlow = values[idx]
        vhig = values[idx + 1]
        retval = vlow + (vhig - vlow) * (per - qlow) / (qhig - qlow)

    return retval
@pytest.mark.parametrize("q", [0.0, 0.1, 0.5, 0.9, 1.0])
def test_series(series, q):
    # Last rolling value matches the reference quantile of the trailing
    # 50 elements.
    compare_func = partial(scoreatpercentile, per=q)
    result = series.rolling(50).quantile(q)
    assert isinstance(result, Series)
    tm.assert_almost_equal(result.iloc[-1], compare_func(series[-50:]))


@pytest.mark.parametrize("q", [0.0, 0.1, 0.5, 0.9, 1.0])
def test_frame(raw, frame, q):
    # Column-wise rolling quantile matches applying the reference
    # function to the trailing 50 rows.
    compare_func = partial(scoreatpercentile, per=q)
    result = frame.rolling(50).quantile(q)
    assert isinstance(result, DataFrame)
    tm.assert_series_equal(
        result.iloc[-1, :],
        frame.iloc[-50:, :].apply(compare_func, axis=0, raw=raw),
        check_names=False,
    )


@pytest.mark.parametrize("q", [0.0, 0.1, 0.5, 0.9, 1.0])
def test_time_rule_series(series, q):
    # Rolling quantile after business-day resampling agrees with the
    # reference computed on the truncated original series.
    compare_func = partial(scoreatpercentile, per=q)
    win = 25
    ser = series[::2].resample("B").mean()
    series_result = ser.rolling(window=win, min_periods=10).quantile(q)
    last_date = series_result.index[-1]
    prev_date = last_date - 24 * offsets.BDay()

    trunc_series = series[::2].truncate(prev_date, last_date)
    tm.assert_almost_equal(series_result[-1], compare_func(trunc_series))


@pytest.mark.parametrize("q", [0.0, 0.1, 0.5, 0.9, 1.0])
def test_time_rule_frame(raw, frame, q):
    # DataFrame analogue of test_time_rule_series.
    compare_func = partial(scoreatpercentile, per=q)
    win = 25
    frm = frame[::2].resample("B").mean()
    frame_result = frm.rolling(window=win, min_periods=10).quantile(q)
    last_date = frame_result.index[-1]
    prev_date = last_date - 24 * offsets.BDay()

    trunc_frame = frame[::2].truncate(prev_date, last_date)
    tm.assert_series_equal(
        frame_result.xs(last_date),
        trunc_frame.apply(compare_func, raw=raw),
        check_names=False,
    )


@pytest.mark.parametrize("q", [0.0, 0.1, 0.5, 0.9, 1.0])
def test_nans(q):
    # NaN padding at the edges is handled per min_periods.
    compare_func = partial(scoreatpercentile, per=q)
    obj = Series(np.random.randn(50))
    obj[:10] = np.NaN
    obj[-10:] = np.NaN

    result = obj.rolling(50, min_periods=30).quantile(q)
    tm.assert_almost_equal(result.iloc[-1], compare_func(obj[10:-10]))

    # min_periods is working correctly
    result = obj.rolling(20, min_periods=15).quantile(q)
    assert isna(result.iloc[23])
    assert not isna(result.iloc[24])

    assert not isna(result.iloc[-6])
    assert isna(result.iloc[-5])

    obj2 = Series(np.random.randn(20))
    result = obj2.rolling(10, min_periods=5).quantile(q)
    assert isna(result.iloc[3])
    assert notna(result.iloc[4])

    # min_periods=0 is equivalent to min_periods=1
    result0 = obj.rolling(20, min_periods=0).quantile(q)
    result1 = obj.rolling(20, min_periods=1).quantile(q)
    tm.assert_almost_equal(result0, result1)


@pytest.mark.parametrize("minp", [0, 99, 100])
@pytest.mark.parametrize("q", [0.0, 0.1, 0.5, 0.9, 1.0])
def test_min_periods(series, minp, q):
    # A window longer than the series behaves like one of exactly its
    # length for any min_periods.
    result = series.rolling(len(series) + 1, min_periods=minp).quantile(q)
    expected = series.rolling(len(series), min_periods=minp).quantile(q)
    nan_mask = isna(result)
    tm.assert_series_equal(nan_mask, isna(expected))

    nan_mask = ~nan_mask
    tm.assert_almost_equal(result[nan_mask], expected[nan_mask])


@pytest.mark.parametrize("q", [0.0, 0.1, 0.5, 0.9, 1.0])
def test_center(q):
    # center=True is equivalent to NaN-padding the tail and shifting back.
    obj = Series(np.random.randn(50))
    obj[:10] = np.NaN
    obj[-10:] = np.NaN

    result = obj.rolling(20, center=True).quantile(q)
    expected = (
        concat([obj, Series([np.NaN] * 9)])
        .rolling(20)
        .quantile(q)[9:]
        .reset_index(drop=True)
    )
    tm.assert_series_equal(result, expected)


@pytest.mark.parametrize("q", [0.0, 0.1, 0.5, 0.9, 1.0])
def test_center_reindex_series(series, q):
    # shifter index
    s = [f"x{x:d}" for x in range(12)]

    series_xp = (
        series.reindex(list(series.index) + s)
        .rolling(window=25)
        .quantile(q)
        .shift(-12)
        .reindex(series.index)
    )
    series_rs = series.rolling(window=25, center=True).quantile(q)
    tm.assert_series_equal(series_xp, series_rs)


@pytest.mark.parametrize("q", [0.0, 0.1, 0.5, 0.9, 1.0])
def test_center_reindex_frame(frame, q):
    # shifter index
    s = [f"x{x:d}" for x in range(12)]

    frame_xp = (
        frame.reindex(list(frame.index) + s)
        .rolling(window=25)
        .quantile(q)
        .shift(-12)
        .reindex(frame.index)
    )
    frame_rs = frame.rolling(window=25, center=True).quantile(q)
    tm.assert_frame_equal(frame_xp, frame_rs)
| {
"repo_name": "datapythonista/pandas",
"path": "pandas/tests/window/moments/test_moments_rolling_quantile.py",
"copies": "4",
"size": "5062",
"license": "bsd-3-clause",
"hash": -5044813251357502000,
"line_mean": 28.4302325581,
"line_max": 74,
"alpha_frac": 0.6201106282,
"autogenerated": false,
"ratio": 2.8761363636363635,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5496246991836363,
"avg_score": null,
"num_lines": null
} |
from functools import partial
import numpy as np
import pytest
import pandas.util._test_decorators as td
from pandas import (
DataFrame,
Series,
concat,
isna,
notna,
)
import pandas._testing as tm
import pandas.tseries.offsets as offsets
@td.skip_if_no_scipy
@pytest.mark.parametrize("sp_func, roll_func", [["kurtosis", "kurt"], ["skew", "skew"]])
def test_series(series, sp_func, roll_func):
    # Rolling skew/kurt match the bias-corrected scipy reference on the
    # trailing 50 elements.
    import scipy.stats

    compare_func = partial(getattr(scipy.stats, sp_func), bias=False)
    result = getattr(series.rolling(50), roll_func)()
    assert isinstance(result, Series)
    tm.assert_almost_equal(result.iloc[-1], compare_func(series[-50:]))


@td.skip_if_no_scipy
@pytest.mark.parametrize("sp_func, roll_func", [["kurtosis", "kurt"], ["skew", "skew"]])
def test_frame(raw, frame, sp_func, roll_func):
    # Column-wise rolling skew/kurt match the scipy reference.
    import scipy.stats

    compare_func = partial(getattr(scipy.stats, sp_func), bias=False)
    result = getattr(frame.rolling(50), roll_func)()
    assert isinstance(result, DataFrame)
    tm.assert_series_equal(
        result.iloc[-1, :],
        frame.iloc[-50:, :].apply(compare_func, axis=0, raw=raw),
        check_names=False,
    )


@td.skip_if_no_scipy
@pytest.mark.parametrize("sp_func, roll_func", [["kurtosis", "kurt"], ["skew", "skew"]])
def test_time_rule_series(series, sp_func, roll_func):
    # Rolling stat after business-day resampling agrees with the scipy
    # reference on the truncated original series.
    import scipy.stats

    compare_func = partial(getattr(scipy.stats, sp_func), bias=False)
    win = 25
    ser = series[::2].resample("B").mean()
    series_result = getattr(ser.rolling(window=win, min_periods=10), roll_func)()
    last_date = series_result.index[-1]
    prev_date = last_date - 24 * offsets.BDay()

    trunc_series = series[::2].truncate(prev_date, last_date)
    tm.assert_almost_equal(series_result[-1], compare_func(trunc_series))


@td.skip_if_no_scipy
@pytest.mark.parametrize("sp_func, roll_func", [["kurtosis", "kurt"], ["skew", "skew"]])
def test_time_rule_frame(raw, frame, sp_func, roll_func):
    # DataFrame analogue of test_time_rule_series.
    import scipy.stats

    compare_func = partial(getattr(scipy.stats, sp_func), bias=False)
    win = 25
    frm = frame[::2].resample("B").mean()
    frame_result = getattr(frm.rolling(window=win, min_periods=10), roll_func)()
    last_date = frame_result.index[-1]
    prev_date = last_date - 24 * offsets.BDay()

    trunc_frame = frame[::2].truncate(prev_date, last_date)
    tm.assert_series_equal(
        frame_result.xs(last_date),
        trunc_frame.apply(compare_func, raw=raw),
        check_names=False,
    )


@td.skip_if_no_scipy
@pytest.mark.parametrize("sp_func, roll_func", [["kurtosis", "kurt"], ["skew", "skew"]])
def test_nans(sp_func, roll_func):
    # NaN padding at the edges is handled per min_periods.
    import scipy.stats

    compare_func = partial(getattr(scipy.stats, sp_func), bias=False)
    obj = Series(np.random.randn(50))
    obj[:10] = np.NaN
    obj[-10:] = np.NaN

    result = getattr(obj.rolling(50, min_periods=30), roll_func)()
    tm.assert_almost_equal(result.iloc[-1], compare_func(obj[10:-10]))

    # min_periods is working correctly
    result = getattr(obj.rolling(20, min_periods=15), roll_func)()
    assert isna(result.iloc[23])
    assert not isna(result.iloc[24])

    assert not isna(result.iloc[-6])
    assert isna(result.iloc[-5])

    obj2 = Series(np.random.randn(20))
    result = getattr(obj2.rolling(10, min_periods=5), roll_func)()
    assert isna(result.iloc[3])
    assert notna(result.iloc[4])

    # min_periods=0 is equivalent to min_periods=1
    result0 = getattr(obj.rolling(20, min_periods=0), roll_func)()
    result1 = getattr(obj.rolling(20, min_periods=1), roll_func)()
    tm.assert_almost_equal(result0, result1)


@pytest.mark.parametrize("minp", [0, 99, 100])
@pytest.mark.parametrize("roll_func", ["kurt", "skew"])
def test_min_periods(series, minp, roll_func):
    # A window longer than the series behaves like one of exactly its
    # length for any min_periods.
    result = getattr(series.rolling(len(series) + 1, min_periods=minp), roll_func)()
    expected = getattr(series.rolling(len(series), min_periods=minp), roll_func)()
    nan_mask = isna(result)
    tm.assert_series_equal(nan_mask, isna(expected))

    nan_mask = ~nan_mask
    tm.assert_almost_equal(result[nan_mask], expected[nan_mask])


@pytest.mark.parametrize("roll_func", ["kurt", "skew"])
def test_center(roll_func):
    # center=True is equivalent to NaN-padding the tail and shifting back.
    obj = Series(np.random.randn(50))
    obj[:10] = np.NaN
    obj[-10:] = np.NaN

    result = getattr(obj.rolling(20, center=True), roll_func)()
    expected = getattr(concat([obj, Series([np.NaN] * 9)]).rolling(20), roll_func)()[
        9:
    ].reset_index(drop=True)
    tm.assert_series_equal(result, expected)


@pytest.mark.parametrize("roll_func", ["kurt", "skew"])
def test_center_reindex_series(series, roll_func):
    # shifter index
    s = [f"x{x:d}" for x in range(12)]

    series_xp = (
        getattr(
            series.reindex(list(series.index) + s).rolling(window=25),
            roll_func,
        )()
        .shift(-12)
        .reindex(series.index)
    )
    series_rs = getattr(series.rolling(window=25, center=True), roll_func)()
    tm.assert_series_equal(series_xp, series_rs)


@pytest.mark.slow
@pytest.mark.parametrize("roll_func", ["kurt", "skew"])
def test_center_reindex_frame(frame, roll_func):
    # shifter index
    s = [f"x{x:d}" for x in range(12)]

    frame_xp = (
        getattr(
            frame.reindex(list(frame.index) + s).rolling(window=25),
            roll_func,
        )()
        .shift(-12)
        .reindex(frame.index)
    )
    frame_rs = getattr(frame.rolling(window=25, center=True), roll_func)()
    tm.assert_frame_equal(frame_xp, frame_rs)
| {
"repo_name": "datapythonista/pandas",
"path": "pandas/tests/window/moments/test_moments_rolling_skew_kurt.py",
"copies": "3",
"size": "5452",
"license": "bsd-3-clause",
"hash": -9119401419644678000,
"line_mean": 31.0705882353,
"line_max": 88,
"alpha_frac": 0.644350697,
"autogenerated": false,
"ratio": 3.1083238312428736,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5252674528242873,
"avg_score": null,
"num_lines": null
} |
from functools import partial
import numpy as np
import scipy
import skbio
def expand_otu_ids(ids, counts):
    """Lists each OTU id the number of times provided in its count.

    Parameters
    ----------
    ids : 1D-ndarray
        The ids, corresponding positionally to the values in `counts`.
    counts : iterable
        Count values, one per id in `ids`.

    Returns
    -------
    1D-ndarray
        Each id repeated as many times as its count (object dtype);
        ids with a zero count are omitted.
    """
    # NOTE: the previous docstring also advertised a second return value
    # (ids with zero counts) that was never produced; it has been removed.

    # Object dtype so string ids survive concatenation unchanged.
    ids = np.asarray(ids).astype('object')
    # np.repeat is equivalent to the old hstack-of-lists construction,
    # but also handles empty input (hstack raised on an empty list).
    return np.repeat(ids, list(counts))
def collapse_otu_ids(id_list, order=None):
    """Collapses a flat list of observed ids into per-id counts.

    Parameters
    ----------
    id_list : ndarray
        Each id listed once per observation.
    order : ndarray, optional
        The order in which the final OTU counts should be returned.
        Defaults to the sorted unique ids in `id_list`.  Ids in `order`
        that never occur in `id_list` receive a count of 0.

    Returns
    -------
    1D-ndarray
        The ids, in `order`.
    1D-ndarray
        The count for each id, aligned with the first return value.
    """
    if order is None:
        order = np.unique(id_list)
    tallies = [np.count_nonzero(id_list == otu) for otu in order]
    return order, np.array(tallies)
def subsample_features(counts, depth, feature_ids=None, bootstrap=True):
    """Draws a fixed-depth subsample from every row of a count table.

    Parameters
    ----------
    counts : ndarray
        An m x n count table; subsampling is performed row by row.
    depth : int
        The number of observations to draw per row.
    feature_ids : 1D-ndarray, optional
        Names for the n columns; defaults to 0..n-1.
    bootstrap : bool, optional
        When True, draws are made with replacement.

    Returns
    -------
    ndarray
        An m x n array in which every row sums to `depth`.
    """
    if feature_ids is None:
        feature_ids = np.arange(0, counts.shape[1])

    resampled_rows = []
    for row in counts:
        # Expand the row to one entry per observation, draw `depth` of
        # them, and tally the draws back into the original column order.
        pool = expand_otu_ids(feature_ids, row)
        draw = np.random.choice(pool, depth, replace=bootstrap)
        resampled_rows.append(collapse_otu_ids(draw, order=feature_ids)[1])

    return np.vstack(resampled_rows)
def bootstrap_permanova(obs_ids, obs, depth, grouping,
                        bootstrap=True, metric=None, permutations=99,
                        metric_kws=None):
    """Calculates a bootstrapped permanova for samples within the OTU table

    Parameters
    ----------
    obs_ids: array-like
        A list of ids in the observation table and grouping. The ids do not
        have to be unique. Must be a subset of the ids in both `obs` and
        `grouping`.
    obs: DataFrame
        A pandas dataframe of the observational data where the rows are the
        observations and the columns are the features. Note that if this is
        transformed from a biom object, the object will need to be transposed.
    depth : int
        The number of observations to draw for each observation
    grouping : Series
        Vector indicating the assignment of objects to groups.
    bootstrap: bool, optional
        When `true`, feature counts can be drawn with replacement for each
        observation.
    metric: callable, optional
        The distance metric to be used for the distance matrix calculation. If
        no metric is specified, bray-curtis distance will be used.
    permutations : int, optional
        Number of permutations to use when assessing statistical
        significance. Must be greater than or equal to zero. If zero,
        statistical significance calculations will be skipped and the p-value
        will be ``np.nan``.
    metric_kws: dict, optional
        A key/value pair of keyword arguments for the distance calculation.
        NOTE: only applied when an explicit `metric` is supplied; ignored
        for the default bray-curtis metric.

    Returns
    -------
    PermanovaResults
        The skbio permanova results (test statistic and p-value).
    DistanceMatrix
        The distance matrix computed from the rarefied table.

    Also See
    --------
    scipy.spatial.distance.braycurtis
    skbio.stats.distance.permanova
    """
    if metric is None:
        metric = scipy.spatial.distance.braycurtis
    elif metric_kws is not None:
        # Bind the extra keyword arguments into the metric up front.
        metric = partial(metric, **metric_kws)

    obs_ids = np.hstack(obs_ids)
    feature_ids = obs.columns

    # Gets the rarified table
    rare = subsample_features(obs.loc[obs_ids].values,
                              depth=depth,
                              feature_ids=feature_ids,
                              bootstrap=bootstrap)
    grouping = grouping.loc[obs_ids]

    # Calculates the distance matrix from the bootstrapped feature x
    # observation table
    dm = skbio.DistanceMatrix.from_iterable(rare, metric=metric)

    # Performs the permanova on the distance matrix.
    permanova_res = skbio.stats.distance.permanova(dm, grouping.values,
                                                   permutations=permutations)

    return permanova_res, dm
| {
"repo_name": "jwdebelius/Machiavellian",
"path": "machivellian/beta.py",
"copies": "1",
"size": "5277",
"license": "bsd-3-clause",
"hash": 674973792075310300,
"line_mean": 31.3742331288,
"line_max": 79,
"alpha_frac": 0.6325563767,
"autogenerated": false,
"ratio": 4.290243902439024,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5422800279139024,
"avg_score": null,
"num_lines": null
} |
from functools import partial
import numpy as np
import theano
import theano.tensor as T
from scipy import stats
from .dist_math import bound, factln, binomln, betaln, logpow
from .distribution import Discrete, draw_values, generate_samples
__all__ = ['Binomial', 'BetaBinomial', 'Bernoulli', 'Poisson',
'NegativeBinomial', 'ConstantDist', 'ZeroInflatedPoisson',
'DiscreteUniform', 'Geometric', 'Categorical']
class Binomial(Discrete):
    r"""
    Binomial log-likelihood.

    The discrete probability distribution of the number of successes
    in a sequence of n independent yes/no experiments, each of which
    yields success with probability p.

    .. math:: f(x \mid n, p) = \binom{n}{x} p^x (1-p)^{n-x}

    ========  ==========================================
    Support   :math:`x \in \{0, 1, \ldots, n\}`
    Mean      :math:`n p`
    Variance  :math:`n p (1 - p)`
    ========  ==========================================

    Parameters
    ----------
    n : int
        Number of Bernoulli trials (n >= 0).
    p : float
        Probability of success in each trial (0 < p < 1).
    """

    def __init__(self, n, p, *args, **kwargs):
        super(Binomial, self).__init__(*args, **kwargs)
        self.n = n
        self.p = p
        # Default starting value: round the mean n*p to an integer.
        self.mode = T.cast(T.round(n * p), self.dtype)

    def random(self, point=None, size=None, repeat=None):
        # Draw concrete parameter values, then delegate sampling to scipy.
        n, p = draw_values([self.n, self.p], point=point)
        return generate_samples(stats.binom.rvs, n=n, p=p,
                                dist_shape=self.shape,
                                size=size)

    def logp(self, value):
        n = self.n
        p = self.p

        # bound(...) yields -inf outside the support / parameter domain.
        return bound(
            binomln(n, value) + logpow(p, value) + logpow(1 - p, n - value),
            0 <= value, value <= n,
            0 <= p, p <= 1)
class BetaBinomial(Discrete):
    r"""
    Beta-binomial log-likelihood.

    Equivalent to binomial random variable with success probability
    drawn from a beta distribution.

    .. math::

       f(x \mid \alpha, \beta, n) =
           \binom{n}{x}
           \frac{B(x + \alpha, n - x + \beta)}{B(\alpha, \beta)}

    ========  =================================================================
    Support   :math:`x \in \{0, 1, \ldots, n\}`
    Mean      :math:`n \dfrac{\alpha}{\alpha + \beta}`
    Variance  :math:`n \dfrac{\alpha \beta}{(\alpha+\beta)^2 (\alpha+\beta+1)}`
    ========  =================================================================

    Parameters
    ----------
    n : int
        Number of Bernoulli trials (n >= 0).
    alpha : float
        alpha > 0.
    beta : float
        beta > 0.
    """

    def __init__(self, alpha, beta, n, *args, **kwargs):
        super(BetaBinomial, self).__init__(*args, **kwargs)
        self.alpha = alpha
        self.beta = beta
        self.n = n
        # Default starting value: round the mean n * alpha / (alpha + beta).
        # The previous expression rounded alpha / (alpha + beta) alone,
        # which is always 0 or 1 regardless of n.
        # NOTE(review): the 'int8' cast (kept for interface stability)
        # overflows for n > 127 — consider widening the dtype.
        self.mode = T.cast(T.round(alpha / (alpha + beta) * n), 'int8')

    def _random(self, alpha, beta, n, size=None):
        size = size or 1
        # Draw the latent success probabilities from the beta prior.
        p = np.atleast_1d(stats.beta.rvs(a=alpha, b=beta, size=np.prod(size)))
        # Sometimes scipy.beta returns nan. Ugh.
        while np.any(np.isnan(p)):
            i = np.isnan(p)
            p[i] = stats.beta.rvs(a=alpha, b=beta, size=np.sum(i))
        # Sigh...
        _n, _p, _size = np.atleast_1d(n).flatten(), p.flatten(), np.prod(size)

        samples = np.reshape(stats.binom.rvs(n=_n, p=_p, size=_size), size)
        return samples

    def random(self, point=None, size=None, repeat=None):
        alpha, beta, n = \
            draw_values([self.alpha, self.beta, self.n], point=point)
        return generate_samples(self._random, alpha=alpha, beta=beta, n=n,
                                dist_shape=self.shape,
                                size=size)

    def logp(self, value):
        alpha = self.alpha
        beta = self.beta
        # Beta-binomial pmf expressed through log-beta functions; bound()
        # yields -inf outside the support / parameter domain.
        return bound(binomln(self.n, value)
                     + betaln(value + alpha, self.n - value + beta)
                     - betaln(alpha, beta),
                     value >= 0, value <= self.n,
                     alpha > 0, beta > 0)
class Bernoulli(Discrete):
    r"""Bernoulli log-likelihood

    The Bernoulli distribution describes the probability of successes
    (x=1) and failures (x=0).

    .. math:: f(x \mid p) = p^{x} (1-p)^{1-x}

    ========  ======================
    Support   :math:`x \in \{0, 1\}`
    Mean      :math:`p`
    Variance  :math:`p (1 - p)`
    ========  ======================

    Parameters
    ----------
    p : float
        Probability of success (0 < p < 1).
    """

    def __init__(self, p, *args, **kwargs):
        super(Bernoulli, self).__init__(*args, **kwargs)
        self.p = p
        # Default starting value: the more likely of the two outcomes.
        self.mode = T.cast(T.round(p), 'int8')

    def random(self, point=None, size=None, repeat=None):
        p = draw_values([self.p], point=point)
        return generate_samples(stats.bernoulli.rvs, p,
                                dist_shape=self.shape,
                                size=size)

    def logp(self, value):
        p = self.p
        # log(p) for successes, log(1 - p) for failures.
        return bound(
            T.switch(value, T.log(p), T.log(1 - p)),
            value >= 0, value <= 1,
            p >= 0, p <= 1)
class Poisson(Discrete):
    r"""
    Poisson log-likelihood.

    Often used to model the number of events occurring in a fixed period
    of time when the times at which events occur are independent.

    .. math:: f(x \mid \mu) = \frac{e^{-\mu}\mu^x}{x!}

    ========  ==========================
    Support   :math:`x \in \mathbb{N}_0`
    Mean      :math:`\mu`
    Variance  :math:`\mu`
    ========  ==========================

    Parameters
    ----------
    mu : float
        Expected number of occurrences during the given interval
        (mu >= 0).

    Notes
    -----
    The Poisson distribution can be derived as a limiting case of the
    binomial distribution.
    """

    def __init__(self, mu, *args, **kwargs):
        super(Poisson, self).__init__(*args, **kwargs)
        self.mu = mu
        # Default starting value: floor(mu), the distribution's mode.
        self.mode = T.floor(mu).astype('int32')

    def random(self, point=None, size=None, repeat=None):
        mu = draw_values([self.mu], point=point)
        return generate_samples(stats.poisson.rvs, mu,
                                dist_shape=self.shape,
                                size=size)

    def logp(self, value):
        mu = self.mu
        # log pmf: value*log(mu) - log(value!) - mu.
        return bound(
            logpow(mu, value) - factln(value) - mu,
            mu >= 0, value >= 0)
class NegativeBinomial(Discrete):
    r"""
    Negative binomial log-likelihood.

    The negative binomial distribution describes a Poisson random variable
    whose rate parameter is gamma distributed.

    .. math::

       f(x \mid \mu, \alpha) =
           \frac{\Gamma(x+\alpha)}{x! \Gamma(\alpha)}
           (\alpha/(\mu+\alpha))^\alpha (\mu/(\mu+\alpha))^x

    ========  ==========================
    Support   :math:`x \in \mathbb{N}_0`
    Mean      :math:`\mu`
    ========  ==========================

    Parameters
    ----------
    mu : float
        Poission distribution parameter (mu > 0).
    alpha : float
        Gamma distribution parameter (alpha > 0).
    """

    def __init__(self, mu, alpha, *args, **kwargs):
        super(NegativeBinomial, self).__init__(*args, **kwargs)
        self.mu = mu
        self.alpha = alpha
        # Default starting value: floor(mu), tracking the mean.
        self.mode = T.floor(mu).astype('int32')

    def random(self, point=None, size=None, repeat=None):
        mu, alpha = draw_values([self.mu, self.alpha], point=point)
        # Gamma-Poisson mixture: the mixing rate must be
        # Gamma(shape=alpha, scale=mu / alpha), whose mean is mu, so the
        # mixed Poisson has mean mu as documented.  The previous
        # scale=alpha / mu produced samples with mean alpha**2 / mu,
        # disagreeing with logp.
        g = generate_samples(stats.gamma.rvs, alpha, scale=mu / alpha,
                             dist_shape=self.shape,
                             size=size)
        g[g == 0] = np.finfo(float).eps  # Just in case
        return stats.poisson.rvs(g)

    def logp(self, value):
        mu = self.mu
        alpha = self.alpha
        negbinom = bound(binomln(value + alpha - 1, value)
                         + logpow(mu / (mu + alpha), value)
                         + logpow(alpha / (mu + alpha), alpha),
                         value >= 0, mu > 0, alpha > 0)

        # Return Poisson when alpha gets very large.
        return T.switch(1 * (alpha > 1e10),
                        Poisson.dist(self.mu).logp(value),
                        negbinom)
class Geometric(Discrete):
    r"""
    Geometric log-likelihood.

    The probability that the first success in a sequence of Bernoulli
    trials occurs on the x'th trial.

    .. math:: f(x \mid p) = p(1-p)^{x-1}

    ========  =============================
    Support   :math:`x \in \mathbb{N}_{>0}`
    Mean      :math:`\dfrac{1}{p}`
    Variance  :math:`\dfrac{1 - p}{p^2}`
    ========  =============================

    Parameters
    ----------
    p : float
        Probability of success on an individual trial (0 < p <= 1).
    """

    def __init__(self, p, *args, **kwargs):
        super(Geometric, self).__init__(*args, **kwargs)
        self.p = p
        # The mode of the geometric distribution is always 1.
        self.mode = 1

    def random(self, point=None, size=None, repeat=None):
        p = draw_values([self.p], point=point)
        return generate_samples(np.random.geometric, p,
                                dist_shape=self.shape,
                                size=size)

    def logp(self, value):
        p = self.p
        # log pmf: log(p) + (value - 1) * log(1 - p).
        return bound(T.log(p) + logpow(1 - p, value - 1),
                     0 <= p, p <= 1, value >= 1)
class DiscreteUniform(Discrete):
    r"""Discrete uniform distribution.

    .. math:: f(x \mid lower, upper) = \frac{1}{upper-lower}

    ======== ===============================================
    Support  :math:`x \in {lower, lower + 1, \ldots, upper}`
    Mean     :math:`\dfrac{lower + upper}{2}`
    Variance :math:`\dfrac{(upper - lower)^2}{12}`
    ======== ===============================================

    Parameters
    ----------
    lower : int
        Lower limit.
    upper : int
        Upper limit (upper > lower).
    """

    def __init__(self, lower, upper, *args, **kwargs):
        super(DiscreteUniform, self).__init__(*args, **kwargs)
        self.lower = T.floor(lower).astype('int32')
        self.upper = T.floor(upper).astype('int32')
        # floor((upper - lower) / 2) is an *offset* from lower, so it can
        # fall below the support (e.g. lower=10, upper=12 gave mode=1).
        # Clamp to the support's lower bound, matching the later upstream
        # version of this class.
        self.mode = T.maximum(
            T.floor((upper - lower) / 2.).astype('int32'), self.lower)

    def _random(self, lower, upper, size=None):
        # This way seems to be the only one that accepts lower and upper
        # as array-like.
        # NOTE(review): the sampled width is upper - lower - eps, so after
        # flooring `upper` itself is never drawn even though logp includes
        # it in the support -- confirm whether that is intended.
        samples = stats.uniform.rvs(lower, upper - lower - np.finfo(float).eps,
                                    size=size)
        return np.floor(samples).astype('int32')

    def random(self, point=None, size=None, repeat=None):
        lower, upper = draw_values([self.lower, self.upper], point=point)
        return generate_samples(self._random,
                                lower, upper,
                                dist_shape=self.shape,
                                size=size)

    def logp(self, value):
        upper = self.upper
        lower = self.lower
        return bound(-T.log(upper - lower + 1),
                     lower <= value, value <= upper)
class Categorical(Discrete):
    r"""Categorical log-likelihood.

    The most general distribution over a finite set of outcomes.

    .. math:: f(x \mid p) = p_x

    ======== ===================================
    Support  :math:`x \in \{1, 2, \ldots, |p|\}`
    ======== ===================================

    Parameters
    ----------
    p : float
        p > 0 and the elements of p must sum to 1.
    """

    def __init__(self, p, *args, **kwargs):
        super(Categorical, self).__init__(*args, **kwargs)
        self.k = np.shape(p)[-1]
        self.p = T.as_tensor_variable(p)
        self.mode = T.argmax(p)

    def random(self, point=None, size=None, repeat=None):
        p, k = draw_values([self.p, self.k], point=point)
        sampler = partial(np.random.choice, np.arange(k))
        return generate_samples(sampler,
                                p=p,
                                broadcast_shape=p.shape[:-1] or (1,),
                                dist_shape=self.shape,
                                size=size)

    def logp(self, value):
        p, k = self.p, self.k
        # Tolerate a little numerical drift in the normalization check; the
        # check itself carries no gradient.
        sumto1 = theano.gradient.zero_grad(T.le(abs(T.sum(p) - 1), 1e-5))
        return bound(T.log(p[value]),
                     value >= 0, value <= (k - 1),
                     sumto1)
class ConstantDist(Discrete):
    """Constant log-likelihood.

    All probability mass sits on the single value ``c``.

    Parameters
    ----------
    value : float or int
        Constant parameter.
    """

    def __init__(self, c, *args, **kwargs):
        super(ConstantDist, self).__init__(*args, **kwargs)
        self.mean = self.median = self.mode = self.c = c

    def random(self, point=None, size=None, repeat=None):
        c = draw_values([self.c], point=point)
        dtype = np.array(c).dtype

        def _random(c, dtype=dtype, size=None):
            # Every draw is the constant itself.
            return np.full(size, fill_value=c, dtype=dtype)

        samples = generate_samples(_random, c=c, dist_shape=self.shape,
                                   size=size)
        return samples.astype(dtype)

    def logp(self, value):
        # Log-probability 0 at c, -inf everywhere else.
        return bound(0, T.eq(value, self.c))
class ZeroInflatedPoisson(Discrete):
    """Zero-inflated Poisson log-likelihood.

    Mixture of a Poisson(theta) component and a point mass at zero; the
    tensor ``z`` selects between them in ``logp``.

    Parameters
    ----------
    theta : float
        Rate of the Poisson component.
    z : tensor
        Selector between the Poisson branch and the constant-zero branch
        (presumably a per-observation 0/1 indicator -- confirm with callers).
    """

    def __init__(self, theta, z, *args, **kwargs):
        super(ZeroInflatedPoisson, self).__init__(*args, **kwargs)
        self.theta = theta
        self.z = z
        self.pois = Poisson.dist(theta)
        # Degenerate point mass at zero for the inflated component.
        self.const = ConstantDist.dist(0)
        self.mode = self.pois.mode

    def random(self, point=None, size=None, repeat=None):
        # Sampling is not implemented upstream: theta is drawn but no
        # samples are produced.
        theta = draw_values([self.theta], point=point)
        # To do: Finish me
        return None

    def logp(self, value):
        # Truthy z selects the Poisson branch, otherwise the point mass.
        return T.switch(self.z,
                        self.pois.logp(value),
                        self.const.logp(value))
| {
"repo_name": "superbobry/pymc3",
"path": "pymc3/distributions/discrete.py",
"copies": "1",
"size": "13728",
"license": "apache-2.0",
"hash": -4800759721221485000,
"line_mean": 31,
"line_max": 79,
"alpha_frac": 0.4952651515,
"autogenerated": false,
"ratio": 3.7082658022690436,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47035309537690434,
"avg_score": null,
"num_lines": null
} |
from functools import partial
import numpy as np
import theano
import theano.tensor as tt
from scipy import stats
from .dist_math import bound, factln, binomln, betaln, logpow
from .distribution import Discrete, draw_values, generate_samples
__all__ = ['Binomial', 'BetaBinomial', 'Bernoulli', 'Poisson',
'NegativeBinomial', 'ConstantDist', 'Constant', 'ZeroInflatedPoisson',
'ZeroInflatedNegativeBinomial', 'DiscreteUniform', 'Geometric',
'Categorical']
class Binomial(Discrete):
    R"""Binomial log-likelihood.

    Number of successes in a sequence of n independent yes/no experiments,
    each succeeding with probability p.

    .. math:: f(x \mid n, p) = \binom{n}{x} p^x (1-p)^{n-x}

    ======== ==========================================
    Support  :math:`x \in \{0, 1, \ldots, n\}`
    Mean     :math:`n p`
    Variance :math:`n p (1 - p)`
    ======== ==========================================

    Parameters
    ----------
    n : int
        Number of Bernoulli trials (n >= 0).
    p : float
        Probability of success in each trial (0 < p < 1).
    """

    def __init__(self, n, p, *args, **kwargs):
        super(Binomial, self).__init__(*args, **kwargs)
        self.n = n
        self.p = p
        # Most probable count is the rounded mean.
        self.mode = tt.cast(tt.round(n * p), self.dtype)

    def random(self, point=None, size=None, repeat=None):
        n, p = draw_values([self.n, self.p], point=point)
        return generate_samples(stats.binom.rvs, n=n, p=p,
                                dist_shape=self.shape, size=size)

    def logp(self, value):
        n, p = self.n, self.p
        log_prob = (binomln(n, value)
                    + logpow(p, value)
                    + logpow(1 - p, n - value))
        return bound(log_prob,
                     0 <= value, value <= n,
                     0 <= p, p <= 1)
class BetaBinomial(Discrete):
    R"""Beta-binomial log-likelihood.

    A binomial whose success probability is itself drawn from a beta
    distribution.

    .. math::
       f(x \mid \alpha, \beta, n) =
           \binom{n}{x}
           \frac{B(x + \alpha, n - x + \beta)}{B(\alpha, \beta)}

    ======== =================================================================
    Support  :math:`x \in \{0, 1, \ldots, n\}`
    Mean     :math:`n \dfrac{\alpha}{\alpha + \beta}`
    Variance :math:`n \dfrac{\alpha \beta}{(\alpha+\beta)^2 (\alpha+\beta+1)}`
    ======== =================================================================

    Parameters
    ----------
    n : int
        Number of Bernoulli trials (n >= 0).
    alpha : float
        alpha > 0.
    beta : float
        beta > 0.
    """

    def __init__(self, alpha, beta, n, *args, **kwargs):
        super(BetaBinomial, self).__init__(*args, **kwargs)
        self.alpha = alpha
        self.beta = beta
        self.n = n
        self.mode = tt.cast(tt.round(alpha / (alpha + beta)), 'int8')

    def _random(self, alpha, beta, n, size=None):
        size = size or 1
        n_draws = np.prod(size)
        p = np.atleast_1d(stats.beta.rvs(a=alpha, b=beta, size=n_draws))
        # scipy's beta sampler occasionally yields nan; redraw until clean.
        while np.any(np.isnan(p)):
            nan_mask = np.isnan(p)
            p[nan_mask] = stats.beta.rvs(a=alpha, b=beta, size=np.sum(nan_mask))
        # Flatten everything so scipy broadcasts, then restore the shape.
        flat_n = np.atleast_1d(n).flatten()
        draws = stats.binom.rvs(n=flat_n, p=p.flatten(), size=n_draws)
        return np.reshape(draws, size)

    def random(self, point=None, size=None, repeat=None):
        alpha, beta, n = draw_values([self.alpha, self.beta, self.n],
                                     point=point)
        return generate_samples(self._random, alpha=alpha, beta=beta, n=n,
                                dist_shape=self.shape, size=size)

    def logp(self, value):
        alpha, beta, n = self.alpha, self.beta, self.n
        return bound(binomln(n, value)
                     + betaln(value + alpha, n - value + beta)
                     - betaln(alpha, beta),
                     value >= 0, value <= n,
                     alpha > 0, beta > 0)
class Bernoulli(Discrete):
    R"""Bernoulli log-likelihood.

    Distribution over binary outcomes: success (x=1) with probability p,
    failure (x=0) otherwise.

    .. math:: f(x \mid p) = p^{x} (1-p)^{1-x}

    ======== ======================
    Support  :math:`x \in \{0, 1\}`
    Mean     :math:`p`
    Variance :math:`p (1 - p)`
    ======== ======================

    Parameters
    ----------
    p : float
        Probability of success (0 < p < 1).
    """

    def __init__(self, p, *args, **kwargs):
        super(Bernoulli, self).__init__(*args, **kwargs)
        self.p = p
        self.mode = tt.cast(tt.round(p), 'int8')

    def random(self, point=None, size=None, repeat=None):
        p = draw_values([self.p], point=point)
        return generate_samples(stats.bernoulli.rvs, p,
                                dist_shape=self.shape, size=size)

    def logp(self, value):
        p = self.p
        log_prob = tt.switch(value, tt.log(p), tt.log(1 - p))
        return bound(log_prob,
                     value >= 0, value <= 1,
                     p >= 0, p <= 1)
class Poisson(Discrete):
    R"""Poisson log-likelihood.

    Models counts of independent events occurring in a fixed interval.

    .. math:: f(x \mid \mu) = \frac{e^{-\mu}\mu^x}{x!}

    ======== ==========================
    Support  :math:`x \in \mathbb{N}_0`
    Mean     :math:`\mu`
    Variance :math:`\mu`
    ======== ==========================

    Parameters
    ----------
    mu : float
        Expected number of occurrences during the given interval
        (mu >= 0).

    Notes
    -----
    Arises as a limiting case of the binomial distribution.
    """

    def __init__(self, mu, *args, **kwargs):
        super(Poisson, self).__init__(*args, **kwargs)
        self.mu = mu
        self.mode = tt.floor(mu).astype('int32')

    def random(self, point=None, size=None, repeat=None):
        mu = draw_values([self.mu], point=point)
        return generate_samples(stats.poisson.rvs, mu,
                                dist_shape=self.shape, size=size)

    def logp(self, value):
        mu = self.mu
        log_prob = bound(logpow(mu, value) - factln(value) - mu,
                         mu >= 0, value >= 0)
        # The degenerate case mu == 0, value == 0 carries probability one.
        return tt.switch(1 * tt.eq(mu, 0) * tt.eq(value, 0),
                         0, log_prob)
class NegativeBinomial(Discrete):
    R"""Negative binomial log-likelihood.

    A Poisson random variable whose rate parameter is itself gamma
    distributed.

    .. math::
       f(x \mid \mu, \alpha) =
           \frac{\Gamma(x+\alpha)}{x! \Gamma(\alpha)}
           (\alpha/(\mu+\alpha))^\alpha (\mu/(\mu+\alpha))^x

    ======== ==========================
    Support  :math:`x \in \mathbb{N}_0`
    Mean     :math:`\mu`
    ======== ==========================

    Parameters
    ----------
    mu : float
        Poission distribution parameter (mu > 0).
    alpha : float
        Gamma distribution parameter (alpha > 0).
    """

    def __init__(self, mu, alpha, *args, **kwargs):
        super(NegativeBinomial, self).__init__(*args, **kwargs)
        self.mu = mu
        self.alpha = alpha
        self.mode = tt.floor(mu).astype('int32')

    def random(self, point=None, size=None, repeat=None):
        mu, alpha = draw_values([self.mu, self.alpha], point=point)
        # Latent gamma rate with mean mu, then Poisson counts given the rate.
        rates = generate_samples(stats.gamma.rvs, alpha, scale=mu / alpha,
                                 dist_shape=self.shape, size=size)
        rates[rates == 0] = np.finfo(float).eps  # Just in case
        return stats.poisson.rvs(rates)

    def logp(self, value):
        mu, alpha = self.mu, self.alpha
        negbinom = bound(binomln(value + alpha - 1, value)
                         + logpow(mu / (mu + alpha), value)
                         + logpow(alpha / (mu + alpha), alpha),
                         value >= 0, mu > 0, alpha > 0)
        # As alpha grows very large the NB converges to Poisson(mu).
        return tt.switch(1 * (alpha > 1e10),
                         Poisson.dist(self.mu).logp(value),
                         negbinom)
class Geometric(Discrete):
    R"""Geometric log-likelihood.

    Number of Bernoulli trials up to and including the first success.

    .. math:: f(x \mid p) = p(1-p)^{x-1}

    ======== =============================
    Support  :math:`x \in \mathbb{N}_{>0}`
    Mean     :math:`\dfrac{1}{p}`
    Variance :math:`\dfrac{1 - p}{p^2}`
    ======== =============================

    Parameters
    ----------
    p : float
        Probability of success on an individual trial (0 < p <= 1).
    """

    def __init__(self, p, *args, **kwargs):
        super(Geometric, self).__init__(*args, **kwargs)
        self.p = p
        self.mode = 1  # the pmf always peaks at the first trial

    def random(self, point=None, size=None, repeat=None):
        p = draw_values([self.p], point=point)
        return generate_samples(np.random.geometric, p,
                                dist_shape=self.shape, size=size)

    def logp(self, value):
        p = self.p
        log_prob = tt.log(p) + logpow(1 - p, value - 1)
        return bound(log_prob, 0 <= p, p <= 1, value >= 1)
class DiscreteUniform(Discrete):
    R"""Discrete uniform distribution.

    Equal probability on each integer between lower and upper inclusive.

    .. math:: f(x \mid lower, upper) = \frac{1}{upper-lower}

    ======== ===============================================
    Support  :math:`x \in {lower, lower + 1, \ldots, upper}`
    Mean     :math:`\dfrac{lower + upper}{2}`
    Variance :math:`\dfrac{(upper - lower)^2}{12}`
    ======== ===============================================

    Parameters
    ----------
    lower : int
        Lower limit.
    upper : int
        Upper limit (upper > lower).
    """

    def __init__(self, lower, upper, *args, **kwargs):
        super(DiscreteUniform, self).__init__(*args, **kwargs)
        self.lower = tt.floor(lower).astype('int32')
        self.upper = tt.floor(upper).astype('int32')
        # Midpoint offset, clamped so the mode never falls below the support.
        midpoint = tt.floor((upper - lower) / 2.).astype('int32')
        self.mode = tt.maximum(midpoint, self.lower)

    def _random(self, lower, upper, size=None):
        # Sampling a continuous uniform and flooring seems to be the only
        # approach here that accepts array-like lower/upper.
        width = upper - lower - np.finfo(float).eps
        draws = stats.uniform.rvs(lower, width, size=size)
        return np.floor(draws).astype('int32')

    def random(self, point=None, size=None, repeat=None):
        lower, upper = draw_values([self.lower, self.upper], point=point)
        return generate_samples(self._random, lower, upper,
                                dist_shape=self.shape, size=size)

    def logp(self, value):
        lower, upper = self.lower, self.upper
        return bound(-tt.log(upper - lower + 1),
                     lower <= value, value <= upper)
class Categorical(Discrete):
    R"""Categorical log-likelihood.

    The most general discrete distribution.

    .. math:: f(x \mid p) = p_x

    ======== ===================================
    Support  :math:`x \in \{1, 2, \ldots, |p|\}`
    ======== ===================================

    Parameters
    ----------
    p : array of floats
        p > 0 and the elements of p must sum to 1. They will be automatically
        rescaled otherwise.
    """

    def __init__(self, p, *args, **kwargs):
        super(Categorical, self).__init__(*args, **kwargs)
        try:
            # Prefer the concrete test value when the graph provides one.
            self.k = tt.shape(p)[-1].tag.test_value
        except AttributeError:
            self.k = tt.shape(p)[-1]
        # Convert once, then normalize along the last axis.  The original
        # assigned the converted tensor to self.p and immediately overwrote
        # it with a normalization of the *raw* argument, leaving the
        # conversion dead.
        p = tt.as_tensor_variable(p)
        self.p = (p.T / tt.sum(p, -1)).T
        self.mode = tt.argmax(p)

    def random(self, point=None, size=None, repeat=None):
        def random_choice(k, *args, **kwargs):
            # np.random.choice only accepts a 1-d probability vector, so a
            # batch of distributions is sampled row by row.
            if len(kwargs['p'].shape) > 1:
                return np.asarray(
                    [np.random.choice(k, p=p)
                     for p in kwargs['p']]
                )
            else:
                return np.random.choice(k, *args, **kwargs)

        p, k = draw_values([self.p, self.k], point=point)
        return generate_samples(partial(random_choice, np.arange(k)),
                                p=p,
                                broadcast_shape=p.shape[:-1] or (1,),
                                dist_shape=self.shape,
                                size=size)

    def logp(self, value):
        p = self.p
        k = self.k
        # Gradient-free check that each row is (approximately) normalized.
        sumto1 = theano.gradient.zero_grad(
            tt.le(abs(tt.sum(p, axis=-1) - 1), 1e-5))
        if p.ndim > 1:
            a = tt.log(p[tt.arange(p.shape[0]), value])
        else:
            a = tt.log(p[value])
        return bound(a,
                     value >= 0, value <= (k - 1),
                     sumto1)
class Constant(Discrete):
    """Constant log-likelihood.

    Places all probability mass on the single value ``c``.

    Parameters
    ----------
    value : float or int
        Constant parameter.
    """

    def __init__(self, c, *args, **kwargs):
        super(Constant, self).__init__(*args, **kwargs)
        self.mean = self.median = self.mode = self.c = c

    def random(self, point=None, size=None, repeat=None):
        c = draw_values([self.c], point=point)
        dtype = np.array(c).dtype

        def _random(c, dtype=dtype, size=None):
            # Every draw is the constant itself.
            return np.full(size, fill_value=c, dtype=dtype)

        samples = generate_samples(_random, c=c, dist_shape=self.shape,
                                   size=size)
        return samples.astype(dtype)

    def logp(self, value):
        # Log-probability 0 at c, -inf everywhere else.
        return bound(0, tt.eq(value, self.c))
def ConstantDist(*args, **kwargs):
    """Deprecated alias for :class:`Constant`.

    Emits a DeprecationWarning and forwards all arguments unchanged.
    """
    # Local import: `warnings` is not in this module's import block, so the
    # original raised NameError the first time this alias was called.
    import warnings
    warnings.warn("ConstantDist has been deprecated. In future, use Constant instead.",
                  DeprecationWarning)
    return Constant(*args, **kwargs)
class ZeroInflatedPoisson(Discrete):
    R"""Zero-inflated Poisson log-likelihood.

    A Poisson component mixed with extra probability mass at zero.

    .. math::
       f(x \mid \theta, \psi) = \left\{ \begin{array}{l}
         (1-\psi) + \psi e^{-\theta}, \text{if } x = 0 \\
         \psi \frac{e^{-\theta}\theta^x}{x!}, \text{if } x=1,2,3,\ldots
         \end{array} \right.

    ======== ==========================
    Support  :math:`x \in \mathbb{N}_0`
    Mean     :math:`\psi\theta`
    Variance :math:`\theta + \frac{1-\psi}{\psi}\theta^2`
    ======== ==========================

    Parameters
    ----------
    theta : float
        Expected number of occurrences during the given interval
        (theta >= 0).
    psi : float
        Expected proportion of Poisson variates (0 < psi < 1)
    """

    def __init__(self, theta, psi, *args, **kwargs):
        super(ZeroInflatedPoisson, self).__init__(*args, **kwargs)
        self.theta = theta
        self.psi = psi
        self.pois = Poisson.dist(theta)
        self.mode = self.pois.mode

    def random(self, point=None, size=None, repeat=None):
        theta, psi = draw_values([self.theta, self.psi], point=point)
        counts = generate_samples(stats.poisson.rvs, theta,
                                  dist_shape=self.shape, size=size)
        # Keep each draw with probability psi, otherwise force it to zero.
        keep = np.random.random(np.squeeze(counts.shape)) < psi
        return counts * keep

    def logp(self, value):
        psi, theta = self.psi, self.theta
        zero_case = tt.log((1. - psi) + psi * tt.exp(-theta))
        positive_case = tt.log(psi) + self.pois.logp(value)
        return tt.switch(value > 0, positive_case, zero_case)
class ZeroInflatedNegativeBinomial(Discrete):
    R"""Zero-inflated negative binomial log-likelihood.

    A negative binomial (gamma-Poisson) component mixed with extra
    probability mass at zero.

    .. math::
       f(x \mid \mu, \alpha, \psi) = \left\{ \begin{array}{l}
         (1-\psi) + \psi \left (\frac{\alpha}{\alpha+\mu} \right) ^\alpha, \text{if } x = 0 \\
         \psi \frac{\Gamma(x+\alpha)}{x! \Gamma(\alpha)} \left (\frac{\alpha}{\mu+\alpha} \right)^\alpha \left( \frac{\mu}{\mu+\alpha} \right)^x, \text{if } x=1,2,3,\ldots
         \end{array} \right.

    ======== ==========================
    Support  :math:`x \in \mathbb{N}_0`
    Mean     :math:`\psi\mu`
    Var      :math:`\psi\mu + \left (1 + \frac{\mu}{\alpha} + \frac{1-\psi}{\mu} \right)`
    ======== ==========================

    Parameters
    ----------
    mu : float
        Poission distribution parameter (mu > 0).
    alpha : float
        Gamma distribution parameter (alpha > 0).
    psi : float
        Expected proportion of NegativeBinomial variates (0 < psi < 1)
    """

    def __init__(self, mu, alpha, psi, *args, **kwargs):
        super(ZeroInflatedNegativeBinomial, self).__init__(*args, **kwargs)
        self.mu = mu
        self.alpha = alpha
        self.psi = psi
        self.nb = NegativeBinomial.dist(mu, alpha)
        self.mode = self.nb.mode

    def random(self, point=None, size=None, repeat=None):
        mu, alpha, psi = draw_values([self.mu, self.alpha, self.psi],
                                     point=point)
        # Latent gamma rate (mean mu), then Poisson counts given the rate.
        rates = generate_samples(stats.gamma.rvs, alpha, scale=mu / alpha,
                                 dist_shape=self.shape, size=size)
        rates[rates == 0] = np.finfo(float).eps  # Just in case
        # Keep each draw with probability psi, otherwise force it to zero.
        keep = np.random.random(np.squeeze(rates.shape)) < psi
        return stats.poisson.rvs(rates) * keep

    def logp(self, value):
        psi, mu, alpha = self.psi, self.mu, self.alpha
        zero_case = tt.log((1. - psi) + psi * (alpha / (alpha + mu)) ** alpha)
        positive_case = tt.log(psi) + self.nb.logp(value)
        return tt.switch(value > 0, positive_case, zero_case)
| {
"repo_name": "wanderer2/pymc3",
"path": "pymc3/distributions/discrete.py",
"copies": "1",
"size": "18081",
"license": "apache-2.0",
"hash": -6901887302012889000,
"line_mean": 31.8148820327,
"line_max": 174,
"alpha_frac": 0.5003594934,
"autogenerated": false,
"ratio": 3.685487158581329,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9683786775307769,
"avg_score": 0.00041197533471209156,
"num_lines": 551
} |
from functools import partial
import numpy as np
import torch
from six.moves import map, zip
from ..mask.structures import BitmapMasks, PolygonMasks
def multi_apply(func, *args, **kwargs):
    """Apply function to a list of arguments.

    Note:
        ``func`` is called once per element of the zipped ``*args``; its
        tuple outputs are then transposed so that each kind of result ends
        up in its own list.

    Args:
        func (Function): A function that will be applied to a list of
            arguments

    Returns:
        tuple(list): A tuple containing multiple list, each list contains \
            a kind of returned results by the function
    """
    # Bind keyword arguments up front so map() only supplies positionals.
    wrapped = partial(func, **kwargs) if kwargs else func
    per_input_results = map(wrapped, *args)
    return tuple(map(list, zip(*per_input_results)))
def unmap(data, count, inds, fill=0):
    """Unmap a subset of item (data) back to the original set of items (of size
    count)"""
    # 1-d data scatters into a flat vector; n-d data scatters row-wise into a
    # tensor whose trailing dims match data's.
    if data.dim() == 1:
        out = data.new_full((count, ), fill)
        out[inds.type(torch.bool)] = data
        return out
    expanded_size = (count, ) + data.size()[1:]
    out = data.new_full(expanded_size, fill)
    out[inds.type(torch.bool), :] = data
    return out
def mask2ndarray(mask):
    """Convert a mask of any supported type to an ndarray.

    Args:
        mask (:obj:`BitmapMasks` or :obj:`PolygonMasks` or
            torch.Tensor or np.ndarray): The mask to be converted.

    Returns:
        np.ndarray: Ndarray mask of shape (n, h, w) that has been converted

    Raises:
        TypeError: If ``mask`` has an unsupported type.
    """
    if isinstance(mask, (BitmapMasks, PolygonMasks)):
        return mask.to_ndarray()
    if isinstance(mask, torch.Tensor):
        return mask.detach().cpu().numpy()
    if isinstance(mask, np.ndarray):
        return mask
    raise TypeError(f'Unsupported {type(mask)} data type')
def flip_tensor(src_tensor, flip_direction):
    """flip tensor base on flip_direction.

    Args:
        src_tensor (Tensor): input feature map, shape (B, C, H, W).
        flip_direction (str): The flipping direction. Options are
          'horizontal', 'vertical', 'diagonal'.

    Returns:
        out_tensor (Tensor): Flipped tensor.
    """
    assert src_tensor.ndim == 4
    # Map each direction to the spatial dims torch.flip should reverse.
    flip_dims = {
        'horizontal': [3],
        'vertical': [2],
        'diagonal': [2, 3],
    }
    assert flip_direction in flip_dims
    return torch.flip(src_tensor, flip_dims[flip_direction])
| {
"repo_name": "open-mmlab/mmdetection",
"path": "mmdet/core/utils/misc.py",
"copies": "1",
"size": "2615",
"license": "apache-2.0",
"hash": -1450935934704015600,
"line_mean": 30.130952381,
"line_max": 79,
"alpha_frac": 0.630210325,
"autogenerated": false,
"ratio": 3.8512518409425627,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9981462165942563,
"avg_score": 0,
"num_lines": 84
} |
from functools import partial
import numpy as np
import torch.nn as nn
import torch
from unet3d.models.pytorch.classification.decoder import MyronenkoDecoder, MirroredDecoder
from unet3d.models.pytorch.classification.myronenko import MyronenkoEncoder, MyronenkoConvolutionBlock
from unet3d.models.pytorch.classification.resnet import conv1x1x1
class VariationalBlock(nn.Module):
    """Dense VAE bottleneck: project to (mu, logvar), sample, project out.

    Args:
        in_size: width of the flattened input features.
        n_features: latent dimensionality.
        out_size: width of the output projection.
        return_parameters: if True, ``forward`` additionally returns the
            sampled latent ``z``.
    """

    def __init__(self, in_size, n_features, out_size, return_parameters=False):
        super(VariationalBlock, self).__init__()
        self.n_features = n_features
        self.return_parameters = return_parameters
        # First projection emits mu and logvar concatenated, hence 2x width.
        self.dense1 = nn.Linear(in_size, out_features=n_features * 2)
        self.dense2 = nn.Linear(self.n_features, out_size)

    @staticmethod
    def reparameterize(mu, logvar):
        """Sample N(mu, sigma^2) via the reparameterization trick."""
        sigma = torch.exp(0.5 * logvar)
        noise = torch.randn_like(sigma)
        return mu + noise * sigma

    def forward(self, x):
        params = self.dense1(x)
        mu, logvar = torch.split(params, self.n_features, dim=1)
        z = self.reparameterize(mu, logvar)
        out = self.dense2(z)
        if self.return_parameters:
            return out, mu, logvar, z
        return out, mu, logvar
class ConvolutionalAutoEncoder(nn.Module):
    """Configurable convolutional autoencoder.

    Wires an injected encoder class to an injected decoder class, followed
    by a 1x1x1 convolution head and an optional output activation.
    """

    def __init__(self, input_shape=None, n_features=1, base_width=32, encoder_blocks=None, decoder_blocks=None,
                 feature_dilation=2, downsampling_stride=2, interpolation_mode="trilinear",
                 encoder_class=MyronenkoEncoder, decoder_class=None, n_outputs=None, layer_widths=None,
                 decoder_mirrors_encoder=False, activation=None, use_transposed_convolutions=False, kernel_size=3):
        # NOTE(review): input_shape and n_outputs are accepted but unused in
        # this class; subclasses appear to rely on them -- confirm.
        super(ConvolutionalAutoEncoder, self).__init__()
        self.base_width = base_width
        if encoder_blocks is None:
            encoder_blocks = [1, 2, 2, 4]
        self.encoder = encoder_class(n_features=n_features, base_width=base_width, layer_blocks=encoder_blocks,
                                     feature_dilation=feature_dilation, downsampling_stride=downsampling_stride,
                                     layer_widths=layer_widths, kernel_size=kernel_size)
        # Resolve the decoder class and block layout before constructing it.
        decoder_class, decoder_blocks = self.set_decoder_blocks(decoder_class, encoder_blocks, decoder_mirrors_encoder,
                                                                decoder_blocks)
        self.decoder = decoder_class(base_width=base_width, layer_blocks=decoder_blocks,
                                     upsampling_scale=downsampling_stride, feature_reduction_scale=feature_dilation,
                                     upsampling_mode=interpolation_mode, layer_widths=layer_widths,
                                     use_transposed_convolutions=use_transposed_convolutions,
                                     kernel_size=kernel_size)
        # Head maps base_width channels back to the input feature count.
        self.set_final_convolution(n_features)
        self.set_activation(activation=activation)

    def set_final_convolution(self, n_outputs):
        # 1x1x1 convolution producing the final output channels.
        self.final_convolution = conv1x1x1(in_planes=self.base_width, out_planes=n_outputs, stride=1)

    def set_activation(self, activation):
        # Optional output nonlinearity; any other value leaves raw outputs.
        if activation == "sigmoid":
            self.activation = nn.Sigmoid()
        elif activation == "softmax":
            self.activation = nn.Softmax(dim=1)
        else:
            self.activation = None

    def set_decoder_blocks(self, decoder_class, encoder_blocks, decoder_mirrors_encoder, decoder_blocks):
        """Pick the decoder class and per-level block counts.

        Mirroring reuses the encoder's block layout; otherwise each level
        defaults to a single block.
        """
        if decoder_mirrors_encoder:
            decoder_blocks = encoder_blocks
            if decoder_class is None:
                decoder_class = MirroredDecoder
        elif decoder_blocks is None:
            decoder_blocks = [1] * len(encoder_blocks)
            if decoder_class is None:
                decoder_class = MyronenkoDecoder
        return decoder_class, decoder_blocks

    def forward(self, x):
        # encode -> decode -> 1x1x1 head -> optional activation
        x = self.encoder(x)
        x = self.decoder(x)
        x = self.final_convolution(x)
        if self.activation is not None:
            x = self.activation(x)
        return x
class MyronenkoVariationalLayer(nn.Module):
    """Variational bottleneck between the Myronenko encoder and decoder.

    Reduces the feature map with a strided convolution, flattens it through
    a dense variational block, then restores the feature count with a 1x1x1
    convolution and upsamples back to the pre-reduction spatial size.
    """

    def __init__(self, in_features, input_shape, reduced_features=16, latent_features=128,
                 conv_block=MyronenkoConvolutionBlock, conv_stride=2, upsampling_mode="trilinear",
                 align_corners_upsampling=False):
        super(MyronenkoVariationalLayer, self).__init__()
        self.in_conv = conv_block(in_planes=in_features, planes=reduced_features, stride=conv_stride)
        # Shape of the reduced feature map (channels first).  `np.int` was
        # removed in NumPy 1.24; the builtin int is the documented
        # replacement with identical semantics here.
        self.reduced_shape = tuple(np.asarray((reduced_features, *np.divide(input_shape, conv_stride)),
                                              dtype=int))
        self.in_size = np.prod(self.reduced_shape, dtype=int)
        self.var_block = VariationalBlock(in_size=self.in_size, out_size=self.in_size,
                                          n_features=latent_features)
        self.relu = nn.ReLU(inplace=True)
        self.out_conv = conv1x1x1(in_planes=reduced_features, out_planes=in_features, stride=1)
        self.upsample = partial(nn.functional.interpolate, scale_factor=conv_stride, mode=upsampling_mode,
                                align_corners=align_corners_upsampling)

    def forward(self, x):
        # Reduce and flatten, pass through the variational bottleneck, then
        # restore the spatial layout, channel count, and resolution.
        x = self.in_conv(x).flatten(start_dim=1)
        x, mu, logvar = self.var_block(x)
        x = self.relu(x).view(-1, *self.reduced_shape)
        x = self.out_conv(x)
        x = self.upsample(x)
        return x, mu, logvar
class VariationalAutoEncoder(ConvolutionalAutoEncoder):
    """Convolutional autoencoder with a variational bottleneck.

    ``forward`` returns ``(output, mu, logvar)``; ``test`` decodes from the
    latent mean instead of the sampled path.
    """

    def __init__(self, n_reduced_latent_feature_maps=16, vae_features=128, variational_layer=MyronenkoVariationalLayer,
                 input_shape=None, n_features=1, base_width=32, encoder_blocks=None, decoder_blocks=None,
                 feature_dilation=2, downsampling_stride=2, interpolation_mode="trilinear", encoder_class=MyronenkoEncoder,
                 decoder_class=MyronenkoDecoder, n_outputs=None, layer_widths=None, decoder_mirrors_encoder=False,
                 activation=None, use_transposed_convolutions=False, var_layer_stride=2):
        super(VariationalAutoEncoder, self).__init__(input_shape=input_shape, n_features=n_features,
                                                     base_width=base_width, encoder_blocks=encoder_blocks,
                                                     decoder_blocks=decoder_blocks, feature_dilation=feature_dilation,
                                                     downsampling_stride=downsampling_stride,
                                                     interpolation_mode=interpolation_mode, encoder_class=encoder_class,
                                                     decoder_class=decoder_class, n_outputs=n_outputs, layer_widths=layer_widths,
                                                     decoder_mirrors_encoder=decoder_mirrors_encoder,
                                                     activation=activation,
                                                     use_transposed_convolutions=use_transposed_convolutions)
        # NOTE(review): if vae_features is None, self.var_layer is never
        # created, yet forward()/test() always call it -- confirm callers
        # never pass None.
        if vae_features is not None:
            # Channel count and spatial size at the bottom of the encoder.
            depth = len(encoder_blocks) - 1
            n_latent_feature_maps = base_width * (feature_dilation ** depth)
            latent_image_shape = np.divide(input_shape, downsampling_stride ** depth)
            self.var_layer = variational_layer(in_features=n_latent_feature_maps,
                                               input_shape=latent_image_shape,
                                               reduced_features=n_reduced_latent_feature_maps,
                                               latent_features=vae_features,
                                               upsampling_mode=interpolation_mode,
                                               conv_stride=var_layer_stride)

    def forward(self, x):
        x = self.encoder(x)
        x, mu, logvar = self.var_layer(x)
        x = self.decoder(x)
        x = self.final_convolution(x)
        if self.activation is not None:
            x = self.activation(x)
        return x, mu, logvar

    def test(self, x):
        # Deterministic variant used at evaluation time.
        # NOTE(review): this decodes `mu` (the flat latent mean) where
        # forward() decodes the upsampled feature map -- confirm the decoder
        # accepts that shape.
        x = self.encoder(x)
        x, mu, logvar = self.var_layer(x)
        x = self.decoder(mu)
        x = self.final_convolution(x)
        if self.activation is not None:
            x = self.activation(x)
        return x, mu, logvar
class LabeledVariationalAutoEncoder(VariationalAutoEncoder):
    """VAE whose output head produces ``n_outputs`` label channels instead
    of reconstructing the input features."""

    def __init__(self, *args, n_outputs=None, base_width=32, **kwargs):
        super().__init__(*args, n_outputs=n_outputs, base_width=base_width, **kwargs)
        # Replace the reconstruction head built by the parent with one sized
        # for the label maps.
        self.final_convolution = conv1x1x1(in_planes=base_width, out_planes=n_outputs, stride=1)
| {
"repo_name": "ellisdg/3DUnetCNN",
"path": "unet3d/models/pytorch/autoencoder/variational.py",
"copies": "1",
"size": "8509",
"license": "mit",
"hash": 6805105081618989000,
"line_mean": 51.850931677,
"line_max": 129,
"alpha_frac": 0.6028910565,
"autogenerated": false,
"ratio": 3.8889396709323583,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4991830727432358,
"avg_score": null,
"num_lines": null
} |
from functools import partial
import numpy as np
from deep_np import layers, losses, utils
def _init_fc_weights(in_dim, out_dim, include_bias=True):
weights = np.random.randn(in_dim, out_dim) / np.sqrt(in_dim / 2.)
if include_bias:
return weights, np.zeros((1, out_dim))
return weights
def _init_conv_weights(n_filters, n_channels, filter_size, include_bias=True):
weights = np.random.randn(n_filters, n_channels, filter_size,
filter_size) / np.sqrt(n_filters / 2.)
if include_bias:
return weights, np.zeros((n_filters, 1))
return weights
class NeuralNetwork(object):
    """Base class wiring prediction and a single training step.

    Subclasses must implement ``forward`` and ``backward``.
    """

    def __init__(self, *args, **kwargs):
        pass

    def predict_proba(self, X):
        """Class probabilities via a softmax over the forward logits."""
        logits, _ = self.forward(X, train=False)
        return utils.softmax(logits)

    def predict(self, X):
        """Most probable class index per row."""
        probs = self.predict_proba(X)
        return np.argmax(probs, axis=1)

    def train_step(self, X_train, y_train):
        """One forward/backward pass; returns (gradients, loss)."""
        logits, cache = self.forward(X_train)
        loss = losses.cross_entropy(logits, y_train)
        grad = self.backward(logits, y_train, cache)
        return grad, loss

    def forward(self, X, train=True):
        raise NotImplementedError()

    def backward(self, logits, y_train, cache):
        raise NotImplementedError()
# Network for pong policy gradient
class PongNetwork(NeuralNetwork):
    """Two-layer fully-connected policy network."""

    def __init__(self, input_dim, hidden_dim=128, n_cls=3):
        W1, b1 = _init_fc_weights(input_dim, hidden_dim)
        W2, b2 = _init_fc_weights(hidden_dim, n_cls)
        self.model = dict(W1=W1, b1=b1, W2=W2, b2=b2)

    def forward(self, X, train=True):
        """Logits plus the caches needed by the backward pass."""
        hidden, fc1_cache = layers.fc_forward(X, self.model["W1"],
                                              self.model["b1"])
        hidden, relu_cache = layers.relu_forward(hidden)
        logits, fc2_cache = layers.fc_forward(hidden, self.model["W2"],
                                              self.model["b2"])
        return logits, dict(h1=fc1_cache, nl1=relu_cache, logits=fc2_cache)

    # slightly different API to accomodate policy gradient
    def backward(self, grad_y, cache):
        """Parameter gradients given the gradient at the output."""
        dhidden, dW2, db2 = layers.fc_backward(grad_y, cache["logits"])
        dhidden = layers.relu_backward(dhidden, cache["nl1"])
        _, dW1, db1 = layers.fc_backward(dhidden, cache["h1"])
        return dict(W1=dW1, b1=db1, W2=dW2, b2=db2)
class FeedForwardNetwork(NeuralNetwork):
    """Three-layer fully-connected network with batchnorm and dropout.

    Fixes several defects in the original:
    - ``__init__`` referenced an undefined name ``H`` (NameError) and used
      cache keys ``b1_mean``/``b2_mean`` while ``forward`` reads
      ``bn1_mean``/``bn2_mean`` (KeyError).
    - ``forward`` wrote running stats to nonexistent attributes
      ``self.bn1_mean``/``self.bn2_mean`` and fed the second layer the raw
      input ``X`` instead of the first hidden activation.
    - The cache key for the second batchnorm was ``b2`` while ``backward``
      reads ``bn2``.
    - ``backward`` referenced ``loss`` (module is ``losses``), the undefined
      ``h2_cache`` (should be ``cache["h2"]``), and ``self.relu_backward``
      (should be ``layers.relu_backward``).
    """

    def __init__(self, in_dim=784, hidden_dim=128, p_dropout=0.7, n_cls=10):
        self.p_dropout = p_dropout
        W1, b1 = _init_fc_weights(in_dim, hidden_dim)
        beta1 = np.ones((1, hidden_dim))
        gamma1 = np.ones((1, hidden_dim))
        W2, b2 = _init_fc_weights(hidden_dim, hidden_dim)
        beta2 = np.ones((1, hidden_dim))
        gamma2 = np.ones((1, hidden_dim))
        W3, b3 = _init_fc_weights(hidden_dim, n_cls)
        self.model = dict(
            W1=W1,
            b1=b1,
            beta1=beta1,
            gamma1=gamma1,
            W2=W2,
            b2=b2,
            beta2=beta2,
            gamma2=gamma2,
            W3=W3,
            b3=b3)
        # Running batchnorm statistics; keys match what forward() reads.
        self.bn_caches = dict(
            bn1_mean=np.zeros((1, hidden_dim)),
            bn1_var=np.zeros((1, hidden_dim)),
            bn2_mean=np.zeros((1, hidden_dim)),
            bn2_var=np.zeros((1, hidden_dim)))

    def forward(self, X, train=True):
        """Forward pass returning logits and the caches for backward()."""
        gamma1, gamma2 = self.model["gamma1"], self.model["gamma2"]
        beta1, beta2 = self.model["beta1"], self.model["beta2"]
        u1, u2 = None, None
        # Layer 1: fc -> batchnorm -> relu -> (dropout when training)
        h1, h1_cache = layers.fc_forward(X, self.model["W1"], self.model["b1"])
        bn1_cache = (self.bn_caches["bn1_mean"], self.bn_caches["bn1_var"])
        h1, bn1_cache, run_mean, run_var = layers.bn_forward(
            h1, gamma1, beta1, bn1_cache, train=train)
        h1, nl1_cache = layers.relu_forward(h1)
        self.bn_caches["bn1_mean"], self.bn_caches["bn1_var"] = run_mean, run_var
        if train:
            h1, u1 = layers.dropout_forward(h1, self.p_dropout)
        # Layer 2 consumes h1 (the original mistakenly fed the raw input X).
        h2, h2_cache = layers.fc_forward(h1, self.model["W2"], self.model["b2"])
        bn2_cache = (self.bn_caches["bn2_mean"], self.bn_caches["bn2_var"])
        h2, bn2_cache, run_mean, run_var = layers.bn_forward(
            h2, gamma2, beta2, bn2_cache, train=train)
        h2, nl2_cache = layers.relu_forward(h2)
        self.bn_caches["bn2_mean"], self.bn_caches["bn2_var"] = run_mean, run_var
        if train:
            h2, u2 = layers.dropout_forward(h2, self.p_dropout)
        logits, logits_cache = layers.fc_forward(h2, self.model["W3"],
                                                 self.model["b3"])
        return logits, dict(
            X=X,
            h1=h1_cache,
            h2=h2_cache,
            logits=logits_cache,
            nl1=nl1_cache,
            nl2=nl2_cache,
            u1=u1,
            u2=u2,
            bn1=bn1_cache,
            bn2=bn2_cache)

    def backward(self, logits, y_train, cache):
        """Gradients of the loss w.r.t. every parameter in self.model."""
        grad_y = losses.dcross_entropy(logits, y_train)
        # Unwind layer 3, then layer 2, then layer 1.
        dh2, dW3, db3 = layers.fc_backward(grad_y, cache["logits"])
        dh2 = layers.relu_backward(dh2, cache["nl2"])
        dh2 = layers.dropout_backward(dh2, cache["u2"])
        dh2, dgamma2, dbeta2 = layers.bn_backward(dh2, cache["bn2"])
        dh1, dW2, db2 = layers.fc_backward(dh2, cache["h2"])
        dh1 = layers.relu_backward(dh1, cache["nl1"])
        dh1 = layers.dropout_backward(dh1, cache["u1"])
        dh1, dgamma1, dbeta1 = layers.bn_backward(dh1, cache["bn1"])
        _, dW1, db1 = layers.fc_backward(dh1, cache["h1"])
        return dict(
            W1=dW1,
            b1=db1,
            W2=dW2,
            b2=db2,
            W3=dW3,
            b3=db3,
            gamma1=dgamma1,
            beta1=dbeta1,
            gamma2=dgamma2,
            beta2=dbeta2)
class ConvolutionalNetwork(NeuralNetwork):
  """conv -> relu -> maxpool -> fc -> relu -> fc classifier."""

  def __init__(self,
               img_size=28,
               filter_size=3,
               pool_size=2,
               n_channels=1,
               n_filters=10,
               n_cls=10,
               hidden_dim=128):
    super().__init__()
    # Flattened size after pooling: n_filters feature maps of
    # (img_size // pool_size) x (img_size // pool_size).
    # BUG FIX: the old expression evaluated left-to-right as
    # ((n_filters * img_size // pool_size) * img_size) // pool_size, which
    # only matches the intended value when pool_size divides img_size
    pool_out_dim = n_filters * (img_size // pool_size) * (img_size // pool_size)
    W1, b1 = _init_conv_weights(n_filters, n_channels, filter_size)
    W2, b2 = _init_fc_weights(pool_out_dim, hidden_dim)
    W3, b3 = _init_fc_weights(hidden_dim, n_cls)
    self.model = dict(W1=W1, b1=b1, W2=W2, b2=b2, W3=W3, b3=b3)

  def forward(self, X_train, train=False):
    """Forward pass; returns (logits, cache-for-backward)."""
    h1, h1_cache = layers.conv_forward(X_train, self.model["W1"],
                                       self.model["b1"])
    h1, nl1_cache = layers.relu_forward(h1)
    hpool, hpool_cache = layers.maxpool_forward(h1)
    # Flatten pooled maps to (batch, features); reshape alone suffices
    # (the former ravel() produced an intermediate copy for nothing)
    h2 = hpool.reshape(X_train.shape[0], -1)
    h3, h3_cache = layers.fc_forward(h2, self.model["W2"], self.model["b2"])
    h3, nl3_cache = layers.relu_forward(h3)
    logits, logits_cache = layers.fc_forward(h3, self.model["W3"],
                                             self.model["b3"])
    return (logits, dict(
        h1=h1_cache,
        nl1=nl1_cache,
        hpool=hpool_cache,
        hpool_shape=hpool.shape,
        h3=h3_cache,
        nl3=nl3_cache,
        logits=logits_cache))

  def backward(self, logits, y_train, cache):
    """Backprop through fc head -> pool -> conv; returns parameter grads."""
    grad_y = losses.dcross_entropy(logits, y_train)
    # Fully-connected head
    dh3, dW3, db3 = layers.fc_backward(grad_y, cache["logits"])
    dh3 = layers.relu_backward(dh3, cache["nl3"])
    dh2, dW2, db2 = layers.fc_backward(dh3, cache["h3"])
    # Un-flatten back to the pooled feature-map shape
    dh2 = dh2.reshape(cache["hpool_shape"])
    # Max-pool layer
    dpool = layers.maxpool_backward(dh2, cache["hpool"])
    # Convolution layer
    dh1 = layers.relu_backward(dpool, cache["nl1"])
    dX, dW1, db1 = layers.conv_backward(dh1, cache["h1"])
    grad = dict(W1=dW1, W2=dW2, W3=dW3, b1=db1, b2=db2, b3=db3)
    return grad
class RecurrentNetwork(NeuralNetwork):
  """Vanilla character-level RNN (tanh cell) over one-hot inputs."""

  def __init__(self, vocab_size, char2idx, idx2char, hidden_dim=128):
    self.vocab_size = vocab_size
    self.char2idx = char2idx
    self.idx2char = idx2char
    self.hidden_dim = hidden_dim
    Wxh = _init_fc_weights(vocab_size, hidden_dim, include_bias=False)
    Whh, bh = _init_fc_weights(hidden_dim, hidden_dim)
    Why, by = _init_fc_weights(hidden_dim, vocab_size)
    self.model = dict(Wxh=Wxh, Whh=Whh, bh=bh, Why=Why, by=by)

  def init_hidden_state(self):
    """Zero hidden state of shape (1, hidden_dim)."""
    return np.zeros((1, self.hidden_dim))

  def forward(self, X, h, train=True):
    """One time step. X is a character index; returns (y, new_h, cache).

    When train=False, y is softmax probabilities instead of raw logits.
    """
    Wxh, Whh, Why = self.model['Wxh'], self.model['Whh'], self.model['Why']
    bh, by = self.model['bh'], self.model['by']
    X_one_hot = np.zeros(self.vocab_size)
    X_one_hot[X] = 1.
    X_one_hot = X_one_hot.reshape(1, -1)
    hprev = h.copy()
    h, h_cache = layers.tanh_forward(X_one_hot @ Wxh + hprev @ Whh + bh)
    y, y_cache = layers.fc_forward(h, Why, by)
    cache = (X_one_hot, Whh, h, hprev, y, h_cache, y_cache)
    if not train:
      y = utils.softmax(y)
    return y, h, cache

  def backward(self, logits, y_train, dh_next, cache):
    """Single-step BPTT; returns (grads, gradient w.r.t. previous h)."""
    X, Whh, h, hprev, y, h_cache, y_cache = cache
    dy = losses.dcross_entropy(logits, y_train)
    dh, dWhy, dby = layers.fc_backward(dy, y_cache)
    dh += dh_next  # accumulate gradient flowing in from the future step
    dby = dby.reshape((1, -1))
    dh = layers.tanh_backward(dh, h_cache)
    dbh = dh
    dWhh = hprev.T @ dh
    dWxh = X.T @ dh
    dh_next = dh @ Whh.T
    grad = dict(Wxh=dWxh, Whh=dWhh, Why=dWhy, bh=dbh, by=dby)
    return grad, dh_next

  def train_step(self, X_batch, y_batch, h):
    """Forward over the sequence, then truncated BPTT; clips gradients."""
    logits_batch = []
    caches = []
    loss = 0.
    for x, y in zip(X_batch, y_batch):
      logits, h, cache = self.forward(x, h, train=True)
      loss += losses.cross_entropy(logits, y)
      logits_batch.append(logits)
      caches.append(cache)
    loss /= X_batch.shape[0]
    dh_next = self.init_hidden_state()
    grads = {k: np.zeros_like(v) for k, v in self.model.items()}
    for t in reversed(range(len(X_batch))):
      grad, dh_next = self.backward(logits_batch[t], y_batch[t], dh_next,
                                    caches[t])
      for k in grads.keys():
        grads[k] += grad[k]
    for k, v in grads.items():
      # BUG FIX: arguments were (v, 5., -5.); np.clip with a_min > a_max
      # returns a_max everywhere, wiping out the gradients. The LSTM
      # subclass already used the correct (-5., 5.) order.
      grads[k] = np.clip(v, -5., 5.)
    return grads, loss, h

  def sample(self, X_seed, h, size=100):
    """Sample a string of `size` characters starting from X_seed."""
    chars = [self.idx2char[X_seed]]
    idx_list = list(range(self.vocab_size))
    X = X_seed
    for _ in range(size - 1):
      prob, h, _ = self.forward(X, h, train=False)
      idx = np.random.choice(idx_list, p=prob.ravel())
      chars.append(self.idx2char[idx])
      X = idx
    return ''.join(chars)
class LSTM(RecurrentNetwork):
  """Character-level LSTM; state is a (hidden, cell) tuple.

  Each gate is a fully-connected layer over [h_old, x_one_hot]
  (concatenated along columns), so gate weights have shape
  (vocab_size + hidden_dim, hidden_dim).
  """
  def __init__(self, vocab_size, char2idx, idx2char, hidden_dim=128):
    self.vocab_size = vocab_size
    self.char2idx = char2idx
    self.idx2char = idx2char
    self.hidden_dim = hidden_dim
    # Forget / input / candidate / output gates, plus the output projection
    Wf, bf = _init_fc_weights(vocab_size + hidden_dim, hidden_dim)
    Wi, bi = _init_fc_weights(vocab_size + hidden_dim, hidden_dim)
    Wc, bc = _init_fc_weights(vocab_size + hidden_dim, hidden_dim)
    Wo, bo = _init_fc_weights(vocab_size + hidden_dim, hidden_dim)
    Wy, by = _init_fc_weights(hidden_dim, vocab_size)
    self.model = dict(
        Wf=Wf, bf=bf, Wi=Wi, bi=bi, Wc=Wc, bc=bc, Wo=Wo, bo=bo, Wy=Wy, by=by)
  def init_hidden_state(self):
    """Zero (hidden, cell) state, each of shape (1, hidden_dim)."""
    return (np.zeros((1, self.hidden_dim)), np.zeros((1, self.hidden_dim)))
  def forward(self, X, state, train=True):
    """One LSTM step. X is a char index; returns (y, (h, c), cache).

    When train=False, y is softmax probabilities instead of raw logits.
    """
    h_old, c_old = state
    X_one_hot = np.zeros(self.vocab_size)
    X_one_hot[X] = 1.
    X_one_hot = X_one_hot.reshape(1, -1)
    # Gate input: previous hidden state concatenated with the one-hot char
    X = np.column_stack((h_old, X_one_hot))
    hf, hf_cache = layers.fc_forward(X, self.model["Wf"], self.model["bf"])
    hf, hf_sigm_cache = layers.sigmoid_forward(hf)
    hi, hi_cache = layers.fc_forward(X, self.model["Wi"], self.model["bi"])
    hi, hi_sigm_cache = layers.sigmoid_forward(hi)
    ho, ho_cache = layers.fc_forward(X, self.model["Wo"], self.model["bo"])
    ho, ho_sigm_cache = layers.sigmoid_forward(ho)
    hc, hc_cache = layers.fc_forward(X, self.model["Wc"], self.model["bc"])
    hc, hc_tanh_cache = layers.tanh_forward(hc)
    # New cell state: forget-gated old cell plus input-gated candidate
    c = hf * c_old + hi * hc
    c, c_tanh_cache = layers.tanh_forward(c)
    h = ho * c
    y, y_cache = layers.fc_forward(h, self.model["Wy"], self.model["by"])
    cache = (X, hf, hi, ho, hc, hf_cache, hf_sigm_cache, hi_cache,
             hi_sigm_cache, ho_cache, ho_sigm_cache, hc_cache, hc_tanh_cache,
             c_old, c, c_tanh_cache, y_cache)
    if not train:
      y = utils.softmax(y)
    return y, (h, c), cache
  def backward(self, logits, y_train, d_next, cache):
    """Single-step BPTT; returns (grads, (dh_next, dc_next))."""
    X, hf, hi, ho, hc, hf_cache, hf_sigm_cache, hi_cache, hi_sigm_cache, ho_cache, \
        ho_sigm_cache, hc_cache, hc_tanh_cache, c_old, c, c_tanh_cache, y_cache = cache
    dh_next, dc_next = d_next
    dy = losses.dcross_entropy(logits, y_train)
    dh, dWy, dby = layers.fc_backward(dy, y_cache)
    dh += dh_next  # accumulate gradient flowing in from the future step
    dho = c * dh
    dho = layers.sigmoid_backward(dho, ho_sigm_cache)
    dc = ho * dh
    dc = layers.tanh_backward(dc, c_tanh_cache)
    dc = dc + dc_next
    dhf = c_old * dc
    dhf = layers.sigmoid_backward(dhf, hf_sigm_cache)
    dhi = hc * dc
    dhi = layers.sigmoid_backward(dhi, hi_sigm_cache)
    dhc = hi * dc
    dhc = layers.tanh_backward(dhc, hc_tanh_cache)
    # All four gates share the same concatenated input, so their input
    # gradients sum
    dXo, dWo, dbo = layers.fc_backward(dho, ho_cache)
    dXc, dWc, dbc = layers.fc_backward(dhc, hc_cache)
    dXi, dWi, dbi = layers.fc_backward(dhi, hi_cache)
    dXf, dWf, dbf = layers.fc_backward(dhf, hf_cache)
    dX = dXo + dXc + dXi + dXf
    # The first hidden_dim columns of X were h_old (cf. column_stack above)
    dh_next = dX[:, :self.hidden_dim]
    dc_next = hf * dc
    grad = dict(Wf=dWf, Wi=dWi, Wc=dWc, Wo=dWo, Wy=dWy, bf=dbf, bi=dbi, bc=dbc, bo=dbo, by=dby)
    return grad, (dh_next, dc_next)
  def train_step(self, X_batch, y_batch, state):
    """Forward over the sequence, then truncated BPTT; clips gradients."""
    logits_batch = []
    caches = []
    loss = 0.
    for x, y_true in zip(X_batch, y_batch):
      logits, state, cache = self.forward(x, state, train=True)
      loss += losses.cross_entropy(logits, y_true)
      logits_batch.append(logits)
      caches.append(cache)
    loss /= X_batch.shape[0]
    # backward
    d_next = self.init_hidden_state()
    grads = {k: np.zeros_like(v) for k, v in self.model.items()}
    for y_pred, y_true, cache in reversed(list(zip(logits_batch, y_batch, caches))):
      grad, d_next = self.backward(y_pred, y_true, d_next, cache)
      for k in grads.keys():
        grads[k] += grad[k]
    for k, v in grads.items():
      # Clip gradients to [-5, 5] to curb exploding gradients
      grads[k] = np.clip(v, -5., 5.)
    return grads, loss, state
| {
"repo_name": "teasherm/models",
"path": "deep_np/nets.py",
"copies": "1",
"size": "13874",
"license": "unlicense",
"hash": 1561426207700936400,
"line_mean": 29.8311111111,
"line_max": 95,
"alpha_frac": 0.5960789967,
"autogenerated": false,
"ratio": 2.714006259780908,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8792684086224305,
"avg_score": 0.003480234051320513,
"num_lines": 450
} |
from functools import partial
import numpy as np
from devito.core.operator import CoreOperator, CustomOperator
from devito.exceptions import InvalidOperator
from devito.passes.equations import buffering, collect_derivatives
from devito.passes.clusters import (Lift, blocking, cire, cse, eliminate_arrays,
extract_increments, factorize, fuse, optimize_pows)
from devito.passes.iet import (CTarget, OmpTarget, avoid_denormals, mpiize,
optimize_halospots, hoist_prodders, relax_incr_dimensions)
from devito.tools import timed_pass
# Public API: the concrete CPU operator classes exported by this module
__all__ = ['Cpu64NoopCOperator', 'Cpu64NoopOmpOperator', 'Cpu64AdvCOperator',
           'Cpu64AdvOmpOperator', 'Cpu64FsgCOperator', 'Cpu64FsgOmpOperator',
           'Cpu64CustomOperator']
class Cpu64OperatorMixin(object):

    BLOCK_LEVELS = 1
    """
    Depth of loop blocking: 1 produces "blocks", 2 additionally "sub-blocks",
    3 additionally "sub-sub-blocks", and so on.
    """

    CIRE_MINCOST_INV = 50
    """
    Operation-count threshold below which a Dimension-invariant aliasing
    expression is left alone. Such aliases get hoisted out of one or more
    invariant loops, which requires tensor temporaries whose footprint may be
    very large (up to the whole domain for time-invariant aliases).
    """

    CIRE_MINCOST_SOPS = 10
    """
    Operation-count threshold below which a sum-of-product aliasing expression
    is left alone.
    """

    PAR_COLLAPSE_NCORES = 4
    """
    Emit a collapse clause whenever the number of available physical cores
    exceeds this value.
    """

    PAR_COLLAPSE_WORK = 100
    """
    Emit a collapse clause whenever the statically-known trip count of the
    collapsable loops exceeds this value.
    """

    PAR_CHUNK_NONAFFINE = 3
    """
    Coefficient used to adjust the chunk size in non-affine parallel loops.
    """

    PAR_DYNAMIC_WORK = 10
    """
    Switch to dynamic scheduling once the per-iteration operation count
    exceeds this value; otherwise stick to static scheduling.
    """

    PAR_NESTED = 2
    """
    Enable nested parallelism once the number of hyperthreads per core
    exceeds this value.
    """

    @classmethod
    def _normalize_kwargs(cls, **kwargs):
        """Validate and fill in defaults for the user-supplied options."""
        opts = kwargs['options']
        normalized = {}

        # Execution modes
        normalized['openmp'] = opts.pop('openmp')
        normalized['mpi'] = opts.pop('mpi')
        normalized['parallel'] = normalized['openmp']  # Backwards compatibility

        # Buffering
        normalized['buf-async-degree'] = opts.pop('buf-async-degree', None)

        # Blocking
        normalized['blockinner'] = opts.pop('blockinner', False)
        normalized['blocklevels'] = opts.pop('blocklevels', cls.BLOCK_LEVELS)
        normalized['skewing'] = opts.pop('skewing', False)

        # CIRE
        normalized['min-storage'] = opts.pop('min-storage', False)
        normalized['cire-rotate'] = opts.pop('cire-rotate', False)
        normalized['cire-maxpar'] = opts.pop('cire-maxpar', False)
        normalized['cire-maxalias'] = opts.pop('cire-maxalias', False)
        normalized['cire-ftemps'] = opts.pop('cire-ftemps', False)
        normalized['cire-mincost'] = {
            'invariants': {
                # Scalar invariants are never optimized away on the CPU
                'scalar': np.inf,
                'tensor': opts.pop('cire-mincost-inv', cls.CIRE_MINCOST_INV),
            },
            'sops': opts.pop('cire-mincost-sops', cls.CIRE_MINCOST_SOPS)
        }

        # Shared-memory parallelism
        normalized['par-collapse-ncores'] = opts.pop('par-collapse-ncores',
                                                     cls.PAR_COLLAPSE_NCORES)
        normalized['par-collapse-work'] = opts.pop('par-collapse-work',
                                                   cls.PAR_COLLAPSE_WORK)
        normalized['par-chunk-nonaffine'] = opts.pop('par-chunk-nonaffine',
                                                     cls.PAR_CHUNK_NONAFFINE)
        normalized['par-dynamic-work'] = opts.pop('par-dynamic-work',
                                                  cls.PAR_DYNAMIC_WORK)
        normalized['par-nested'] = opts.pop('par-nested', cls.PAR_NESTED)

        # Recognised but unused by the CPU backend
        for ignored in ('par-disabled', 'gpu-direct', 'gpu-fit'):
            opts.pop(ignored, None)

        # Anything left over was not recognised
        if opts:
            raise InvalidOperator("Unrecognized optimization options: [%s]"
                                  % ", ".join(list(opts)))

        kwargs['options'].update(normalized)

        return kwargs
# Mode level
class Cpu64NoopOperator(Cpu64OperatorMixin, CoreOperator):

    @classmethod
    @timed_pass(name='specializing.IET')
    def _specialize_iet(cls, graph, **kwargs):
        """Minimal IET lowering: MPI, optional OpenMP, symbol definitions."""
        options = kwargs['options']
        platform = kwargs['platform']
        sregistry = kwargs['sregistry']

        # Distributed-memory parallelism, if requested
        if options['mpi']:
            mpiize(graph, mode=options['mpi'])

        # Shared-memory parallelism, if requested
        if options['openmp']:
            parallelizer = cls._Target.Parizer(sregistry, options, platform)
            parallelizer.make_parallel(graph)
            parallelizer.initialize(graph)

        # Symbol definitions
        cls._Target.DataManager(sregistry).process(graph)

        return graph
class Cpu64AdvOperator(Cpu64OperatorMixin, CoreOperator):
    """CPU operator applying the full 'advanced' optimization pipeline."""
    @classmethod
    @timed_pass(name='specializing.DSL')
    def _specialize_dsl(cls, expressions, **kwargs):
        """Specialize the DSL-level expressions (derivative collection)."""
        expressions = collect_derivatives(expressions)
        return expressions
    @classmethod
    @timed_pass(name='specializing.Clusters')
    def _specialize_clusters(cls, clusters, **kwargs):
        """Apply the Cluster-level pipeline; blocking runs before the
        flop-reduction passes (cf. Cpu64FsgOperator, which blocks last)."""
        options = kwargs['options']
        platform = kwargs['platform']
        sregistry = kwargs['sregistry']
        # Toposort+Fusion (the former to expose more fusion opportunities)
        clusters = fuse(clusters, toposort=True)
        # Hoist and optimize Dimension-invariant sub-expressions
        clusters = cire(clusters, 'invariants', sregistry, options, platform)
        clusters = Lift().process(clusters)
        # Blocking to improve data locality
        clusters = blocking(clusters, options)
        # Reduce flops (potential arithmetic alterations)
        clusters = extract_increments(clusters, sregistry)
        clusters = cire(clusters, 'sops', sregistry, options, platform)
        clusters = factorize(clusters)
        clusters = optimize_pows(clusters)
        # The previous passes may have created fusion opportunities, which in
        # turn may enable further optimizations
        clusters = fuse(clusters)
        clusters = eliminate_arrays(clusters)
        # Reduce flops (no arithmetic alterations)
        clusters = cse(clusters, sregistry)
        return clusters
    @classmethod
    @timed_pass(name='specializing.IET')
    def _specialize_iet(cls, graph, **kwargs):
        """Apply the IET-level pipeline: denormals, MPI, SIMD/OpenMP, misc."""
        options = kwargs['options']
        platform = kwargs['platform']
        sregistry = kwargs['sregistry']
        # Flush denormal numbers
        avoid_denormals(graph)
        # Distributed-memory parallelism
        optimize_halospots(graph)
        if options['mpi']:
            mpiize(graph, mode=options['mpi'])
        # Lower IncrDimensions so that blocks of arbitrary shape may be used
        relax_incr_dimensions(graph, sregistry=sregistry)
        # Parallelism
        parizer = cls._Target.Parizer(sregistry, options, platform)
        parizer.make_simd(graph)
        parizer.make_parallel(graph)
        # Misc optimizations
        hoist_prodders(graph)
        # Symbol definitions
        cls._Target.DataManager(sregistry).process(graph)
        # Initialize the target-language runtime
        parizer.initialize(graph)
        return graph
class Cpu64FsgOperator(Cpu64AdvOperator):

    """
    Operator with performance optimizations tailored "For small grids" ("Fsg").
    """

    @classmethod
    def _normalize_kwargs(cls, **kwargs):
        """As the parent's, but the incompatible `min-storage` option is rejected."""
        kwargs = super()._normalize_kwargs(**kwargs)

        if kwargs['options']['min-storage']:
            # BUG FIX: the two implicitly-concatenated string literals used to
            # render as "`advanced-fsg  as ..." -- missing closing backtick
            # and a doubled space
            raise InvalidOperator('You should not use `min-storage` with '
                                  '`advanced-fsg` as they work in opposite '
                                  'directions')

        return kwargs

    @classmethod
    @timed_pass(name='specializing.Clusters')
    def _specialize_clusters(cls, clusters, **kwargs):
        """Cluster-level pipeline; unlike the parent, blocking is applied last."""
        options = kwargs['options']
        platform = kwargs['platform']
        sregistry = kwargs['sregistry']

        # Toposort+Fusion (the former to expose more fusion opportunities)
        clusters = fuse(clusters, toposort=True)

        # Hoist and optimize Dimension-invariant sub-expressions
        clusters = cire(clusters, 'invariants', sregistry, options, platform)
        clusters = Lift().process(clusters)

        # Reduce flops (potential arithmetic alterations)
        clusters = extract_increments(clusters, sregistry)
        clusters = cire(clusters, 'sops', sregistry, options, platform)
        clusters = factorize(clusters)
        clusters = optimize_pows(clusters)

        # The previous passes may have created fusion opportunities, which in
        # turn may enable further optimizations
        clusters = fuse(clusters)
        clusters = eliminate_arrays(clusters)

        # Reduce flops (no arithmetic alterations)
        clusters = cse(clusters, sregistry)

        # Blocking to improve data locality
        clusters = blocking(clusters, options)

        return clusters
class Cpu64CustomOperator(Cpu64OperatorMixin, CustomOperator):
    """CPU operator whose optimization passes are chosen by the user."""
    _Target = OmpTarget
    @classmethod
    def _make_dsl_passes_mapper(cls, **kwargs):
        """Available DSL-level passes."""
        return {
            'collect-derivs': collect_derivatives,
        }
    @classmethod
    def _make_exprs_passes_mapper(cls, **kwargs):
        """Available expression-level passes."""
        options = kwargs['options']
        # This callback simply mimics `is_on_device`, used in the device backends.
        # It's used by `buffering` to replace `save!=None` TimeFunctions with buffers
        def callback(f):
            if f.is_TimeFunction and f.save is not None:
                return [f.time_dim]
            else:
                return None
        return {
            'buffering': lambda i: buffering(i, callback, options)
        }
    @classmethod
    def _make_clusters_passes_mapper(cls, **kwargs):
        """Available Cluster-level passes."""
        options = kwargs['options']
        platform = kwargs['platform']
        sregistry = kwargs['sregistry']
        return {
            'blocking': lambda i: blocking(i, options),
            'factorize': factorize,
            'fuse': fuse,
            'lift': lambda i: Lift().process(cire(i, 'invariants', sregistry,
                                                  options, platform)),
            'cire-sops': lambda i: cire(i, 'sops', sregistry, options, platform),
            'cse': lambda i: cse(i, sregistry),
            'opt-pows': optimize_pows,
            'topofuse': lambda i: fuse(i, toposort=True)
        }
    @classmethod
    def _make_iet_passes_mapper(cls, **kwargs):
        """Available IET-level passes."""
        options = kwargs['options']
        platform = kwargs['platform']
        sregistry = kwargs['sregistry']
        parizer = cls._Target.Parizer(sregistry, options, platform)
        return {
            'denormals': avoid_denormals,
            'optcomms': optimize_halospots,
            'blocking': partial(relax_incr_dimensions, sregistry=sregistry),
            'parallel': parizer.make_parallel,
            'openmp': parizer.make_parallel,
            'mpi': partial(mpiize, mode=options['mpi']),
            'simd': partial(parizer.make_simd),
            'prodders': hoist_prodders,
            'init': parizer.initialize
        }
    # Passes a user may legitimately request on this backend
    _known_passes = (
        # DSL
        'collect-derivs',
        # Expressions
        'buffering',
        # Clusters
        'blocking', 'topofuse', 'fuse', 'factorize', 'cire-sops', 'cse', 'lift',
        'opt-pows',
        # IET
        'denormals', 'optcomms', 'openmp', 'mpi', 'simd', 'prodders',
    )
    # Device-only passes, rejected by this backend
    _known_passes_disabled = ('tasking', 'streaming', 'gpu-direct', 'openacc')
    assert not (set(_known_passes) & set(_known_passes_disabled))
# Language level
class Cpu64NoopCOperator(Cpu64NoopOperator):
    """Noop CPU operator emitting plain C."""
    _Target = CTarget
class Cpu64NoopOmpOperator(Cpu64NoopOperator):
    """Noop CPU operator emitting C + OpenMP."""
    _Target = OmpTarget
class Cpu64AdvCOperator(Cpu64AdvOperator):
    """Advanced CPU operator emitting plain C."""
    _Target = CTarget
class Cpu64AdvOmpOperator(Cpu64AdvOperator):
    """Advanced CPU operator emitting C + OpenMP."""
    _Target = OmpTarget
class Cpu64FsgCOperator(Cpu64FsgOperator):
    """Small-grids CPU operator emitting plain C."""
    _Target = CTarget
class Cpu64FsgOmpOperator(Cpu64FsgOperator):
    """Small-grids CPU operator emitting C + OpenMP."""
    _Target = OmpTarget
| {
"repo_name": "opesci/devito",
"path": "devito/core/cpu.py",
"copies": "1",
"size": "12179",
"license": "mit",
"hash": 3881410776799414000,
"line_mean": 31.05,
"line_max": 89,
"alpha_frac": 0.6213974875,
"autogenerated": false,
"ratio": 3.8638959390862944,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4985293426586294,
"avg_score": null,
"num_lines": null
} |
from functools import partial
import numpy as np
from devito.core.operator import CoreOperator, CustomOperator
from devito.exceptions import InvalidOperator
from devito.passes.equations import collect_derivatives, buffering
from devito.passes.clusters import (Lift, Streaming, Tasker, blocking, cire, cse,
eliminate_arrays, extract_increments, factorize,
fuse, optimize_pows)
from devito.passes.iet import (DeviceOmpTarget, DeviceAccTarget, optimize_halospots,
mpiize, hoist_prodders, is_on_device)
from devito.tools import as_tuple, timed_pass
# Public API: the concrete device operator classes exported by this module
__all__ = ['DeviceNoopOperator', 'DeviceAdvOperator', 'DeviceCustomOperator',
           'DeviceNoopOmpOperator', 'DeviceAdvOmpOperator', 'DeviceFsgOmpOperator',
           'DeviceCustomOmpOperator', 'DeviceNoopAccOperator', 'DeviceAdvAccOperator',
           'DeviceFsgAccOperator', 'DeviceCustomAccOperator']
class DeviceOperatorMixin(object):
    BLOCK_LEVELS = 1
    """
    Loop blocking depth. So, 1 => "blocks", 2 => "blocks" and "sub-blocks",
    3 => "blocks", "sub-blocks", and "sub-sub-blocks", ...
    """
    CIRE_MINCOST_INV = 50
    """
    Minimum operation count of a Dimension-invariant aliasing expression to be
    optimized away. Dimension-invariant aliases are lifted outside of one or more
    invariant loop(s), so they require tensor temporaries that can be potentially
    very large (e.g., the whole domain in the case of time-invariant aliases).
    """
    CIRE_MINCOST_SOPS = 10
    """
    Minimum operation count of a sum-of-product aliasing expression to be optimized away.
    """
    PAR_CHUNK_NONAFFINE = 3
    """
    Coefficient to adjust the chunk size in non-affine parallel loops.
    """
    GPU_FIT = 'all-fallback'
    """
    Assuming all functions fit into the gpu memory.
    """
    @classmethod
    def _normalize_kwargs(cls, **kwargs):
        """Validate and fill in defaults for the user-supplied options."""
        o = {}
        oo = kwargs['options']
        # Execution modes
        o['mpi'] = oo.pop('mpi')
        o['parallel'] = True
        # Buffering
        o['buf-async-degree'] = oo.pop('buf-async-degree', None)
        # Blocking
        o['blockinner'] = oo.pop('blockinner', True)
        o['blocklevels'] = oo.pop('blocklevels', cls.BLOCK_LEVELS)
        # CIRE
        o['min-storage'] = False
        o['cire-rotate'] = False
        o['cire-maxpar'] = oo.pop('cire-maxpar', True)
        o['cire-maxalias'] = oo.pop('cire-maxalias', False)
        o['cire-ftemps'] = oo.pop('cire-ftemps', False)
        o['cire-mincost'] = {
            'invariants': {
                'scalar': 1,
                'tensor': oo.pop('cire-mincost-inv', cls.CIRE_MINCOST_INV),
            },
            'sops': oo.pop('cire-mincost-sops', cls.CIRE_MINCOST_SOPS)
        }
        # GPU parallelism
        o['par-collapse-ncores'] = 1  # Always use a collapse clause
        o['par-collapse-work'] = 1  # Always use a collapse clause
        o['par-chunk-nonaffine'] = oo.pop('par-chunk-nonaffine', cls.PAR_CHUNK_NONAFFINE)
        o['par-dynamic-work'] = np.inf  # Always use static scheduling
        o['par-nested'] = np.inf  # Never use nested parallelism
        o['par-disabled'] = oo.pop('par-disabled', True)  # No host parallelism by default
        o['gpu-direct'] = oo.pop('gpu-direct', True)
        # NOTE: the fallback `cls._normalize_gpu_fit(**kwargs)` is evaluated
        # eagerly (it is an ordinary argument to `pop`), so it runs -- and
        # reads kwargs['mode'] -- even when the user supplied 'gpu-fit'
        o['gpu-fit'] = as_tuple(oo.pop('gpu-fit', cls._normalize_gpu_fit(**kwargs)))
        if oo:
            raise InvalidOperator("Unsupported optimization options: [%s]"
                                  % ", ".join(list(oo)))
        kwargs['options'].update(o)
        return kwargs
    @classmethod
    def _normalize_gpu_fit(cls, **kwargs):
        """Default `gpu-fit`: None for tasking/streaming modes, else GPU_FIT."""
        if any(i in kwargs['mode'] for i in ['tasking', 'streaming']):
            return None
        else:
            return cls.GPU_FIT
# Mode level
class DeviceNoopOperator(DeviceOperatorMixin, CoreOperator):

    @classmethod
    @timed_pass(name='specializing.IET')
    def _specialize_iet(cls, graph, **kwargs):
        """Minimal IET lowering: MPI, device offloading, symbol definitions."""
        options = kwargs['options']
        platform = kwargs['platform']
        sregistry = kwargs['sregistry']

        # Distributed-memory parallelism, if requested
        if options['mpi']:
            mpiize(graph, mode=options['mpi'])

        # Offload to the device
        parallelizer = cls._Target.Parizer(sregistry, options, platform)
        parallelizer.make_parallel(graph)

        # Symbol definitions
        cls._Target.DataManager(sregistry, options).process(graph)

        # Initialize the target-language runtime
        parallelizer.initialize(graph)

        return graph
class DeviceAdvOperator(DeviceOperatorMixin, CoreOperator):
    """Device operator applying the full 'advanced' optimization pipeline."""
    @classmethod
    @timed_pass(name='specializing.DSL')
    def _specialize_dsl(cls, expressions, **kwargs):
        """Specialize the DSL-level expressions (derivative collection)."""
        expressions = collect_derivatives(expressions)
        return expressions
    @classmethod
    @timed_pass(name='specializing.Clusters')
    def _specialize_clusters(cls, clusters, **kwargs):
        """Apply the Cluster-level pipeline (no blocking at this stage)."""
        options = kwargs['options']
        platform = kwargs['platform']
        sregistry = kwargs['sregistry']
        # Toposort+Fusion (the former to expose more fusion opportunities)
        clusters = fuse(clusters, toposort=True)
        # Hoist and optimize Dimension-invariant sub-expressions
        clusters = cire(clusters, 'invariants', sregistry, options, platform)
        clusters = Lift().process(clusters)
        # Reduce flops (potential arithmetic alterations)
        clusters = extract_increments(clusters, sregistry)
        clusters = cire(clusters, 'sops', sregistry, options, platform)
        clusters = factorize(clusters)
        clusters = optimize_pows(clusters)
        # Reduce flops (no arithmetic alterations)
        clusters = cse(clusters, sregistry)
        # Lifting may create fusion opportunities, which in turn may enable
        # further optimizations
        clusters = fuse(clusters)
        clusters = eliminate_arrays(clusters)
        return clusters
    @classmethod
    @timed_pass(name='specializing.IET')
    def _specialize_iet(cls, graph, **kwargs):
        """Apply the IET-level pipeline: halo opts, MPI, offloading, misc."""
        options = kwargs['options']
        platform = kwargs['platform']
        sregistry = kwargs['sregistry']
        # Distributed-memory parallelism
        optimize_halospots(graph)
        if options['mpi']:
            mpiize(graph, mode=options['mpi'])
        # GPU parallelism
        parizer = cls._Target.Parizer(sregistry, options, platform)
        parizer.make_parallel(graph)
        # Misc optimizations
        hoist_prodders(graph)
        # Symbol definitions
        cls._Target.DataManager(sregistry, options).process(graph)
        # Initialize the target-language runtime
        parizer.initialize(graph)
        # TODO: This should be moved right below the `mpiize` pass, but currently calling
        # `make_gpudirect` before Symbol definitions` block would create Blocks before
        # creating C variables. That would lead to MPI_Request variables being local to
        # their blocks. This way, it would generate incorrect C code.
        if options['gpu-direct']:
            parizer.make_gpudirect(graph)
        return graph
class DeviceFsgOperator(DeviceAdvOperator):
    """
    Operator with performance optimizations tailored "For small grids" ("Fsg").
    """
    # Note: currently mimics DeviceAdvOperator (no Fsg-specific passes yet).
    # Will see if this will change in the future
    pass
class DeviceCustomOperator(DeviceOperatorMixin, CustomOperator):
    """Device operator whose optimization passes are chosen by the user."""
    @classmethod
    def _make_dsl_passes_mapper(cls, **kwargs):
        """Available DSL-level passes."""
        return {
            'collect-derivs': collect_derivatives,
        }
    @classmethod
    def _make_exprs_passes_mapper(cls, **kwargs):
        """Available expression-level passes."""
        options = kwargs['options']
        # This callback is used by `buffering` to replace host Functions with
        # Arrays, used as device buffers for streaming-in and -out of data
        def callback(f):
            if not is_on_device(f, options['gpu-fit']):
                return [f.time_dim]
            else:
                return None
        return {
            'buffering': lambda i: buffering(i, callback, options)
        }
    @classmethod
    def _make_clusters_passes_mapper(cls, **kwargs):
        """Available Cluster-level passes."""
        options = kwargs['options']
        platform = kwargs['platform']
        sregistry = kwargs['sregistry']
        runs_on_host, reads_if_on_host = make_callbacks(options)
        return {
            'blocking': lambda i: blocking(i, options),
            'tasking': Tasker(runs_on_host).process,
            'streaming': Streaming(reads_if_on_host).process,
            'factorize': factorize,
            'fuse': fuse,
            'lift': lambda i: Lift().process(cire(i, 'invariants', sregistry,
                                                  options, platform)),
            'cire-sops': lambda i: cire(i, 'sops', sregistry, options, platform),
            'cse': lambda i: cse(i, sregistry),
            'opt-pows': optimize_pows,
            'topofuse': lambda i: fuse(i, toposort=True)
        }
    @classmethod
    def _make_iet_passes_mapper(cls, **kwargs):
        """Available IET-level passes."""
        options = kwargs['options']
        platform = kwargs['platform']
        sregistry = kwargs['sregistry']
        parizer = cls._Target.Parizer(sregistry, options, platform)
        orchestrator = cls._Target.Orchestrator(sregistry)
        return {
            'optcomms': partial(optimize_halospots),
            'parallel': parizer.make_parallel,
            'orchestrate': partial(orchestrator.process),
            'mpi': partial(mpiize, mode=options['mpi']),
            'prodders': partial(hoist_prodders),
            'gpu-direct': partial(parizer.make_gpudirect),
            'init': parizer.initialize
        }
    # Passes a user may legitimately request on this backend
    _known_passes = (
        # DSL
        'collect-derivs',
        # Expressions
        'buffering',
        # Clusters
        'blocking', 'tasking', 'streaming', 'factorize', 'fuse', 'lift',
        'cire-sops', 'cse', 'opt-pows', 'topofuse',
        # IET
        'optcomms', 'orchestrate', 'parallel', 'mpi', 'prodders', 'gpu-direct'
    )
    # CPU-only passes, rejected by this backend
    _known_passes_disabled = ('denormals', 'simd')
    assert not (set(_known_passes) & set(_known_passes_disabled))
# Language level
# OpenMP
class DeviceOmpOperatorMixin(object):

    """Mixin selecting OpenMP offloading as the target language."""

    _Target = DeviceOmpTarget

    @classmethod
    def _normalize_kwargs(cls, **kwargs):
        """Normalize options, forcing `openmp=True` for this backend."""
        opts = kwargs['options']
        # Drop any user-supplied value; `openmp` is unconditionally re-set
        # below, after the parent has validated the remaining options
        opts.pop('openmp', None)
        kwargs = super()._normalize_kwargs(**kwargs)
        opts['openmp'] = True
        return kwargs
class DeviceNoopOmpOperator(DeviceOmpOperatorMixin, DeviceNoopOperator):
    """Noop device operator targeting OpenMP offloading."""
    pass
class DeviceAdvOmpOperator(DeviceOmpOperatorMixin, DeviceAdvOperator):
    """Advanced device operator targeting OpenMP offloading."""
    pass
class DeviceFsgOmpOperator(DeviceOmpOperatorMixin, DeviceFsgOperator):
    """Small-grids device operator targeting OpenMP offloading."""
    pass
class DeviceCustomOmpOperator(DeviceOmpOperatorMixin, DeviceCustomOperator):
    """User-driven device operator targeting OpenMP offloading."""
    _known_passes = DeviceCustomOperator._known_passes + ('openmp',)
    assert not (set(_known_passes) & set(DeviceCustomOperator._known_passes_disabled))
    @classmethod
    def _make_iet_passes_mapper(cls, **kwargs):
        """Extend the parent mapper with an 'openmp' alias for 'parallel'."""
        mapper = super()._make_iet_passes_mapper(**kwargs)
        mapper['openmp'] = mapper['parallel']
        return mapper
# OpenACC
class DeviceAccOperatorMixin(object):
    """Mixin selecting OpenACC as the offloading language."""
    _Target = DeviceAccTarget
    @classmethod
    def _normalize_kwargs(cls, **kwargs):
        """Normalize options, forcing `openacc=True` for this backend."""
        oo = kwargs['options']
        # `openmp` may or may not have been provided; either way it is
        # dropped before the parent validates the remaining options
        oo.pop('openmp', None)
        kwargs = super()._normalize_kwargs(**kwargs)
        oo['openacc'] = True
        return kwargs
class DeviceNoopAccOperator(DeviceAccOperatorMixin, DeviceNoopOperator):
    """Noop device operator targeting OpenACC."""
    pass
class DeviceAdvAccOperator(DeviceAccOperatorMixin, DeviceAdvOperator):
    """Advanced device operator targeting OpenACC."""
    pass
class DeviceFsgAccOperator(DeviceAccOperatorMixin, DeviceFsgOperator):
    """Small-grids device operator targeting OpenACC."""
    pass
class DeviceCustomAccOperator(DeviceAccOperatorMixin, DeviceCustomOperator):
    """User-driven device operator targeting OpenACC."""
    @classmethod
    def _make_iet_passes_mapper(cls, **kwargs):
        """Extend the parent mapper with an 'openacc' alias for 'parallel'."""
        mapper = super()._make_iet_passes_mapper(**kwargs)
        mapper['openacc'] = mapper['parallel']
        return mapper
    _known_passes = DeviceCustomOperator._known_passes + ('openacc',)
    assert not (set(_known_passes) & set(DeviceCustomOperator._known_passes_disabled))
# Utils
def make_callbacks(options):
    """
    Build the options-dependent callbacks consumed by various compiler passes.
    """
    def _hosted(f):
        # A Function that does not fit on the device must live on the host.
        # Note: options['gpu-fit'] is looked up at call time, not captured
        return not is_on_device(f, options['gpu-fit'])

    def runs_on_host(c):
        # A Cluster is kept on the host iff it writes to a host Function;
        # everything else gets offloaded to the device
        return any(_hosted(f) for f in c.scope.writes)

    def reads_if_on_host(c):
        if runs_on_host(c):
            return []
        return [f for f in c.scope.reads if _hosted(f)]

    return runs_on_host, reads_if_on_host
| {
"repo_name": "opesci/devito",
"path": "devito/core/gpu.py",
"copies": "1",
"size": "12835",
"license": "mit",
"hash": -4925338677373059000,
"line_mean": 30.5356265356,
"line_max": 90,
"alpha_frac": 0.6247760031,
"autogenerated": false,
"ratio": 3.900030385900942,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5024806389000942,
"avg_score": null,
"num_lines": null
} |
from functools import partial
import numpy as np
from ...external.qt import QtGui
from ...external.qt.QtCore import Qt
from ...core import message as msg
from ...core import Data
from ...core.callback_property import add_callback
from ...clients.histogram_client import HistogramClient
from ..ui.histogramwidget import Ui_HistogramWidget
from ..glue_toolbar import GlueToolbar
from ..mouse_mode import HRangeMode
from .data_viewer import DataViewer
from .mpl_widget import MplWidget
from ..qtutil import pretty_number
WARN_SLOW = 10000000
def connect_int_spin(client, prop, widget):
    """Bidirectionally bind an integer client property to a Qt spin box.

    Client-side changes push into the widget via add_callback; widget edits
    write back onto the client through setattr.
    """
    add_callback(client, prop, widget.setValue)
    widget.valueChanged.connect(partial(setattr, client, prop))
def _hash(x):
return str(id(x))
class HistogramWidget(DataViewer):
LABEL = "Histogram"
    def __init__(self, data, parent=None):
        """Build the histogram viewer: canvas, options UI, client, toolbar."""
        # NOTE(review): `self` is passed explicitly in addition to the bound
        # super() call, so DataViewer.__init__ receives (self, parent) --
        # looks unintentional; confirm against DataViewer's signature
        super(HistogramWidget, self).__init__(self, parent)
        self.central_widget = MplWidget()
        self.setCentralWidget(self.central_widget)
        self.option_widget = QtGui.QWidget()
        self.ui = Ui_HistogramWidget()
        self.ui.setupUi(self.option_widget)
        self._tweak_geometry()
        self.client = HistogramClient(data,
                                      self.central_widget.canvas.fig,
                                      artist_container=self._container)
        # Only accept numeric text (7 decimals) in the x-limit line edits
        validator = QtGui.QDoubleValidator(None)
        validator.setDecimals(7)
        self.ui.xmin.setValidator(validator)
        self.ui.xmax.setValidator(validator)
        lo, hi = self.client.xlimits
        self.ui.xmin.setText(str(lo))
        self.ui.xmax.setText(str(hi))
        self.make_toolbar()
        self._connect()
        self._data = data
        self._component_hashes = {}  # maps _hash(componentID) -> componentID
    def _tweak_geometry(self):
        """Give the canvas a sensible default size and fit the window to it."""
        self.central_widget.resize(600, 400)
        self.resize(self.central_widget.size())
    def _connect(self):
        """Wire UI widgets to client properties and local handlers."""
        ui = self.ui
        cl = self.client
        # Changing the plotted attribute also refreshes the limit labels
        ui.attributeCombo.currentIndexChanged.connect(
            self._set_attribute_from_combo)
        ui.attributeCombo.currentIndexChanged.connect(
            self._update_minmax_labels)
        connect_int_spin(cl, 'nbins', ui.binSpinBox)
        # Each checkbox simply sets the like-named client property
        ui.normalized_box.toggled.connect(partial(setattr, cl, 'normed'))
        ui.autoscale_box.toggled.connect(partial(setattr, cl, 'autoscale'))
        ui.cumulative_box.toggled.connect(partial(setattr, cl, 'cumulative'))
        ui.xlog_box.toggled.connect(partial(setattr, cl, 'xlog'))
        ui.ylog_box.toggled.connect(partial(setattr, cl, 'ylog'))
        ui.xmin.editingFinished.connect(self._set_limits)
        ui.xmax.editingFinished.connect(self._set_limits)
def _set_limits(self):
lo = float(self.ui.xmin.text())
hi = float(self.ui.xmax.text())
self.client.xlimits = lo, hi
def _update_minmax_labels(self):
lo, hi = pretty_number(self.client.xlimits)
self.ui.xmin.setText(lo)
self.ui.xmax.setText(hi)
def make_toolbar(self):
result = GlueToolbar(self.central_widget.canvas, self,
name='Histogram')
for mode in self._mouse_modes():
result.add_mode(mode)
self.addToolBar(result)
return result
def _mouse_modes(self):
axes = self.client.axes
rect = HRangeMode(axes, roi_callback=self.apply_roi)
return [rect]
def apply_roi(self, mode):
roi = mode.roi()
self.client.apply_roi(roi)
def _update_attributes(self):
"""Repopulate the combo box that selects the quantity to plot"""
combo = self.ui.attributeCombo
component = self.component
combo.blockSignals(True)
combo.clear()
#implementation note:
#PySide doesn't robustly store python objects with setData
#use _hash(x) instead
model = QtGui.QStandardItemModel()
data_ids = set(_hash(d) for d in self._data)
self._component_hashes = {_hash(c): c for d in self._data
for c in d.components}
for d in self._data:
if d not in self._container:
continue
item = QtGui.QStandardItem(d.label)
item.setData(_hash(d), role=Qt.UserRole)
assert item.data(Qt.UserRole) == _hash(d)
item.setFlags(item.flags() & ~Qt.ItemIsEnabled)
model.appendRow(item)
for c in d.visible_components:
if not d.get_component(c).numeric:
continue
item = QtGui.QStandardItem(c.label)
item.setData(_hash(c), role=Qt.UserRole)
model.appendRow(item)
combo.setModel(model)
#separators below data items
for i in range(combo.count()):
if combo.itemData(i) in data_ids:
combo.insertSeparator(i + 1)
combo.blockSignals(False)
if component is not None:
self.component = component
else:
combo.setCurrentIndex(2) # skip first data + separator
self._set_attribute_from_combo()
@property
def component(self):
combo = self.ui.attributeCombo
index = combo.currentIndex()
return self._component_hashes.get(combo.itemData(index), None)
@component.setter
def component(self, component):
combo = self.ui.attributeCombo
#combo.findData doesn't seem to work in ...external.qt
for i in range(combo.count()):
data = combo.itemData(i)
if data == _hash(component):
combo.setCurrentIndex(i)
return
raise IndexError("Component not present: %s" % component)
def _set_attribute_from_combo(self):
self.client.set_component(self.component)
self._update_window_title()
def add_data(self, data):
""" Add data item to combo box.
If first addition, also update attributes """
if self.data_present(data):
return True
if data.size > WARN_SLOW and not self._confirm_large_data(data):
return False
self.client.add_layer(data)
self._update_attributes()
self._update_minmax_labels()
return True
def add_subset(self, subset):
pass
def _remove_data(self, data):
""" Remove data item from the combo box """
pass
def data_present(self, data):
return data in self._container
def register_to_hub(self, hub):
super(HistogramWidget, self).register_to_hub(hub)
self.client.register_to_hub(hub)
hub.subscribe(self,
msg.DataCollectionDeleteMessage,
handler=lambda x: self._remove_data(x.data))
hub.subscribe(self,
msg.DataUpdateMessage,
handler=lambda *args: self._update_labels())
hub.subscribe(self,
msg.ComponentsChangedMessage,
handler=lambda x: self._update_attributes())
def unregister(self, hub):
self.client.unregister(hub)
hub.unsubscribe_all(self)
def _update_window_title(self):
c = self.client.component
if c is not None:
label = str(c.label)
else:
label = 'Histogram'
self.setWindowTitle(label)
def _update_labels(self):
self._update_window_title()
self._update_attributes()
def __str__(self):
return "Histogram Widget"
def options_widget(self):
return self.option_widget
| {
"repo_name": "glue-viz/glue-qt",
"path": "glue/qt/widgets/histogram_widget.py",
"copies": "1",
"size": "7610",
"license": "bsd-3-clause",
"hash": 2439217965677954000,
"line_mean": 31.660944206,
"line_max": 77,
"alpha_frac": 0.602890933,
"autogenerated": false,
"ratio": 4.003156233561284,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5106047166561284,
"avg_score": null,
"num_lines": null
} |
from functools import partial
import numpy as np
from skimage import img_as_float, img_as_uint
from skimage import color, data, filter
from skimage.color.adapt_rgb import adapt_rgb, each_channel, hsv_value
# Down-sample image for quicker testing.
COLOR_IMAGE = data.lena()[::5, ::5]
GRAY_IMAGE = data.camera()[::5, ::5]
# Smoothing width shared by the gaussian-filter tests below.
SIGMA = 3
smooth = partial(filter.gaussian_filter, sigma=SIGMA)
# Comparisons are on float images; use a loose absolute tolerance.
assert_allclose = partial(np.testing.assert_allclose, atol=1e-8)
@adapt_rgb(each_channel)
def edges_each(image):
    """Sobel edges, adapted to RGB input channel-wise via ``each_channel``."""
    return filter.sobel(image)
@adapt_rgb(each_channel)
def smooth_each(image, sigma):
    """Gaussian smoothing, adapted to RGB input channel-wise."""
    return filter.gaussian_filter(image, sigma)
@adapt_rgb(hsv_value)
def edges_hsv(image):
    """Sobel edges, adapted to RGB input via the HSV value channel."""
    return filter.sobel(image)
@adapt_rgb(hsv_value)
def smooth_hsv(image, sigma):
    """Gaussian smoothing, adapted to RGB input via the HSV value channel."""
    return filter.gaussian_filter(image, sigma)
@adapt_rgb(hsv_value)
def edges_hsv_uint(image):
    """Sobel edges on the HSV value channel, cast to uint to exercise the
    adapter's dtype handling (see test_hsv_value_with_non_float_output)."""
    return img_as_uint(filter.sobel(image))
def test_gray_scale_image():
    # `adapt_rgb` handles gray-scale input itself, so exercising one adapter
    # (`each_channel`) is enough -- `hsv_value` shares that code path.
    expected = filter.sobel(GRAY_IMAGE)
    assert_allclose(edges_each(GRAY_IMAGE), expected)
def test_each_channel():
    # Each output channel must equal the filter applied to that channel alone.
    filtered = edges_each(COLOR_IMAGE)
    for idx in range(filtered.shape[-1]):
        expected = img_as_float(filter.sobel(COLOR_IMAGE[:, :, idx]))
        assert_allclose(filtered[..., idx], expected)
def test_each_channel_with_filter_argument():
    # Extra filter arguments (sigma) must be forwarded per channel.
    filtered = smooth_each(COLOR_IMAGE, SIGMA)
    for idx in range(filtered.shape[-1]):
        assert_allclose(filtered[..., idx], smooth(COLOR_IMAGE[:, :, idx]))
def test_hsv_value():
    # Filtering through `hsv_value` should act on the value channel only.
    filtered = edges_hsv(COLOR_IMAGE)
    expected = filter.sobel(color.rgb2hsv(COLOR_IMAGE)[:, :, 2])
    assert_allclose(color.rgb2hsv(filtered)[:, :, 2], expected)
def test_hsv_value_with_filter_argument():
    # Extra filter arguments must also be forwarded through `hsv_value`.
    filtered = smooth_hsv(COLOR_IMAGE, SIGMA)
    expected = smooth(color.rgb2hsv(COLOR_IMAGE)[:, :, 2])
    assert_allclose(color.rgb2hsv(filtered)[:, :, 2], expected)
def test_hsv_value_with_non_float_output():
    # `rgb2hsv` yields a float image while `edges_hsv_uint` returns uint; the
    # adapter must reconcile the dtype mismatch when writing the value
    # channel back into the HSV image.
    filtered = edges_hsv_uint(COLOR_IMAGE)
    result_value = color.rgb2hsv(filtered)[:, :, 2]
    expected_value = filter.sobel(color.rgb2hsv(COLOR_IMAGE)[:, :, 2])
    # Tolerance relaxed because of the dtype round-trip.
    assert_allclose(result_value, expected_value, rtol=1e-5, atol=1e-5)
| {
"repo_name": "SamHames/scikit-image",
"path": "skimage/color/tests/test_adapt_rgb.py",
"copies": "1",
"size": "2489",
"license": "bsd-3-clause",
"hash": -4387174030869754400,
"line_mean": 28.9879518072,
"line_max": 78,
"alpha_frac": 0.6978706308,
"autogenerated": false,
"ratio": 3.1151439299123904,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43130145607123904,
"avg_score": null,
"num_lines": null
} |
from functools import partial
import numpy as np
try:
from scipy import array, r_, ones, arange, sort, diag, cos, rand, pi
from scipy.linalg import eigh, orth, cho_factor, cho_solve
import scipy.sparse
from scipy.sparse.linalg import lobpcg
from scipy.sparse.linalg.interface import LinearOperator
except ImportError:
pass
from .common import Benchmark
def _sakurai(n):
""" Example taken from
T. Sakurai, H. Tadano, Y. Inadomi and U. Nagashima
A moment-based method for large-scale generalized eigenvalue problems
Appl. Num. Anal. Comp. Math. Vol. 1 No. 2 (2004) """
A = scipy.sparse.eye(n, n)
d0 = array(r_[5, 6*ones(n-2), 5])
d1 = -4*ones(n)
d2 = ones(n)
B = scipy.sparse.spdiags([d2, d1, d0, d1, d2], [-2, -1, 0, 1, 2], n, n)
k = arange(1, n+1)
w_ex = sort(1. / (16.*pow(cos(0.5*k*pi/(n+1)), 4))) # exact eigenvalues
return A, B, w_ex
def _mikota_pair(n):
# Mikota pair acts as a nice test since the eigenvalues
# are the squares of the integers n, n=1,2,...
x = arange(1, n + 1)
B = diag(1. / x)
y = arange(n - 1, 0, -1)
z = arange(2 * n - 1, 0, -2)
A = diag(z) - diag(y, -1) - diag(y, 1)
return A.astype(float), B.astype(float)
def _as2d(ar):
if ar.ndim == 2:
return ar
else: # Assume 1!
aux = np.array(ar, copy=False)
aux.shape = (ar.shape[0], 1)
return aux
def _precond(LorU, lower, x):
    """Cholesky-based preconditioner: solve ``A y = x`` from the factor
    ``(LorU, lower)`` and return the solution as a 2-d column array."""
    solution = cho_solve((LorU, lower), x)
    return _as2d(solution)
class Bench(Benchmark):
    """asv benchmarks comparing sparse ``lobpcg`` against dense ``eigh`` on
    two classic generalized eigenvalue problems (Mikota pair, Sakurai)."""
    # First params axis (problem size n) is filled in per-method in __init__;
    # second axis selects the solver.
    params = [
        [],
        ['lobpcg', 'eigh']
    ]
    param_names = ['n', 'solver']
    def __init__(self):
        # asv reads `params`/`setup` from the benchmark *functions*, so each
        # time_* method gets its own problem sizes and setup routine by
        # mutating the underlying function object via __func__.
        self.time_mikota.__func__.params = list(self.params)
        self.time_mikota.__func__.params[0] = [128, 256, 512, 1024, 2048]
        self.time_mikota.__func__.setup = self.setup_mikota
        self.time_sakurai.__func__.params = list(self.params)
        self.time_sakurai.__func__.params[0] = [50, 400]
        self.time_sakurai.__func__.setup = self.setup_sakurai
    def setup_mikota(self, n, solver):
        self.shape = (n, n)
        self.A, self.B = _mikota_pair(n)
        if solver == 'eigh' and n >= 512:
            # skip: slow, and not useful to benchmark
            raise NotImplementedError()
    def setup_sakurai(self, n, solver):
        self.shape = (n, n)
        # exact eigenvalues are returned but not needed for timing
        self.A, self.B, all_eigenvalues = _sakurai(n)
        # dense copies for the eigh variant
        self.A_dense = self.A.A
        self.B_dense = self.B.A
    def time_mikota(self, n, solver):
        m = 10  # number of eigenpairs requested
        if solver == 'lobpcg':
            X = rand(n, m)
            X = orth(X)
            # Cholesky-based preconditioner built from A.
            LorU, lower = cho_factor(self.A, lower=0, overwrite_a=0)
            M = LinearOperator(self.shape,
                               matvec=partial(_precond, LorU, lower),
                               matmat=partial(_precond, LorU, lower))
            eigs, vecs = lobpcg(self.A, X, self.B, M, tol=1e-4, maxiter=40)
        else:
            eigh(self.A, self.B, eigvals_only=True, eigvals=(0, m - 1))
    def time_sakurai(self, n, solver):
        m = 3  # number of eigenpairs requested
        if solver == 'lobpcg':
            X = rand(n, m)
            eigs, vecs, resnh = lobpcg(self.A, X, self.B, tol=1e-6, maxiter=500,
                                       retResidualNormsHistory=1)
        else:
            eigh(self.A_dense, self.B_dense, eigvals_only=True, eigvals=(0, m - 1))
    # Retain old benchmark results (remove this if changing the benchmark)
    time_mikota.version = "a1fb679758f7e5cf79d18cc4930afdff999fccc142fe7a4f63e73b39ab1f58bb"
    time_sakurai.version = "7c38d449924fb71f777bd408072ecc883b8b05e53a6544e97da3887fbc10b235"
| {
"repo_name": "person142/scipy",
"path": "benchmarks/benchmarks/sparse_linalg_lobpcg.py",
"copies": "8",
"size": "3647",
"license": "bsd-3-clause",
"hash": 8071194480549724000,
"line_mean": 30.9912280702,
"line_max": 93,
"alpha_frac": 0.5667672059,
"autogenerated": false,
"ratio": 2.8626373626373627,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.7429404568537362,
"avg_score": null,
"num_lines": null
} |
from functools import partial
import numpy
from matplotlib import pyplot
from matplotlib import ticker
from pandas.api.types import CategoricalDtype
import seaborn
import probscale
from wqio import utils
from wqio import validate
def rotateTickLabels(ax, rotation, which, rotation_mode="anchor", ha="right"):
    """ Rotates the ticklabels of a matplotlib Axes

    Parameters
    ----------
    ax : matplotlib Axes
        The Axes object that will be modified.
    rotation : float
        The amount of rotation, in degrees, to be applied to the labels.
    which : string
        The axis whose ticklabels will be rotated. Valid values are 'x',
        'y', or 'both'.
    rotation_mode : string, optional
        The rotation point for the ticklabels. Highly recommended to use
        the default value ('anchor').
    ha : string
        The horizontal alignment of the ticks. Again, recommended to use
        the default ('right').

    Returns
    -------
    None

    """
    # 'both' recurses once per axis; guard clause keeps the main path flat.
    if which == "both":
        rotateTickLabels(ax, rotation, "x", rotation_mode=rotation_mode, ha=ha)
        rotateTickLabels(ax, rotation, "y", rotation_mode=rotation_mode, ha=ha)
        return
    if which == "x":
        axis = ax.xaxis
    elif which == "y":
        axis = ax.yaxis
    for label in axis.get_ticklabels():
        label.set_horizontalalignment(ha)
        label.set_rotation(rotation)
        label.set_rotation_mode(rotation_mode)
def log_formatter(use_1x=True, threshold=5):
    """Build a matplotlib tick formatter for log axes.

    Ticks within ``10**threshold`` of 1 are shown as plain comma-separated
    integers; ticks beyond the threshold are shown in scientific notation
    (``1 x 10^N`` when *use_1x* is true, else ``10^N``).
    """
    def _formatter(tick, pos=None, use_1x=True, threshold=3):
        """ Formats log axes as `1 x 10^N` when N > 4 or N < -4. """
        upper = 10 ** threshold
        if upper >= tick > 1:
            tick = "{:,d}".format(int(tick))
        elif tick > upper or tick < 10 ** (-1 * threshold):
            exponent = int(numpy.log10(tick))
            template = r"$1 \times 10 ^ {%d}$" if use_1x else r"$10 ^ {%d}$"
            tick = template % exponent
        return str(tick)

    bound = partial(_formatter, use_1x=use_1x, threshold=threshold)
    return ticker.FuncFormatter(bound)
def gridlines(
    ax, xlabel=None, ylabel=None, xscale=None, yscale=None, xminor=True, yminor=True
):
    """ Standard formatting for gridlines on a matplotlib Axes

    Parameters
    ----------
    ax : matplotlib Axes
        The Axes object that will be modified.
    xlabel, ylabel : string, optional
        The labels of the x- and y-axis.
    xscale, yscale : string, optional
        The scale of each axis. Can be 'linear', 'log', or 'prob'.
    xminor, yminor : bool, optional
        Toggles the grid on minor ticks. Has no effect if minor ticks
        are not present.

    Returns
    -------
    None

    """
    # Scales and labels are only applied when explicitly requested.
    for apply_setting, value in (
        (ax.set_xscale, xscale),
        (ax.set_yscale, yscale),
        (ax.set_xlabel, xlabel),
        (ax.set_ylabel, ylabel),
    ):
        if value is not None:
            apply_setting(value)
    # Major grid on both axes.
    ax.yaxis.grid(True, which="major", ls="-", alpha=0.35)
    ax.xaxis.grid(True, which="major", ls="-", alpha=0.35)
    # Fainter minor grid, per-axis opt-out.
    if xminor:
        ax.xaxis.grid(True, which="minor", ls="-", alpha=0.17)
    if yminor:
        ax.yaxis.grid(True, which="minor", ls="-", alpha=0.17)
def one2one(ax, set_limits=True, set_aspect=True, **kwargs):
    """Draw a 1:1 reference line spanning the axes' combined data range.

    Remaining keyword arguments (minus ``label``, default ``"1:1 Line"``)
    are forwarded to ``ax.plot``. Returns the list of plotted lines.
    """
    label = kwargs.pop("label", "1:1 Line")
    both_limits = [ax.get_xlim(), ax.get_ylim()]
    low = numpy.min(both_limits)
    high = numpy.max(both_limits)
    if set_limits:
        ax.set_xlim((low, high))
        ax.set_ylim((low, high))
    if set_aspect:
        ax.set_aspect("equal")
    return ax.plot([low, high], [low, high], label=label, **kwargs)
def jointplot(
    x=None,
    y=None,
    data=None,
    xlabel=None,
    ylabel=None,
    color=None,
    zeromin=True,
    one2one=True,
):
    """ Plots the joint distribution of two variables via seaborn

    Parameters
    ----------
    x, y : array-like or string
        Sequences of values or column names found within ``data``.
    data : pandas DataFrame or None, optional
        An optional DataFrame containing the data.
    xlabel, ylabel : string, optional
        Overrides the default x- and y-axis labels.
    color : matplotlib color, optional
        Color used for the plot elements.
    zeromin : bool, optional
        When True (default), force lower axes limits to 0.
    one2one : bool, optional
        When True (default), plots the 1:1 line on the axis and sets
        the x- and y-axis limits to be equal.

    Returns
    -------
    jg : seaborn.JointGrid
    """
    # NOTE(review): the `one2one` parameter shadows the module-level
    # `one2one` function inside this body (the line is drawn manually
    # below rather than by calling that helper).
    jg = seaborn.jointplot(
        x=x, y=y, color=color, data=data, marginal_kws=dict(rug=True, kde=True)
    )
    # Fall back to whatever labels seaborn inferred from the data.
    if xlabel is None:
        xlabel = jg.ax_joint.get_xlabel()
    if ylabel is None:
        ylabel = jg.ax_joint.get_ylabel()
    jg.set_axis_labels(xlabel=xlabel, ylabel=ylabel)
    if zeromin:
        jg.ax_joint.set_xlim(left=0)
        jg.ax_joint.set_ylim(bottom=0)
    if one2one:
        # Make both axes span the same range, then draw the diagonal.
        ax_limit_min = numpy.min([jg.ax_joint.get_xlim(), jg.ax_joint.get_ylim()])
        ax_limit_max = numpy.max([jg.ax_joint.get_xlim(), jg.ax_joint.get_ylim()])
        jg.ax_joint.set_xlim(left=ax_limit_min, right=ax_limit_max)
        jg.ax_joint.set_ylim(bottom=ax_limit_min, top=ax_limit_max)
        jg.ax_joint.plot(
            [ax_limit_min, ax_limit_max],
            [ax_limit_min, ax_limit_max],
            marker="None",
            linestyle="-",
            linewidth=1.75,
            color=color or "k",
            alpha=0.45,
            label="1:1 line",
        )
        jg.ax_joint.legend(frameon=False, loc="upper left")
    return jg
def whiskers_and_fliers(x, q1=None, q3=None, transformout=None):
    """ Computes extent of whiskers and fliers on optionally transformed
    data for box and whisker plots.

    Parameters
    ----------
    x : array-like
        Sequence of optionally transformed data.
    q1, q3 : floats, optional
        First and third quartiles of the optionally transformed data.
        Computed from *x* when omitted.
    transformout : callable, optional
        Function to un-transform the results back into the original
        space of the data.

    Returns
    -------
    whiskers_and_fliers : dict
        Dictionary with keys ``"fliers"``, ``"whishi"``, and ``"whislo"``.

    Examples
    --------
    >>> x = numpy.random.lognormal(size=37)
    >>> whisk_fly = whiskers_and_fliers(numpy.log(x), transformout=numpy.exp)

    See also
    --------
    wqio.utils.figutils.boxplot

    """
    if transformout is None:
        # Identity transform when none is supplied.
        def transformout(value):
            return value

    if q1 is None:
        q1 = numpy.percentile(x, 25)
    if q3 is None:
        q3 = numpy.percentile(x, 75)

    iqr = q3 - q1
    low_cutoff = q1 - (1.5 * iqr)
    high_cutoff = q3 + (1.5 * iqr)

    # Low whisker: smallest point above the cutoff, clamped at q1.
    inside_low = numpy.compress(x >= low_cutoff, x)
    if len(inside_low) == 0 or numpy.min(inside_low) > q1:
        whislo = q1
    else:
        whislo = numpy.min(inside_low)

    # High whisker: largest point below the cutoff, clamped at q3.
    inside_high = numpy.compress(x <= high_cutoff, x)
    if len(inside_high) == 0 or numpy.max(inside_high) < q3:
        whishi = q3
    else:
        whishi = numpy.max(inside_high)

    fliers = numpy.hstack(
        [
            transformout(numpy.compress(x < whislo, x)),
            transformout(numpy.compress(x > whishi, x)),
        ]
    )
    return {
        "fliers": fliers,
        "whishi": transformout(whishi),
        "whislo": transformout(whislo),
    }
def boxplot(
    boxplot_stats,
    ax=None,
    position=None,
    width=0.8,
    shownotches=True,
    color="b",
    marker="o",
    patch_artist=True,
    showmean=False,
):
    """
    Draws a boxplot on an axes

    Parameters
    ----------
    boxplot_stats : list of dicts
        List of matplotlib boxplot-compatible statistics to be plotted
    ax : matplotlib Axes, optional
        The axis on which the boxplot will be drawn.
    position : int or list of int, optional
        Location on the x-axis where the boxplot will be drawn.
    width : float, optional (default = 0.8)
        Width of the boxplots.
    shownotches : bool, optional (default = True)
        Toggles notched boxplots where the notches show a confidence
        interval around the median.
    color : string, optional (default = 'b')
        Matplotlib color used to plot the outliers, median or box, and
        the optional mean.
    marker : str, optional (default = 'o')
        Matplotlib marker used for the the outliers and optional mean.
    patch_artist : bool, optional (default = True)
        Toggles drawing the boxes as a patch filled in with ``color``
        and a black median or as a black where the median is drawn
        in the ``color``.
    showmean : bool, optional (default = False)
        Toggles inclusion of the means in the boxplots.

    Returns
    -------
    bp : dictionary of matplotlib artists
        The graphical elements of the boxplot.

    """
    fig, ax = validate.axes(ax)
    # Default: one slot per set of stats, starting at x = 1.
    if position is None:
        position = numpy.arange(len(boxplot_stats)) + 1
    elif numpy.isscalar(position):
        position = [position]
    meanprops = dict(
        marker=marker, markersize=6, markerfacecolor=color, markeredgecolor="Black"
    )
    flierprops = dict(
        marker=marker,
        markersize=4,
        zorder=4,
        markerfacecolor="none",
        markeredgecolor=color,
        alpha=1,
    )
    whiskerprops = dict(linestyle="-", color="k", linewidth=0.75, zorder=4)
    # Filled boxes get a black median line; unfilled boxes use the accent
    # color for the median instead.
    if patch_artist:
        medianprops = dict(linewidth=1.00, color="k", linestyle="-", zorder=5)
        boxprops = dict(
            edgecolor="k", facecolor=color, linewidth=0.75, zorder=4, alpha=0.5
        )
    else:
        medianprops = dict(linewidth=1.00, color=color, linestyle="-", zorder=3)
        boxprops = dict(color="k", linewidth=0.75, zorder=4)
    # ax.bxp draws from precomputed stats (no raw-data aggregation here).
    bp = ax.bxp(
        boxplot_stats,
        positions=position,
        widths=width,
        showmeans=showmean,
        meanprops=meanprops,
        flierprops=flierprops,
        whiskerprops=whiskerprops,
        medianprops=medianprops,
        boxprops=boxprops,
        shownotches=shownotches,
        showcaps=False,
        manage_ticks=False,
        patch_artist=patch_artist,
    )
    return bp
def probplot(
    data,
    ax=None,
    axtype="prob",
    yscale="log",
    xlabel=None,
    ylabel=None,
    bestfit=False,
    scatter_kws=None,
    line_kws=None,
    return_results=False,
):
    """ Probability, percentile, and quantile plots.

    Thin wrapper around ``probscale.viz.probplot`` with the probability
    axis fixed to the x-axis.

    Parameters
    ----------
    data : sequence or array-like
        1-dimensional data to be plotted
    ax : optional matplotlib axes object or None (default).
        The Axes on which to plot. If None is provided, one will be
        created.
    axtype : string (default = 'prob')
        Type of plot to be created. Options are:
            - 'prob': probabilty plot
            - 'pp': percentile plot
            - 'qq': quantile plot
    yscale : string (default = 'log')
        Scale for the y-axis. Use 'log' for logarithmic (default) or
        'linear'.
    xlabel, ylabel : string or None (default)
        Axis labels for the plot.
    bestfit : bool, optional (default is False)
        Specifies whether a best-fit line should be added to the
        plot.
    scatter_kws, line_kws : dictionary
        Dictionary of keyword arguments passed directly to `pyplot.plot`
        when drawing the scatter points and best-fit line, respectively.
    return_results : bool (default = False)
        If True a dictionary of results of is returned along with the
        figure. Keys are:
            q - array of quantiles
            x, y - arrays of data passed to function
            xhat, yhat - arrays of modeled data plotted in best-fit line
            res - a statsmodels Result object.

    Returns
    -------
    fig : matplotlib.Figure
    result : dictionary of linear fit results.

    """
    # Delegate everything; this function only fixes probax="x" and maps
    # the x/y-oriented parameter names onto probscale's prob/data names.
    output = probscale.viz.probplot(
        data,
        ax=ax,
        plottype=axtype,
        probax="x",
        datalabel=ylabel,
        problabel=xlabel,
        datascale=yscale,
        scatter_kws=scatter_kws,
        line_kws=line_kws,
        bestfit=bestfit,
        return_best_fit_results=return_results,
    )
    return output
def _connect_spines(left_ax, right_ax, left_y, right_y, linestyle="solid", **line_kwds):
    """ Connects the y-spines between two Axes

    Parameters
    ----------
    left_ax, right_ax : matplotlib Axes objects
        The Axes that need to be connected.
    left_y, right_y : float
        Values on the spines that will be connected.
    linestyle : string, optional (default = 'solid')
        The line style to use. Valid values are 'solid', 'dashed',
        'dashdot', 'dotted'.
    **line_kwds : keyword arguments
        Additional options for style the line.

    Returns
    -------
    connector : BboxConnector
        The weird mpl-line-like-thingy that connects the spines.

    """
    # Imported lazily so this plotting helper doesn't pull in the heavier
    # mpl_toolkits machinery at module import time.
    import matplotlib.transforms as mtrans
    import mpl_toolkits.axes_grid1.inset_locator as inset

    # Blended transforms: data coordinates along y, axes coordinates along x.
    left_trans = mtrans.blended_transform_factory(left_ax.transData, left_ax.transAxes)
    right_trans = mtrans.blended_transform_factory(
        right_ax.transData, right_ax.transAxes
    )
    # Map the requested data values into normalized axes coordinates.
    left_data_trans = left_ax.transScale + left_ax.transLimits
    right_data_trans = right_ax.transScale + right_ax.transLimits
    left_pos = left_data_trans.transform((0, left_y))[1]
    right_pos = right_data_trans.transform((0, right_y))[1]
    # Degenerate (zero-width) bbox spanning the two y positions; each side
    # is re-expressed in the corresponding Axes' blended transform.
    bbox = mtrans.Bbox.from_extents(0, left_pos, 0, right_pos)
    right_bbox = mtrans.TransformedBbox(bbox, right_trans)
    left_bbox = mtrans.TransformedBbox(bbox, left_trans)
    # deal with the linestyle
    connector = inset.BboxConnector(
        left_bbox, right_bbox, loc1=3, loc2=2, linestyle=linestyle, **line_kwds
    )
    # Allow the connector to be drawn in the gap between the Axes.
    connector.set_clip_on(False)
    left_ax.add_line(connector)
    return connector
def parallel_coordinates(
    dataframe, hue, cols=None, palette=None, showlegend=True, **subplot_kws
):
    """ Produce a parallel coordinates plot from a dataframe.

    Parameters
    ----------
    dataframe : pandas.DataFrame
        The data to be plotted.
    hue : string
        The column used to the determine assign the lines' colors.
    cols : list of strings, optional
        The non-hue columns to include. If None, all other columns are
        used.
    palette : string, optional
        Name of the seaborn color palette to use.
    showlegend : bool (default = True)
        Toggles including a legend on the plot.
    **subplot_kws : keyword arguments
        Options passed directly to pyplot.subplots()

    Returns
    -------
    fig : matplotlib Figure

    """
    # get the (non-hue) columns to plot
    if cols is None:
        cols = dataframe.columns.tolist()
        cols.remove(hue)

    # subset the data, putting the hue column last so each row ends with
    # its color key (used when drawing the connectors below)
    data = dataframe[[*cols, hue]]

    # these plots look ridiculous in anything other than 'ticks'
    with seaborn.axes_style("ticks"):
        # One Axes per plotted column; the connectors bridge adjacent Axes.
        fig, axes = pyplot.subplots(ncols=len(cols), **subplot_kws)
        hue_vals = dataframe[hue].unique()
        colors = seaborn.color_palette(palette=palette, n_colors=len(hue_vals))
        color_dict = {}
        # Proxy line artists for the legend (one per hue value).
        lines = []
        for h, c in zip(hue_vals, colors):
            lines.append(pyplot.Line2D([0], [0], linestyle="-", color=c, label=h))
            color_dict[h] = c
        for col, ax in zip(cols, axes):
            data_limits = [(0, dataframe[col].min()), (0, dataframe[col].max())]
            ax.set_xticks([0])
            ax.update_datalim(data_limits)
            ax.set_xticklabels([col])
            ax.autoscale(axis="y")
            ax.tick_params(axis="y", direction="inout")
            ax.tick_params(axis="x", direction="in")
        # Draw each record as a chain of spine-to-spine connectors; row[-1]
        # is the hue value thanks to the column ordering above.
        for row in data.values:
            for n, (ax1, ax2) in enumerate(zip(axes[:-1], axes[1:])):
                _connect_spines(ax1, ax2, row[n], row[n + 1], color=color_dict[row[-1]])
    if showlegend:
        fig.legend(lines, hue_vals)
    # Butt the Axes up against each other so the spines line up.
    fig.subplots_adjust(wspace=0)
    seaborn.despine(fig=fig, bottom=True, trim=True)
    return fig
def categorical_histogram(df, valuecol, bins, classifier=None, **factoropts):
    """ Plot a faceted, categorical histogram.

    Parameters
    ----------
    df : pandas.DataFrame
        Dataframe of storm information such as precipitation depth,
        duration, presence of outflow, flow volume, etc.
    valuecol : str
        The name of the column that should be categorized and plotted.
    bins : array-like
        The right-edges of the histogram bins.
    classifier : callable, optional
        Function-like object that classifies the values in ``valuecol``.
        Should accept a float scalar and return a string. Defaults to
        ``wqio.utils.classifier`` with ``units="mm"``.
    factoropts : keyword arguments, optional
        Options passed directly to seaborn.catplot

    Returns
    -------
    fig : seaborn.FacetGrid

    See also
    --------
    seaborn.catplot

    """

    def format_col(colname):
        # "storm_depth" -> "Storm Depth" for display purposes
        return colname.replace("_", " ").title()

    def process_column(colname):
        # format the column name, passing None through untouched
        if colname is not None:
            return format_col(colname)

    if classifier is None:
        # NOTE(review): the default classifier hard-codes units="mm";
        # confirm this is appropriate for all callers.
        classifier = partial(utils.classifier, bins=bins, units="mm")

    # Ordered categorical dtype so the facets sort by bin, not alphabetically.
    cats = utils.unique_categories(classifier, bins)
    cat_type = CategoricalDtype(cats, ordered=True)
    aspect = factoropts.pop("aspect", 1.6)
    display_col = format_col(valuecol)

    # Re-map the row/col/hue options onto the display-formatted column names.
    processed_opts = dict(
        row=process_column(factoropts.pop("row", None)),
        col=process_column(factoropts.pop("col", None)),
        hue=process_column(factoropts.pop("hue", None)),
        kind="count",
        aspect=aspect,
        sharex=True,
    )
    final_opts = {**factoropts, **processed_opts}
    # Classify values into ordered categories, swap the classified column in
    # under the original name, prettify all column names, then facet.
    # NOTE(review): the y-label string "Occurences" is misspelled, but it is
    # user-visible output -- changing it would change plot labels.
    fig = (
        df.assign(display=df[valuecol].apply(classifier).astype(cat_type))
        .drop([valuecol], axis=1)
        .rename(columns={"display": valuecol})
        .rename(columns=lambda c: format_col(c))
        .pipe((seaborn.catplot, "data"), x=display_col, **final_opts)
        .set_ylabels("Occurences")
    )
    return fig
| {
"repo_name": "phobson/wqio",
"path": "wqio/viz.py",
"copies": "2",
"size": "18243",
"license": "bsd-3-clause",
"hash": 333215633020895550,
"line_mean": 28.1888,
"line_max": 88,
"alpha_frac": 0.6080140328,
"autogenerated": false,
"ratio": 3.646412152708375,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0001893455209616882,
"num_lines": 625
} |
from functools import partial
import numpy
import chaospy
from .utils import combine_quadrature
def hypercube_quadrature(
    quad_func,
    order,
    domain,
    segments=None,
    auto_scale=True,
):
    """
    Enhance simple 1-dimensional unit quadrature with extra features.

    These features include handling of:

    * Distribution as domain by embedding density into the weights
    * Scale to any intervals
    * Multivariate support
    * Repeat quadrature into segments

    Args:
        quad_func (Callable):
            Function that creates quadrature abscissas and weights. If
            ``auto_scale`` is true, the function should be on the form:
            ``abscissas, weights = quad_func(order)`` and be defined on the unit
            interval. Otherwise the call signature should be
            ``abscissas, weights = quad_func(order, lower, upper)`` and it
            should be defined on interval bound by ``lower`` and ``upper``.
        order (int, Sequence[int]):
            The quadrature order passed to the quadrature function.
        domain (Tuple[float, float], :class:`chaospy.Distribution`):
            Either interval on the format ``(lower, upper)`` or a distribution
            to integrate over. If the latter, weights are adjusted to
            incorporate the density at abscissas.
        segments (Optional[int], Sequence[float]):
            The number segments to split the interval on. If sequence is
            provided, use as segment edges instead.
        kwargs (Any):
            Extra keyword arguments passed to `quad_func`.

    Returns:
        Same as ``quad_func`` but adjusted to incorporate extra features.

    Examples:
        >>> def my_quad(order):
        ...     return (numpy.linspace(0, 1, order+1),
        ...             1./numpy.full(order+1, order+2))
        >>> my_quad(2)
        (array([0. , 0.5, 1. ]), array([0.25, 0.25, 0.25]))
        >>> hypercube_quadrature(my_quad, 2, domain=chaospy.Uniform(-1, 1))
        (array([[-1.,  0.,  1.]]), array([0.25, 0.25, 0.25]))
        >>> abscissas, weights = hypercube_quadrature(my_quad, (1, 1), domain=(0, 1))
        >>> abscissas
        array([[0., 0., 1., 1.],
               [0., 1., 0., 1.]])
        >>> weights.round(5)
        array([0.11111, 0.11111, 0.11111, 0.11111])

    """
    # Broadcast order/domain/segments to a common dimensionality.
    if segments is None:
        order, domain = align_arguments(order, domain)
        kwargs = dict(order=order)
    else:
        order, domain, segments = align_arguments(order, domain, segments)
        kwargs = dict(order=order, segments=segments)
    # Build the pipeline inside-out with functools.partial; each wrapper
    # adds one feature around the previous one. The wrapping ORDER is the
    # behavior: output normalization -> segmenting -> interval scaling ->
    # tensor-product multivariate -> domain/density handling.
    quad_func = partial(ensure_output, quad_func=quad_func)
    if auto_scale:
        # Only unit-interval rules can be segmented/rescaled automatically.
        if segments is not None:
            quad_func = partial(split_into_segments, quad_func=quad_func)
        quad_func = partial(scale_quadrature, quad_func=quad_func)
    quad_func = partial(univariate_to_multivariate, quad_func=quad_func)
    if isinstance(domain, chaospy.Distribution):
        # Distribution domain: weights get the density folded in.
        quad_func = partial(
            distribution_to_domain, quad_func=quad_func, distribution=domain)
    else:
        quad_func = partial(
            quad_func, lower=numpy.asarray(domain[0]), upper=numpy.asarray(domain[1]))
    return quad_func(**kwargs)
def align_arguments(
    order,
    domain,
    segments=None,
):
    """
    Extract dimensions from input arguments and broadcast relevant parts.

    Args:
        order (int, Sequence[int]):
            The quadrature order passed to the quadrature function.
        domain (Tuple[float, float], :func:`chaospy.Distribution`):
            Either interval on the format ``(lower, upper)`` or a distribution
            to integrate over.
        segments (Optional[int], Sequence[float]):
            The number segments to split the interval on. If sequence is
            provided, use as segment edges instead.

    Examples:
        >>> order, domain, segments = align_arguments(1, chaospy.Uniform(0, 1), 1)
        >>> order, domain, segments
        (array([1]), Uniform(), array([1]))
        >>> distribution = chaospy.Iid(chaospy.Uniform(0, 1), 2)
        >>> order, domain = align_arguments(1, distribution)
        >>> order, domain
        (array([1, 1]), Iid(Uniform(), 2))

    """
    # Collect everything that must share a shape; a distribution contributes
    # a zeros placeholder of its own length so `order` broadcasts to it.
    args = [numpy.asarray(order)]
    if isinstance(domain, chaospy.Distribution):
        args += [numpy.zeros(len(domain))]
    else:
        args += list(domain)
    if segments is not None:
        segments = numpy.atleast_1d(segments)
        assert segments.ndim <= 2
        # 2-d segments are per-dimension edge arrays; broadcast on the
        # first column only.
        if segments.ndim == 2:
            args.append(segments[:, 0])
        else:
            args.append(segments)
    args = numpy.broadcast_arrays(*args)
    # Re-assemble outputs in the same order they were pushed onto `args`.
    output = [args.pop(0)]
    if not isinstance(domain, chaospy.Distribution):
        output += [(args.pop(0), args.pop(0))]
    else:
        output += [domain]
    if segments is not None:
        if segments.ndim == 2:
            # NOTE(review): this broadcasts against the *original* `order`
            # argument (pre-broadcast), then transposes -- confirm intended.
            segments = numpy.broadcast_arrays(segments, order)[0].T
        else:
            segments = args.pop(-1)
        output += [segments]
    return tuple(output)
def ensure_output(quad_func, **kwargs):
    """
    Converts arrays to python native types and ensure quadrature output sizes.

    Args:
        quad_func (Callable):
            Function that creates quadrature abscissas and weights.
        kwargs (Any):
            Extra keyword arguments passed to `quad_func`.

    Returns:
        Same as ``quad_func(order, **kwargs)`` except numpy elements in
        ``kwargs`` is replaced with Python native counterparts and
        ``abscissas`` is ensured to be at least 2-dimensional.

    Examples:
        >>> def my_quad(order):
        ...     return (numpy.linspace(0, 1, order+1),
        ...             1./numpy.full(order+1, order+2))
        >>> my_quad(2)
        (array([0. , 0.5, 1. ]), array([0.25, 0.25, 0.25]))
        >>> ensure_output(my_quad, order=numpy.array([2]))
        (array([[0. , 0.5, 1. ]]), array([0.25, 0.25, 0.25]))

    """
    # Unbox numpy scalars/size-1 arrays into native Python values before
    # forwarding, so downstream quadrature functions see plain ints/floats.
    plain_kwargs = {}
    for key, value in kwargs.items():
        if isinstance(value, numpy.ndarray):
            value = value.item()
        plain_kwargs[key] = value
    abscissas, weights = quad_func(**plain_kwargs)
    # Normalize to the (dims, points) + (points,) convention.
    abscissas = numpy.atleast_2d(abscissas)
    assert abscissas.ndim == 2
    assert weights.ndim == 1
    assert abscissas.shape[-1] == len(weights)
    return abscissas, weights
def univariate_to_multivariate(quad_func, **kwargs):
    """
    Turn a univariate quadrature rule into a multivariate rule.

    The one-dimensional quadrature functions are combined into a multivariate
    rule through tensor-product. The dimensionality is inferred from the
    keyword arguments. Weights are adjusted to correspond to the multivariate
    scheme.

    Args:
        quad_func (Callable):
            Function that creates quadrature abscissas and weights on the
            unit interval.
        kwargs (Any):
            Keyword arguments passed to `quad_func`. If a numerical value is
            provided, it is used to infer the dimensions of the multivariate
            output. Non-numerical values are passed as-is.

    Returns:
        Same as ``quad_func(order, **kwargs)`` except with multivariate
        support.

    Examples:
        >>> def my_quad(order):
        ...     return (numpy.linspace(0, 1, order+1)[numpy.newaxis],
        ...             1./numpy.full(order+1, order+2))
        >>> my_quad(1)
        (array([[0., 1.]]), array([0.33333333, 0.33333333]))
        >>> my_quad(2)
        (array([[0. , 0.5, 1. ]]), array([0.25, 0.25, 0.25]))
        >>> abscissas, weights = univariate_to_multivariate(
        ...     my_quad, order=numpy.array([1, 2, 1]))
        >>> abscissas
        array([[0. , 0. , 0. , 0. , 0. , 0. , 1. , 1. , 1. , 1. , 1. , 1. ],
               [0. , 0. , 0.5, 0.5, 1. , 1. , 0. , 0. , 0.5, 0.5, 1. , 1. ],
               [0. , 1. , 0. , 1. , 0. , 1. , 0. , 1. , 0. , 1. , 0. , 1. ]])
        >>> weights.round(6)
        array([0.027778, 0.027778, 0.027778, 0.027778, 0.027778, 0.027778,
               0.027778, 0.027778, 0.027778, 0.027778, 0.027778, 0.027778])
        >>> univariate_to_multivariate(my_quad, order=numpy.array([1]))
        (array([[0., 1.]]), array([0.33333333, 0.33333333]))
    """
    # Collect the numerical keyword arguments; their (broadcast) length
    # determines the number of dimensions of the output.
    sizables = {key: value for key, value in kwargs.items()
                if isinstance(value, (int, float, numpy.ndarray))}
    # The "_" placeholder ensures the dimensionality implied by a `domain`
    # argument (if any) participates in the broadcast even when all other
    # numerical arguments are scalars.
    sizables["_"] = numpy.zeros(len(kwargs.get("domain", [0])))
    keys = list(sizables)
    args = numpy.broadcast_arrays(*[sizables[key] for key in keys])
    assert args[0].ndim == 1
    sizables = {key: value for key, value in zip(keys, args)}
    del sizables["_"]
    # Create one univariate rule per dimension; non-numerical kwargs are
    # passed through unchanged via the `kwargs.copy()` baseline.
    results = []
    for idx in range(args[0].size):
        sizable = kwargs.copy()
        sizable.update({key: value[idx].item()
                        for key, value in sizables.items()})
        abscissas, weights = quad_func(**sizable)
        results.append((abscissas.ravel(), weights))
    abscissas, weights = zip(*results)
    # Tensor-product combination of the per-dimension rules.
    return combine_quadrature(abscissas, weights)
def distribution_to_domain(quad_func, distribution, **kwargs):
    """
    Integrate over a distribution domain.

    Adjust weights to account for probability density.

    Args:
        quad_func (Callable):
            Function that creates quadrature abscissas and weights. Must accept
            the arguments ``lower`` and ``upper`` to define the interval it is
            integrating over.
        distribution (:class:`chaospy.Distribution`):
            Distribution to adjust quadrature scheme to.
        kwargs (Any):
            Extra keyword arguments passed to `quad_func`. Can not include the
            arguments ``lower`` and ``upper`` as they are taken from
            ``distribution``.

    Returns:
        Same as ``quad_func(order, **kwargs)`` except arguments ``lower`` and
        ``upper`` are now replaced with a new ``distribution`` argument.

    Examples:
        >>> def my_quad(lower=0, upper=1):
        ...     return (numpy.linspace(lower, upper, 5).reshape(1, -1)[:, 1:-1],
        ...             1./numpy.full(3, 4))
        >>> my_quad()
        (array([[0.25, 0.5 , 0.75]]), array([0.25, 0.25, 0.25]))
        >>> distribution_to_domain(my_quad, chaospy.Uniform(-1, 1))
        (array([[-0.5,  0. ,  0.5]]), array([0.125, 0.125, 0.125]))
        >>> distribution_to_domain(my_quad, chaospy.Beta(2, 2))
        (array([[0.25, 0.5 , 0.75]]), array([0.225, 0.3  , 0.225]))
        >>> distribution_to_domain(my_quad, chaospy.Exponential(1))  # doctest: +NORMALIZE_WHITESPACE
        (array([[ 8.05924772, 16.11849545, 24.17774317]]),
         array([2.32578431e-02, 7.35330570e-06, 2.32485465e-09]))
    """
    assert isinstance(distribution, chaospy.Distribution)
    # ``lower``/``upper`` are derived from the distribution; callers must
    # not pass them explicitly.
    assert "lower" not in kwargs
    assert "upper" not in kwargs
    lower = distribution.lower
    upper = distribution.upper
    abscissas, weights = quad_func(lower=lower, upper=upper, **kwargs)
    # Sometimes edge samples (inside the distribution domain) falls out again from simple
    # rounding errors. Edge samples needs to be adjusted.
    # Clip only for the PDF evaluation; the returned abscissas are untouched.
    eps = 1e-14*(distribution.upper-distribution.lower)
    abscissas_ = numpy.clip(abscissas.T, distribution.lower+eps, distribution.upper-eps).T
    # Reweight by the probability density, then renormalize so the total
    # weight mass (per unit volume of the domain) is preserved.
    weights_ = weights*distribution.pdf(abscissas_).ravel()
    weights = weights_*numpy.sum(weights)/(numpy.sum(weights_)*numpy.prod(upper-lower))
    return abscissas, weights
def split_into_segments(quad_func, order, segments, **kwargs):
    """
    Split a quadrature rule on the unit interval into multiple segments.

    If the quadrature function includes the abscissa endpoints 0 and 1, the
    shared endpoint of two adjacent segments is collapsed into a single node
    whose weights are summed, so no node is repeated.

    Args:
        quad_func (Callable):
            Function that creates quadrature abscissas and weights on the
            unit interval.
        order (int):
            The quadrature order passed to the quadrature function.
        segments (int, Sequence[float]):
            The number of segments to split the interval on. If a sequence
            is provided, it is used as segment edges instead.
        kwargs (Any):
            Extra keyword arguments passed to `quad_func`.

    Returns:
        Same as ``quad_func(order, **kwargs)`` except segmented into
        subintervals.

    Examples:
        >>> def my_quad(order):
        ...     return (numpy.linspace(0, 1, order+1)[numpy.newaxis],
        ...             1./numpy.full(order+1, order+2))
        >>> my_quad(2)
        (array([[0. , 0.5, 1. ]]), array([0.25, 0.25, 0.25]))
        >>> split_into_segments(my_quad, 4, segments=2)  # doctest: +NORMALIZE_WHITESPACE
        (array([[0.  , 0.25, 0.5 , 0.75, 1.  ]]),
         array([0.125, 0.125, 0.25 , 0.125, 0.125]))
        >>> split_into_segments(my_quad, 4, segments=3)  # doctest: +NORMALIZE_WHITESPACE
        (array([[0.        , 0.16666667, 0.33333333, 0.66666667, 1.        ]]),
         array([0.08333333, 0.08333333, 0.19444444, 0.22222222, 0.11111111]))
        >>> split_into_segments(my_quad, 4, segments=[0.2, 0.8])  # doctest: +NORMALIZE_WHITESPACE
        (array([[0. , 0.1, 0.2, 0.5, 0.8, 0.9, 1. ]]),
         array([0.05, 0.05, 0.2 , 0.15, 0.2 , 0.05, 0.05]))
    """
    edges = numpy.array(segments)
    if edges.size == 1:
        # Scalar argument: interpret as a segment count.
        count = int(edges)
        if count == 1 or order <= 2:
            # Nothing to split (or too few samples to bother).
            return quad_func(order=order, **kwargs)
        if not count:
            # Zero/False means "choose automatically".
            count = int(numpy.sqrt(order))
        assert count < order, "few samples to distribute than intervals"
        nodes = numpy.linspace(0, 1, count+1)
    else:
        # Sequence argument: use as interior segment edges.
        nodes = numpy.hstack([[0], edges, [1]])
        count = len(edges)
    abscissa_parts = []
    weight_parts = []
    for idx, (lo, hi) in enumerate(zip(nodes[:-1], nodes[1:])):
        assert lo < hi
        # Distribute `order` samples across segments; the first
        # ``order % count`` segments get one extra sample.
        local_order = order//count + (idx < (order % count))
        absc, wgt = quad_func(order=local_order, **kwargs)
        # Scale the unit-interval rule onto [lo, hi].
        wgt = wgt*(hi-lo)
        absc = (absc.T*(hi-lo)+lo).T
        # Collapse the node repeated at a shared segment edge, merging
        # its weight into the previous segment's last weight.
        if abscissa_parts and numpy.allclose(abscissa_parts[-1][:, -1], lo):
            weight_parts[-1][-1] += wgt[0]
            absc = absc[:, 1:]
            wgt = wgt[1:]
        abscissa_parts.append(absc)
        weight_parts.append(wgt)
    abscissas = numpy.hstack(abscissa_parts)
    weights = numpy.hstack(weight_parts)
    assert abscissas.shape == (1, len(weights))
    return abscissas, weights
def scale_quadrature(quad_func, order, lower, upper, **kwargs):
    """
    Scale a quadrature rule designed for the unit interval to an arbitrary
    interval.

    Args:
        quad_func (Callable):
            Function that creates quadrature abscissas and weights on the
            unit interval.
        order (int):
            The quadrature order passed to the quadrature function.
        lower (float):
            The new lower limit for the quadrature function.
        upper (float):
            The new upper limit for the quadrature function.
        kwargs (Any):
            Extra keyword arguments passed to `quad_func`.

    Returns:
        Same as ``quad_func(order, **kwargs)`` except scaled to the new
        interval.

    Examples:
        >>> def my_quad(order):
        ...     return (numpy.linspace(0, 1, order+1)[numpy.newaxis],
        ...             1./numpy.full(order+1, order+2))
        >>> my_quad(2)
        (array([[0. , 0.5, 1. ]]), array([0.25, 0.25, 0.25]))
        >>> scale_quadrature(my_quad, 2, lower=0, upper=2)
        (array([[0., 1., 2.]]), array([0.5, 0.5, 0.5]))
        >>> scale_quadrature(my_quad, 2, lower=-0.5, upper=0.5)
        (array([[-0.5,  0. ,  0.5]]), array([0.25, 0.25, 0.25]))
    """
    abscissas, weights = quad_func(order=order, **kwargs)
    # Sanity checks: input rule must genuinely live on the unit interval.
    assert numpy.all(abscissas >= 0) and numpy.all(abscissas <= 1)
    assert numpy.sum(weights) <= 1+1e-10
    assert numpy.sum(weights > 0)
    # Affine map [0, 1] -> [lower, upper]; weights scale with the interval
    # length.
    span = upper-lower
    scaled_weights = weights*span
    scaled_abscissas = (abscissas.T*span+lower).T
    return scaled_abscissas, scaled_weights
| {
"repo_name": "jonathf/chaospy",
"path": "chaospy/quadrature/hypercube.py",
"copies": "1",
"size": "16145",
"license": "mit",
"hash": 4037693981703505400,
"line_mean": 38.2822384428,
"line_max": 101,
"alpha_frac": 0.5907091979,
"autogenerated": false,
"ratio": 3.6354424679126325,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47261516658126324,
"avg_score": null,
"num_lines": null
} |
from functools import partial
import numpy
import pandas
from recordlinkage.algorithms.distance import _1d_distance
from recordlinkage.algorithms.distance import _haversine_distance
from recordlinkage.algorithms.numeric import _exp_sim
from recordlinkage.algorithms.numeric import _gauss_sim
from recordlinkage.algorithms.numeric import _linear_sim
from recordlinkage.algorithms.numeric import _squared_sim
from recordlinkage.algorithms.numeric import _step_sim
from recordlinkage.algorithms.string import cosine_similarity
from recordlinkage.algorithms.string import damerau_levenshtein_similarity
from recordlinkage.algorithms.string import jaro_similarity
from recordlinkage.algorithms.string import jarowinkler_similarity
from recordlinkage.algorithms.string import levenshtein_similarity
from recordlinkage.algorithms.string import longest_common_substring_similarity
from recordlinkage.algorithms.string import qgram_similarity
from recordlinkage.algorithms.string import smith_waterman_similarity
from recordlinkage.base import BaseCompareFeature
from recordlinkage.utils import fillna as _fillna
class Exact(BaseCompareFeature):
    """Compare the record pairs exactly.

    This class is used to compare records in an exact way. The similarity
    is 1 in case of agreement and 0 otherwise.

    Parameters
    ----------
    left_on : str or int
        Field name to compare in left DataFrame.
    right_on : str or int
        Field name to compare in right DataFrame.
    agree_value : float, str, numpy.dtype
        The value when two records are identical. Default 1. If 'value'
        is passed, then the value of the record pair is passed.
    disagree_value : float, str, numpy.dtype
        The value when two records are not identical.
    missing_value : float, str, numpy.dtype
        The value for a comparison with a missing value. Default 0.
    """

    name = "exact"
    description = "Compare attributes of record pairs."

    def __init__(self,
                 left_on,
                 right_on,
                 agree_value=1,
                 disagree_value=0,
                 missing_value=0,
                 label=None):
        super(Exact, self).__init__(left_on, right_on, label=label)

        self.agree_value = agree_value
        self.disagree_value = disagree_value
        self.missing_value = missing_value

    def _compute_vectorized(self, s_left, s_right):
        matches = s_left == s_right

        if self.agree_value == 'value':
            # Keep the original value on agreement instead of a fixed score.
            compare = s_left.copy()
            compare[~matches] = self.disagree_value
        else:
            compare = pandas.Series(self.disagree_value, index=s_left.index)
            compare[matches] = self.agree_value

        # Missing pairs need explicit handling only when the missing score
        # differs from the disagreement score they already received.
        if self.disagree_value != self.missing_value:
            either_missing = s_left.isnull() | s_right.isnull()
            compare[either_missing] = self.missing_value

        return compare
class String(BaseCompareFeature):
    """Compute the (partial) similarity between string values.

    This class is used to compare string values. The implemented algorithms
    are: 'jaro', 'jarowinkler', 'levenshtein', 'damerau_levenshtein', 'qgram'
    or 'cosine'. In case of agreement, the similarity is 1 and in case of
    complete disagreement it is 0. The Python Record Linkage Toolkit uses the
    `jellyfish` package for the Jaro, Jaro-Winkler, Levenshtein and Damerau-
    Levenshtein algorithms.

    Parameters
    ----------
    left_on : str or int
        The name or position of the column in the left DataFrame.
    right_on : str or int
        The name or position of the column in the right DataFrame.
    method : str, default 'levenshtein'
        An approximate string comparison method. Options are ['jaro',
        'jarowinkler', 'levenshtein', 'damerau_levenshtein', 'qgram',
        'cosine', 'smith_waterman', 'lcs']. Default: 'levenshtein'
    threshold : float, tuple of floats
        A threshold value. All approximate string comparisons higher or
        equal than this threshold are 1. Otherwise 0.
    missing_value : numpy.dtype
        The value for a comparison with a missing value. Default 0.
    """

    name = "string"
    description = "Compare string attributes of record pairs."

    def __init__(self,
                 left_on,
                 right_on,
                 method='levenshtein',
                 threshold=None,
                 missing_value=0.0,
                 label=None):
        super(String, self).__init__(left_on, right_on, label=label)

        self.method = method
        self.threshold = threshold
        self.missing_value = missing_value

    def _compute_vectorized(self, s_left, s_right):
        # Resolve the configured method name (including its accepted
        # aliases) to the matching similarity function.
        algorithms = {
            'jaro': jaro_similarity,
            'jarowinkler': jarowinkler_similarity,
            'jaro_winkler': jarowinkler_similarity,
            'jw': jarowinkler_similarity,
            'levenshtein': levenshtein_similarity,
            'dameraulevenshtein': damerau_levenshtein_similarity,
            'damerau_levenshtein': damerau_levenshtein_similarity,
            'dl': damerau_levenshtein_similarity,
            'q_gram': qgram_similarity,
            'qgram': qgram_similarity,
            'cosine': cosine_similarity,
            'smith_waterman': smith_waterman_similarity,
            'smithwaterman': smith_waterman_similarity,
            'sw': smith_waterman_similarity,
            'longest_common_substring': longest_common_substring_similarity,
            'lcs': longest_common_substring_similarity,
        }
        if self.method not in algorithms:
            raise ValueError("The algorithm '{}' is not known.".format(
                self.method))
        str_sim_alg = algorithms[self.method]

        c = str_sim_alg(s_left, s_right)

        if self.threshold is not None:
            # Binarize: scores at/above the threshold become 1, below it 0;
            # missing comparisons (NaN) are preserved for the fill below.
            c = c.where((c < self.threshold) | (pandas.isnull(c)), other=1.0)
            c = c.where((c >= self.threshold) | (pandas.isnull(c)), other=0.0)

        c = _fillna(c, self.missing_value)

        return c
class Numeric(BaseCompareFeature):
    """Compute the (partial) similarity between numeric values.

    This class is used to compare numeric values. The implemented algorithms
    are: 'step', 'linear', 'exp', 'gauss' or 'squared'. In case of agreement,
    the similarity is 1 and in case of complete disagreement it is 0. The
    implementation is similar with numeric comparing in ElasticSearch, a full-
    text search tool. The parameters are explained in the image below (source
    ElasticSearch, The Definitive Guide)

    .. image:: /images/elas_1705.png
        :width: 100%
        :target: https://www.elastic.co/guide/en/elasticsearch/
            guide/current/decay-functions.html
        :alt: Decay functions, like in ElasticSearch

    Parameters
    ----------
    left_on : str or int
        The name or position of the column in the left DataFrame.
    right_on : str or int
        The name or position of the column in the right DataFrame.
    method : float
        The metric used. Options 'step', 'linear', 'exp', 'gauss' or
        'squared'. Default 'linear'.
    offset : float
        The offset. See image above.
    scale : float
        The scale of the numeric comparison method. See the image above.
        This argument is not available for the 'step' algorithm.
    origin : float
        The shift of bias between the values. See image above.
    missing_value : numpy.dtype
        The value if one or both records have a missing value on the
        compared field. Default 0.

    Note
    ----
    Numeric comparing can be an efficient way to compare date/time
    variables. This can be done by comparing the timestamps.
    """

    name = "numeric"
    description = "Compare numeric attributes of record pairs."

    def __init__(self,
                 left_on,
                 right_on,
                 method='linear',
                 offset=0.0,
                 scale=1.0,
                 origin=0.0,
                 missing_value=0.0,
                 label=None):
        super(Numeric, self).__init__(left_on, right_on, label=label)

        self.method = method
        self.offset = offset
        self.scale = scale
        self.origin = origin
        self.missing_value = missing_value

    def _compute_vectorized(self, s_left, s_right):
        # Absolute one-dimensional distance between the two columns.
        d = _1d_distance(s_left, s_right)

        # 'step' takes no scale parameter; all remaining methods share the
        # (distance, scale, offset, origin) signature.
        if self.method == 'step':
            sim = partial(_step_sim, d, self.offset, self.origin)
        else:
            scaled_algorithms = {
                'linear': _linear_sim,
                'lin': _linear_sim,
                'squared': _squared_sim,
                'exp': _exp_sim,
                'exponential': _exp_sim,
                'gauss': _gauss_sim,
                'gaussian': _gauss_sim,
            }
            if self.method not in scaled_algorithms:
                raise ValueError("The algorithm '{}' is not known.".format(
                    self.method))
            sim = partial(scaled_algorithms[self.method], d,
                          self.scale, self.offset, self.origin)

        c = sim()
        c = _fillna(c, self.missing_value)
        return c
class Geographic(BaseCompareFeature):
    """Compute the (partial) similarity between WGS84 coordinate values.

    Compare the geometric (haversine) distance between two WGS-
    coordinates. The similarity algorithms are 'step', 'linear', 'exp',
    'gauss' or 'squared'. The similarity functions are the same as in
    :meth:`recordlinkage.comparing.Compare.numeric`

    Parameters
    ----------
    left_on_lat : tuple
        The name or position of the latitude in the left DataFrame.
    left_on_lng : tuple
        The name or position of the longitude in the left DataFrame.
    right_on_lat : tuple
        The name or position of the latitude in the right DataFrame.
    right_on_lng : tuple
        The name or position of the longitude in the right DataFrame.
    method : str
        The metric used. Options 'step', 'linear', 'exp', 'gauss' or
        'squared'. Default 'linear'.
    offset : float
        The offset. See Compare.numeric.
    scale : float
        The scale of the numeric comparison method. See Compare.numeric.
        This argument is not available for the 'step' algorithm.
    origin : float
        The shift of bias between the values. See Compare.numeric.
    missing_value : numpy.dtype
        The value for a comparison with a missing value. Default 0.
    """

    name = "geographic"
    description = "Compare geographic attributes of record pairs."

    def __init__(self,
                 left_on_lat,
                 left_on_lng,
                 right_on_lat,
                 right_on_lng,
                 method=None,
                 offset=0.0,
                 scale=1.0,
                 origin=0.0,
                 missing_value=0.0,
                 label=None):
        super(Geographic, self).__init__(
            (left_on_lat, left_on_lng), (right_on_lat, right_on_lng),
            label=label)

        self.method = method
        self.offset = offset
        self.scale = scale
        self.origin = origin
        self.missing_value = missing_value

    def _compute_vectorized(self, lat1, lng1, lat2, lng2):
        # Great-circle (haversine) distance between the coordinate pairs.
        d = _haversine_distance(lat1, lng1, lat2, lng2)

        # 'step' takes no scale parameter; all remaining methods share the
        # (distance, scale, offset, origin) signature.
        if self.method == 'step':
            sim = partial(_step_sim, d, self.offset, self.origin)
        else:
            scaled_algorithms = {
                'linear': _linear_sim,
                'lin': _linear_sim,
                'squared': _squared_sim,
                'exp': _exp_sim,
                'exponential': _exp_sim,
                'gauss': _gauss_sim,
                'gaussian': _gauss_sim,
            }
            if self.method not in scaled_algorithms:
                raise ValueError("The algorithm '{}' is not known.".format(
                    self.method))
            sim = partial(scaled_algorithms[self.method], d,
                          self.scale, self.offset, self.origin)

        c = sim()
        c = _fillna(c, self.missing_value)
        return c
class Date(BaseCompareFeature):
    """Compute the (partial) similarity between date values.

    Parameters
    ----------
    left_on : str or int
        The name or position of the column in the left DataFrame.
    right_on : str or int
        The name or position of the column in the right DataFrame.
    swap_month_day : float
        The value if the month and day are swapped. Default 0.5.
    swap_months : list of tuples
        A list of tuples with common errors caused by the translating of
        months into numbers, i.e. October is month 10. The format of the
        tuples is (month_good, month_bad, value). Default : swap_months =
        [(6, 7, 0.5), (7, 6, 0.5), (9, 10, 0.5), (10, 9, 0.5)]
    missing_value : numpy.dtype
        The value for a comparison with a missing value. Default 0.0.
    """

    name = "date"
    description = "Compare date attributes of record pairs."

    def __init__(self,
                 left_on,
                 right_on,
                 swap_month_day=0.5,
                 swap_months='default',
                 errors='coerce',
                 missing_value=0.0,
                 label=None):
        super(Date, self).__init__(left_on, right_on, label=label)

        self.missing_value = missing_value
        self.swap_months = swap_months
        self.swap_month_day = swap_month_day
        self.errors = errors

    def _compute_vectorized(self, s_left, s_right):
        # validate datatypes
        if str(s_left.dtype) != 'datetime64[ns]':
            raise ValueError('Left column is not of type datetime64[ns]')
        if str(s_right.dtype) != 'datetime64[ns]':
            raise ValueError('Right column is not of type datetime64[ns]')
        # Exact date matches score 1, everything else 0 for now.
        c = (s_left == s_right).astype(numpy.int64)  # start with int64
        # Award a partial score when day and month appear swapped but the
        # year matches (and the pair did not already match exactly).
        if (self.swap_month_day and self.swap_month_day != 0):
            c[(s_left.dt.year == s_right.dt.year) &
              (s_left.dt.month == s_right.dt.day) &
              (s_left.dt.day == s_right.dt.month) &
              (c != 1)] = self.swap_month_day
        if (self.swap_months and self.swap_months != 0):
            if self.swap_months == 'default':
                # NOTE(review): this caches the default table on the
                # instance, permanently replacing the 'default' sentinel.
                self.swap_months = [(6, 7, 0.5), (7, 6, 0.5), (9, 10, 0.5),
                                    (10, 9, 0.5)]
            else:
                # Validate user-supplied table: every entry must be a
                # 3-element (month_good, month_bad, value) tuple/list.
                try:
                    if not all([len(x) == 3 for x in self.swap_months]):
                        raise Exception
                except Exception:
                    raise ValueError(
                        'swap_months must be a list of (first month, \
                        second month, value) tuples or lists. ')
            # Partial score for commonly confused month pairs when year and
            # day both agree.
            for month1, month2, value in self.swap_months:
                c[(s_left.dt.year == s_right.dt.year) &
                  (s_left.dt.month == month1) &
                  (s_right.dt.month == month2) &
                  (s_left.dt.day == s_right.dt.day) & (c != 1)] = value
        c = pandas.Series(c)
        # Pairs with a missing date on either side get the missing score.
        c[s_left.isnull() | s_right.isnull()] = self.missing_value

        return c
class Variable(BaseCompareFeature):
    """Add a variable of the dataframe as feature.

    Parameters
    ----------
    left_on : str or int
        The name or position of the column in the left DataFrame.
    right_on : str or int
        The name or position of the column in the right DataFrame.
    missing_value : numpy.dtype
        The value for a comparison with a missing value. Default 0.0.
    """

    name = "variable"
    description = "Add a variable of the dataframe to the features."

    def __init__(self,
                 left_on=None,
                 right_on=None,
                 missing_value=0.0,
                 label=None):
        super(Variable, self).__init__(left_on, right_on, label=label)

        self.missing_value = missing_value

    def _compute_vectorized(self, *data):
        # ``*data`` always packs the incoming columns into a tuple, so the
        # previous non-tuple fallback branch was unreachable and has been
        # removed. Each column is passed through with missing values
        # replaced by ``missing_value``.
        return tuple(_fillna(col, self.missing_value) for col in data)
class VariableA(Variable):
    """Add a variable of the left dataframe as feature.

    Parameters
    ----------
    on : str or int
        The name or position of the column in the left DataFrame.
    missing_value : numpy.dtype
        The value for a comparison with a missing value. Default 0.0.
    """
    # NOTE: the previous docstring documented a ``normalise`` parameter,
    # but this class does not accept one (see __init__ below).

    name = "variable"
    description = "Add a variable of the left dataframe to the features."

    def __init__(self, on=None, missing_value=0.0, label=None):
        # Delegate to Variable with only the left-hand column set.
        super(VariableA, self).__init__(
            on, None, missing_value=missing_value, label=label)
class VariableB(Variable):
    """Add a variable of the right dataframe as feature.

    Parameters
    ----------
    on : str or int
        The name or position of the column in the right DataFrame.
    missing_value : numpy.dtype
        The value for a comparison with a missing value. Default 0.0.
    """
    # NOTE: the previous docstring documented a ``normalise`` parameter,
    # but this class does not accept one (see __init__ below).

    name = "variable"
    description = "Add a variable of the right dataframe to the features."

    def __init__(self, on=None, missing_value=0.0, label=None):
        # Delegate to Variable with only the right-hand column set.
        super(VariableB, self).__init__(
            None, on, missing_value=missing_value, label=label)
class Frequency(BaseCompareFeature):
    """Compute the (relative) frequency of each variable.

    Parameters
    ----------
    left_on : str or int
        The name or position of the column in the left DataFrame.
    right_on : str or int
        The name or position of the column in the right DataFrame.
    normalise : bool
        Normalise the outcome. This is needed for good result in many
        classification models. Default True.
    missing_value : numpy.dtype
        The value for a comparison with a missing value. Default 0.0.
    """

    name = "frequency"
    description = "Compute the frequency."

    def __init__(self,
                 left_on=None,
                 right_on=None,
                 normalise=True,
                 missing_value=0.0,
                 label=None):
        super(Frequency, self).__init__(left_on, right_on, label=label)

        self.normalise = normalise
        self.missing_value = missing_value

    def _compute_frequency(self, col):
        """Return the (relative) frequency of each value in ``col``."""
        # Temporarily replace NaN with a sentinel so missing entries form
        # their own group; see https://github.com/pydata/pandas/issues/3729
        na_value = 'NAN'
        value_count = col.fillna(na_value)
        c = value_count.groupby(by=value_count).transform('count')
        c = c.astype(numpy.float64)

        if self.normalise:
            c = c / len(col)

        # The frequency of a missing entry is not meaningful; replace it
        # with the configured missing score.
        c[col.isnull()] = self.missing_value

        return c

    def _compute_vectorized(self, *data):
        # ``*data`` always packs the incoming columns into a tuple, so the
        # previous non-tuple fallback branch was unreachable and has been
        # removed.
        return tuple(self._compute_frequency(col) for col in data)
class FrequencyA(Frequency):
    """Compute the frequency of a variable in the left dataframe.

    Parameters
    ----------
    on : str or int
        The name or position of the column in the left DataFrame.
    normalise : bool
        Normalise the outcome. This is needed for good result in many
        classification models. Default True.
    missing_value : numpy.dtype
        The value for a comparison with a missing value. Default 0.0.
    """

    name = "frequency"
    description = "Compute the frequency."

    def __init__(self, on=None, normalise=True, missing_value=0.0, label=None):
        # Delegate to Frequency with only the left-hand column set.
        super(FrequencyA, self).__init__(
            on, None, normalise=normalise,
            missing_value=missing_value, label=label)
class FrequencyB(Frequency):
    """Compute the frequency of a variable in the right dataframe.

    Parameters
    ----------
    on : str or int
        The name or position of the column in the right DataFrame.
    normalise : bool
        Normalise the outcome. This is needed for good result in many
        classification models. Default True.
    missing_value : numpy.dtype
        The value for a comparison with a missing value. Default 0.0.
    """

    name = "frequency"
    description = "Compute the frequency."

    def __init__(self, on=None, normalise=True, missing_value=0.0, label=None):
        # Delegate to Frequency with only the right-hand column set.
        super(FrequencyB, self).__init__(
            None, on, normalise=normalise,
            missing_value=missing_value, label=label)
| {
"repo_name": "J535D165/recordlinkage",
"path": "recordlinkage/compare.py",
"copies": "1",
"size": "21547",
"license": "bsd-3-clause",
"hash": 8210599465971770000,
"line_mean": 33.6414790997,
"line_max": 79,
"alpha_frac": 0.5948855989,
"autogenerated": false,
"ratio": 4.1190976868667555,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5213983285766756,
"avg_score": null,
"num_lines": null
} |
from functools import partial
import OpenGL.GL as gl
from pyqtgraph.opengl.GLGraphicsItem import GLGraphicsItem
class displaylist(object):
    """Method decorator caching GL draw commands in an OpenGL display list.

    The first invocation compiles the wrapped draw code into a display list
    stored on the instance as ``_display_list``; later invocations replay
    the list with ``glCallList``. Setting ``_display_list`` to a falsy
    value (see ``HCItem.redraw``) forces a rebuild on the next call.
    """

    def __init__(self, func):
        # The undecorated draw method.
        self.func = func

    def __call__(self, obj):
        if hasattr(obj, '_display_list'):
            l = getattr(obj, '_display_list')
            if l:
                # Cached list exists: replay it instead of re-running func.
                gl.glCallList(l)
                return
        # No (valid) cached list: compile a fresh one from func's commands.
        # NOTE(review): GL_COMPILE records the commands without executing
        # them, so the first call appears to draw nothing until a later
        # call replays the list — confirm GL_COMPILE_AND_EXECUTE was not
        # intended here.
        l = gl.glGenLists(1)
        gl.glNewList(l, gl.GL_COMPILE)
        self.func(obj)
        gl.glEndList()
        setattr(obj, '_display_list', l)

    def __get__(self, obj, objtype):
        """Support instance methods."""
        return partial(self.__call__, obj)
class HCItem(GLGraphicsItem, object):
    """Base graphics item adding child-reset and redraw helpers."""

    def delChildren(self):
        # Reset the child set kept by GLGraphicsItem. The attribute is
        # private to the base class, so it is reached through its
        # name-mangled form ``_GLGraphicsItem__children``.
        self._GLGraphicsItem__children = set()

    def redraw(self):
        # Invalidate the cached display list so that the @displaylist
        # decorator recompiles the draw commands on the next paint.
        self._display_list = None
from cross import Cross
from gcode import GCode
from grid import Grid
from model import Model
from text import Text
from ruler import Ruler, YRuler, ZRuler
from probelist import ProbeList
from proberesult import ProbeResult
| {
"repo_name": "hackerspace/hacked_cnc",
"path": "hc/ui/glitems/__init__.py",
"copies": "1",
"size": "1062",
"license": "bsd-3-clause",
"hash": -160065575025616060,
"line_mean": 23.6976744186,
"line_max": 58,
"alpha_frac": 0.6327683616,
"autogenerated": false,
"ratio": 3.765957446808511,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4898725808408511,
"avg_score": null,
"num_lines": null
} |
from functools import partial
import os
import pytest
import subprocess
from miniworld import Scenario
from tests.conftest import create_runner
@pytest.fixture(scope='session')
def runner(tmpdir_factory, image_path, request, config_path):
    # Session-scoped runner shared by all tests in this module.
    runner = create_runner(tmpdir_factory, request, config_path)
    with runner() as r:
        # Track which connection modes have already completed a full boot,
        # so later scenarios with the same mode can boot from a snapshot
        # (see _test_network_switch_bridged_backends).
        r.connection_modes = set()
        yield r
@pytest.fixture
def snapshot_runner(runner):
    # Hand out the shared session runner; after each test, stop the current
    # scenario softly (hard=False) so the next test can snapshot-boot.
    yield runner
    runner.stop(hard=False)
# TODO: theoretically we need to manually check network connectivity since network checking code may not run at all :/
def _create_scenarios(connection_mode):
    """Yield (scenario_factory, test_id) pairs for every execution mode.

    Each factory builds the full scenario config dict for the given
    connection/execution mode combination once the runtime values
    (image path, request, topology dir) are available.
    """
    for execution_mode in Scenario.ScenarioConfig.EXECUTION_MODES:
        def fun(connection_mode, execution_mode, image_path, request, core_topologies_dir):
            return {
                "scenario": "acceptance_network_switching",
                "walk_model": {
                    "name": "core"
                },
                "cnt_nodes": 5,
                "provisioning": {
                    "image": image_path,
                    "regex_shell_prompt": "root@OpenWrt:/#",
                    "shell": {
                        "pre_network_start": {
                            "shell_cmds": [
                                # we need to wait for the NICs to be up
                                "until ifconfig eth0; do echo -n . && sleep 1; done",
                                "until ifconfig br-lan ; do echo -n . && sleep 1; done",
                                "ifconfig eth0 down",
                                "ifconfig br-lan down",
                                "brctl delif br-lan eth0",
                                "ifconfig eth0 up",
                                "ifconfig -a",
                                "brctl show"
                            ]
                        }
                    }
                },
                "network": {
                    "backend": {
                        "name": "bridged",
                        "connection_mode": connection_mode,
                        "execution_mode": {
                            "name": execution_mode,
                        }
                    },
                    "links": {
                        "model": "miniworld.model.network.linkqualitymodels.LinkQualityModelRange.LinkQualityModelRange"
                    },
                    # Step through one CORE topology per runner.step() call.
                    "core": {
                        "topologies": [
                            [0, os.path.join(core_topologies_dir, "chain5.xml")],
                            [0, os.path.join(core_topologies_dir, "clique5.xml")],
                            [0, os.path.join(core_topologies_dir, "cycle5.xml")],
                            [0, os.path.join(core_topologies_dir, "star5.xml")],
                            [0, os.path.join(core_topologies_dir, "wheel5.xml")],
                        ],
                        "mode": "lan"
                    }
                }
            }
        # we need to inject the environment into the function
        yield partial(fun, connection_mode, execution_mode), '{}_{}'.format(connection_mode, execution_mode)
@pytest.mark.parametrize('scenario_fun',
                         **dict(zip(['argvalues', 'ids'], zip(*_create_scenarios(Scenario.CONNECTION_MODE_SINGLE)))))
def test_network_switching_bridged_backends_single(scenario_fun, snapshot_runner, image_path, request,
                                                   core_topologies_dir):
    # Run every execution mode of the bridged backend in 'single'
    # connection mode.
    _test_network_switch_bridged_backends(core_topologies_dir, image_path, request, snapshot_runner, scenario_fun)
@pytest.mark.parametrize('scenario_fun',
                         **dict(zip(['argvalues', 'ids'], zip(*_create_scenarios(Scenario.CONNECTION_MODE_MULTI)))))
def test_network_switching_bridged_backends_multi(scenario_fun, snapshot_runner, image_path, request,
                                                  core_topologies_dir):
    # Run every execution mode of the bridged backend in 'multi'
    # connection mode.
    _test_network_switch_bridged_backends(core_topologies_dir, image_path, request, snapshot_runner, scenario_fun)
def _test_network_switch_bridged_backends(core_topologies_dir, image_path, request, runner, scenario_fun):
    """Boot the scenario, step through all topologies and verify the host
    network state (bridges, ebtables rules) is restored afterwards.
    """
    scenario = scenario_fun(image_path, request, core_topologies_dir)
    connection_mode = scenario['network']['backend']['connection_mode']
    # Boot from scratch the first time a connection mode is seen; later
    # scenarios with the same mode can boot from the snapshot.
    if connection_mode not in runner.connection_modes:
        force_snapshot_boot = False
        runner.connection_modes.add(connection_mode)
    else:
        force_snapshot_boot = True
    # BUG FIX: subprocess.check_call returns the exit status (always 0 on
    # success), so the before/after comparisons below compared 0 == 0 and
    # could never fail. Capture the actual command output instead.
    brctl_output_before = subprocess.check_output(['brctl', 'show'])
    ebtables_before = subprocess.check_output(['ebtables', '-L'])
    runner.start_scenario(scenario, force_snapshot_boot=force_snapshot_boot)
    # One step per configured CORE topology.
    for _ in range(len(scenario['network']['core']['topologies'])):
        runner.step()
    brctl_output_after = subprocess.check_output(['brctl', 'show'])
    ebtables_after = subprocess.check_output(['ebtables', '-L'])
    # check cleanup done correctly
    assert brctl_output_before == brctl_output_after, 'network backend cleanup not working'
    assert ebtables_before == ebtables_after, 'network backend cleanup not working'
| {
"repo_name": "miniworld-project/miniworld_core",
"path": "tests/acceptance/test_network_switching.py",
"copies": "1",
"size": "5145",
"license": "mit",
"hash": -2968943842650238000,
"line_mean": 44.1315789474,
"line_max": 120,
"alpha_frac": 0.5358600583,
"autogenerated": false,
"ratio": 4.401197604790419,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0018122848340514676,
"num_lines": 114
} |
from functools import partial
import pandas as pd
import six
try:
from PyQt5 import QtCore, QtWidgets
except ImportError:
raise ImportError('PyQt5 is not installed. Please install PyQt5 to use '
'GUI related functions in py_entitymatching.')
import py_entitymatching as em
class DataFrameTableView(QtWidgets.QTableWidget):
    """
    Class implementing DataFrame table view.

    Renders a pandas DataFrame as a QTableWidget, prepending two button
    columns ('Show' and 'Debug') whose clicks are forwarded to the
    controller together with the clicked row index.
    """
    def __init__(self, controller, dataframe):
        super(DataFrameTableView, self).__init__()
        # Set the parameters and set up the GUI
        self.controller = controller
        self.dataframe = dataframe
        # Reuse the running QApplication if one exists; otherwise create one.
        em._viewapp = QtWidgets.QApplication.instance()
        if em._viewapp is None:
            em._viewapp = QtWidgets.QApplication([])
        self.setup_gui()
    def set_dataframe(self, dataframe):
        # Set the DataFrame (view is not rebuilt automatically)
        self.dataframe = dataframe
    def setup_gui(self):
        # Set up the GUI for DataFrame table
        # Set rowcount
        nrows = len(self.dataframe)
        self.setRowCount(nrows)
        # Set col count
        ncols = len(self.dataframe.columns)
        self.setColumnCount(ncols + 2)  # + 2 because of show and debug icons
        # Set headers
        # Horiz. header: two button columns followed by the DataFrame columns
        headers = ['Show', 'Debug']
        headers.extend(list(self.dataframe.columns))
        self.setHorizontalHeaderLabels(headers)
        self.horizontalHeader().setStretchLastSection(True)
        # vertic. header
        self.verticalHeader().setVisible(True)
        # populate data
        if nrows > 0:
            for i in range(nrows):
                for j in range(ncols + 2):
                    if j == 0:
                        # Column 0: 'Show' button wired to the controller
                        button = QtWidgets.QPushButton('Show', self)
                        self.setCellWidget(i, j, button)
                        button.clicked.connect(
                            partial(self.controller.handle_show_button, i))
                    elif j == 1:
                        # Column 1: 'Debug' button wired to the controller
                        button = QtWidgets.QPushButton('Debug', self)
                        self.setCellWidget(i, j, button)
                        button.clicked.connect(
                            partial(self.controller.handle_debug_button, i))
                    else:
                        # Remaining columns: DataFrame values (NaN -> empty cell)
                        if pd.isnull(self.dataframe.iloc[i, j - 2]):
                            self.setItem(i, j, QtWidgets.QTableWidgetItem(""))
                        else:
                            self.setItem(i, j, QtWidgets.QTableWidgetItem(
                                str(self.dataframe.iloc[i, j - 2])
                            ))
                        # Make data cells selectable but read-only
                        self.item(i, j).setFlags(
                            QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled)
class DataFrameTableViewWithLabel(QtWidgets.QWidget):
    """
    Widget combining a text label with a DataFrame table view,
    stacked vertically.
    """
    def __init__(self, controller, dataframe, label):
        super(DataFrameTableViewWithLabel, self).__init__()
        # Store collaborators before building the GUI
        self.controller = controller
        self.dataframe = dataframe
        self.label = label
        self.tbl_obj = None
        self.label_obj = None
        # Reuse a running QApplication if present, else create one
        em._viewapp = QtWidgets.QApplication.instance()
        if em._viewapp is None:
            em._viewapp = QtWidgets.QApplication([])
        self.setup_gui()
    def set_dataframe(self, data_frame):
        """Replace the DataFrame backing this view."""
        self.dataframe = data_frame
    def set_label(self, label):
        """Replace the label text."""
        self.label = label
    def setup_gui(self):
        """Build the label-above-table layout."""
        self.label_obj = QtWidgets.QLabel(self.label)
        self.tbl_obj = DataFrameTableView(self.controller, self.dataframe)
        layout = QtWidgets.QVBoxLayout()
        layout.addWidget(self.label_obj)
        layout.addWidget(self.tbl_obj)
        self.setLayout(layout)
class DictTableViewWithLabel(QtWidgets.QWidget):
    """
    Class implementing Dictionary table view with label.

    Stacks a QLabel above a DictTableView built from `dictionary`;
    an optional combo box is forwarded to the table view.
    """
    def __init__(self, controller, dictionary, label, combo_box=None):
        super(DictTableViewWithLabel, self).__init__()
        # Set the parameters
        self.dictionary = dictionary
        self.label = label
        self.controller = controller
        self.combo_box = combo_box
        # Reuse the running QApplication if one exists; otherwise create one.
        # (Removed an unused local alias of em._viewapp that served no purpose.)
        em._viewapp = QtWidgets.QApplication.instance()
        if em._viewapp is None:
            em._viewapp = QtWidgets.QApplication([])
        # Set up the GUI
        self.setup_gui()
    def setup_gui(self):
        # Set up the GUI
        # Set up the label
        label = QtWidgets.QLabel(self.label)
        # Create a dict table view
        dict_view = DictTableView(self.controller, self.dictionary,
                                  self.combo_box)
        layout = QtWidgets.QVBoxLayout()
        # Stack the label above the table
        layout.addWidget(label)
        layout.addWidget(dict_view)
        # Set the layout
        self.setLayout(layout)
class DictTableView(QtWidgets.QTableWidget):
    """
    Class implementing the Dictionary table view.

    Shows one row per dictionary entry (the key as the vertical header,
    the value in a single 'Value' column). If a combo box is given, an
    extra 'Show' row hosting the combo box is appended.
    """
    def __init__(self, controller, dictionary, combo_box=None):
        super(DictTableView, self).__init__()
        # Set the parameters
        self.controller = controller
        self.dictionary = dictionary
        self.combo_box = combo_box
        # Reuse the running QApplication if one exists; otherwise create one.
        em._viewapp = QtWidgets.QApplication.instance()
        if em._viewapp is None:
            em._viewapp = QtWidgets.QApplication([])
        self.setup_gui()
    def set_dictionary(self, dictionary):
        # Set the dictionary (view is not rebuilt automatically)
        self.dictionary = dictionary
    def setup_gui(self):
        # Set up the GUI
        self.setSortingEnabled(False)
        # Single 'Value' column
        self.setColumnCount(1)
        # One row per key, plus one extra row for the combo box if present
        nrows = len(list(self.dictionary.keys()))
        if self.combo_box is not None:
            nrows += 1
        self.setRowCount(nrows)
        # Horizontal headers
        self.setHorizontalHeaderLabels(['Value'])
        self.horizontalHeader().setStretchLastSection(True)
        # Vertical headers: dictionary keys, then 'Show' for the combo row.
        # BUG FIX: 'Show' used to be appended unconditionally, leaving a
        # stray header label (one more label than rows) when no combo box
        # row exists.
        h = list(self.dictionary.keys())
        if self.combo_box is not None:
            h.append('Show')
        self.setVerticalHeaderLabels(h)
        idx = 0
        for k, v in six.iteritems(self.dictionary):
            self.setItem(idx, 0, QtWidgets.QTableWidgetItem(str(v)))
            idx += 1
        if self.combo_box is not None:
            self.setCellWidget(idx, 0, self.combo_box)
class TreeView(QtWidgets.QTreeWidget):
    """
    Class implementing the Tree view.

    Renders matcher debug output as a tree. `type` selects the layout:
    'dt' (decision tree), 'rf' (random forest) or 'rm' (rule-based
    matcher); any other value raises TypeError.
    """
    def __init__(self, controller, type, debug_result):
        super(TreeView, self).__init__()
        # Initialize the parameters
        self.controller = controller
        self.debug_result = debug_result
        self.type = type
        # Reuse the running QApplication if one exists; otherwise create one.
        em._viewapp = QtWidgets.QApplication.instance()
        if em._viewapp is None:
            em._viewapp = QtWidgets.QApplication([])
        # NOTE(review): `app` is an unused local alias
        app = em._viewapp
        self.setup_gui()
    def setup_gui(self):
        # Set up the GUI: pick the header for the matcher type and build the
        # tree items. The `root` locals are unused: the QTreeWidgetItem
        # constructors already attach the items to `self`.
        if self.type == 'dt':
            header = QtWidgets.QTreeWidgetItem(["Debug-Tree", "Status", "Predicate",
                                                "Feature value"])
            self.setHeaderItem(header)
            root = self.get_treewidget_items_for_dt()
        elif self.type == 'rf':
            header = QtWidgets.QTreeWidgetItem(["Debug-Tree", "Status", "Predicate",
                                                "Feature value"])
            self.setHeaderItem(header)
            root = self.get_treewidget_items_for_rf()
        elif self.type == 'rm':
            header = QtWidgets.QTreeWidgetItem(["Debug-Rules", "Status",
                                                "Conjunct", "Feature value"])
            self.setHeaderItem(header)
            root = self.get_treewidget_items_for_rm()
        else:
            raise TypeError('Unknown matcher type ')
    def get_treewidget_items_for_dt(self):
        """
        Get treewidget items for decision tree.
        """
        # This is going to create a tree widget for debugging dt
        # matcher. debug_result = (overall predicted status, list of
        # per-node (status, predicate, feature value) triples).
        overall_status = self.debug_result[0]
        node_list = self.debug_result[1]
        root = QtWidgets.QTreeWidgetItem(self, ['Nodes', str(overall_status), '',
                                                ''])
        idx = 0
        for ls in node_list:
            # First item is a blank spacer row; the second holds the data.
            temp = QtWidgets.QTreeWidgetItem(root, ['', '', '', ''])
            temp = QtWidgets.QTreeWidgetItem(root, ['Node '
                                                    + str(idx + 1), str(ls[0]),
                                                    str(ls[1]), str(ls[2])])
            idx += 1
        return root
    def get_treewidget_items_for_rf(self):
        """
        Get treewidget items for random forest.
        """
        # This is going to create a tree widget for debugging rf
        # matcher: one sub-tree per decision tree in the forest.
        overall_status = self.debug_result[0]
        consol_node_list = self.debug_result[1]
        root = QtWidgets.QTreeWidgetItem(self,
                                         ['Trees(' + str(len(consol_node_list))
                                          + ')', str(overall_status), '', ''])
        tree_idx = 1
        for node_list in consol_node_list:
            # Blank spacer row, then the tree's summary row.
            sub_root = QtWidgets.QTreeWidgetItem(root, ['', '', '', ''])
            sub_root = QtWidgets.QTreeWidgetItem(root, ['Tree ' + str(tree_idx),
                                                        str(node_list[0]), '', ''])
            node_idx = 1
            for ls in node_list[1]:
                temp = QtWidgets.QTreeWidgetItem(sub_root, ['', '', '', ''])
                temp = QtWidgets.QTreeWidgetItem(sub_root, ['Node ' + str(node_idx),
                                                            str(ls[0]), str(ls[1]),
                                                            str(ls[2])])
                node_idx += 1
            tree_idx += 1
        return root
    def get_treewidget_items_for_rm(self):
        """
        Get treewidget items for rule-based matcher.
        """
        # This is going to create a tree widget for debugging rule based
        # matcher: one sub-tree per rule, one leaf per conjunct.
        overall_status = self.debug_result[0]
        consol_rule_list = self.debug_result[1]
        root = QtWidgets.QTreeWidgetItem(self, ['Rules(' + str(
            len(consol_rule_list)) + ')', str(overall_status),
            '', ''])
        rule_idx = 1
        for rule_list in consol_rule_list:
            # Blank spacer row, then the rule's summary row.
            sub_root = QtWidgets.QTreeWidgetItem(root, ['', '', '', ''])
            sub_root = QtWidgets.QTreeWidgetItem(root, ['Rule ' + str(rule_idx),
                                                        str(rule_list[0]), '', ''])
            node_idx = 1
            for ls in rule_list[1]:
                temp = QtWidgets.QTreeWidgetItem(sub_root, ['', '', '', ''])
                temp = QtWidgets.QTreeWidgetItem(sub_root, ['Conjunct ' +
                                                            str(node_idx),
                                                            str(ls[0]), str(ls[1]),
                                                            str(ls[2])])
                node_idx += 1
            rule_idx += 1
        return root
class TreeViewWithLabel(QtWidgets.QWidget):
    """
    Widget combining a text label with a matcher-debug TreeView,
    stacked vertically.
    """
    def __init__(self, controller, label, type, debug_result):
        super(TreeViewWithLabel, self).__init__()
        # Store collaborators before building the GUI
        self.controller = controller
        self.label = label
        self.type = type
        self.debug_result = debug_result
        # Reuse a running QApplication if present, else create one
        em._viewapp = QtWidgets.QApplication.instance()
        if em._viewapp is None:
            em._viewapp = QtWidgets.QApplication([])
        self.setup_gui()
    def setup_gui(self):
        """Build the label-above-tree layout."""
        widgets = (
            QtWidgets.QLabel(self.label),
            TreeView(self.controller, self.type, self.debug_result),
        )
        layout = QtWidgets.QVBoxLayout()
        for widget in widgets:
            layout.addWidget(widget)
        self.setLayout(layout)
| {
"repo_name": "anhaidgroup/py_entitymatching",
"path": "py_entitymatching/gui/gui_utils.py",
"copies": "1",
"size": "12328",
"license": "bsd-3-clause",
"hash": 330253203257937600,
"line_mean": 34.8372093023,
"line_max": 84,
"alpha_frac": 0.5356099935,
"autogenerated": false,
"ratio": 4.487804878048781,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5523414871548781,
"avg_score": null,
"num_lines": null
} |
from functools import partial
import param
import numpy as np
import pandas as pd
import holoviews as hv
import datashader as ds
import colorcet as cc
from param import ParameterizedFunction, ParamOverrides
from holoviews.core.operation import Operation
from holoviews.streams import Stream, BoundsXY, LinkedStream
from holoviews.plotting.bokeh.callbacks import Callback
from holoviews.operation.datashader import datashade, dynspread
from holoviews.operation import decimate
# Cap how many points the decimate operation keeps, module-wide.
decimate.max_samples = 5000
import parambokeh
from bokeh.palettes import Greys9
# Define Stream class that stores filters for various Dimensions
class FilterStream(Stream):
    """
    Stream to apply arbitrary filtering on a Dataset.

    Many of the plotting functions accept a `FilterStream` object;
    the utility of this is that you can define a single `FilterStream`,
    and if you connect the same one to all your plots, then all of the
    selections/flag selections/etc. can be linked.

    See the demo notebooks for an example of usage.
    """
    filter_range = param.Dict(default={}, doc="""
        Ranges of parameters to select.""")
    flags = param.List(default=[], doc="""
        Flags to select.""")
    bad_flags = param.List(default=[], doc="""
        Flags to ignore""")
class FlagSetter(Stream):
    """Stream for setting flags.

    Most useful in context of a parambokeh widget, e.g.:

        from explorer.plots import FlagSetter
        import parambokeh
        flag_setter = FlagSetter(filter_stream=filter_stream, flags=data.flags, bad_flags=data.flags)
        parambokeh.Widgets(flag_setter, callback=flag_setter.event, push=False, on_init=True)

    Where `filter_stream` has been previously defined and connected to other plots
    for which you want to see points with certain flags shown/hidden/etc.
    """
    flags = param.ListSelector(default=[], objects=[], doc="""
        Flags to select""")
    bad_flags = param.ListSelector(default=[], doc="""
        Flags to ignore""")
    def __init__(self, filter_stream, **kwargs):
        super(FlagSetter, self).__init__(**kwargs)
        # Shared stream through which linked plots receive updates
        self.filter_stream = filter_stream
    def event(self, **kwargs):
        # Delegate to the shared FilterStream instead of triggering self,
        # so every plot connected to it updates.
        self.filter_stream.event(**kwargs)
class SkyFlags(Stream):
    """Experimental; not currently used for anything.

    Holds flag selections plus a colormap name for a wrapped dataset, and
    (re)builds a datashaded sky plot of it on demand.
    """
    flags = param.ListSelector(default=[], objects=[])
    bad_flags = param.ListSelector(default=[], doc="""
        Flags to ignore""")
    cmap = param.String(default='coolwarm')  # make this a list to select from
    output = parambokeh.view.Plot()
    def __init__(self, dset, vdim, filter_stream, **kwargs):
        # BUG FIX: super() was previously called with the wrong class
        # (FlagSetter), which would raise TypeError/skip this class's MRO.
        super(SkyFlags, self).__init__(**kwargs)
        self.dset = dset
        self.filter_stream = filter_stream
        self.vdim = vdim
    def points(self, *args, **kwargs):
        # BUG FIX: 'hv.util.Dyanmic' was a typo and 'data.ds' an undefined
        # name; operate on the dataset held by this stream instead.
        return hv.util.Dynamic(self.dset, operation=skypoints, streams=[self.filter_stream])
    def event(self, **kwargs):
        # Rebuild the shaded plot when the colormap changes (or on first use);
        # otherwise just forward the event to the shared filter stream.
        if not self.output or any(k in kwargs for k in ['cmap']):
            self.output = dynspread(datashade(self.points, cmap=cc.palette[kwargs['cmap']]))
        else:
            self.filter_stream.event(**kwargs)
#######################################################################################
# All this enables bokeh "reset" button to also reset a stream (such as FilterStream) #
# Not sure if some of this should be updated for newer version of HV, as this was put #
# together circa v1.9.0, I think
class ResetCallback(Callback):
    # Bokeh callback that fires when the plot's 'reset' tool is used.
    models = ['plot']
    on_events = ['reset']
class Reset(LinkedStream):
    # Stream triggered by the bokeh 'reset' tool (see ResetCallback below).
    def __init__(self, *args, **params):
        # transient=True: the stream value resets after each trigger.
        # NOTE(review): passing `self` explicitly to super().__init__ looks
        # like a duplicated-self bug (it becomes the first positional
        # argument of LinkedStream.__init__) — confirm against the
        # holoviews version in use (~1.9.0 per the comment above).
        super(Reset, self).__init__(self, *args, **dict(params, transient=True))
# Register the callback so bokeh 'reset' events trigger Reset streams.
Stream._callbacks['bokeh'][Reset] = ResetCallback
#######################################################################################
class filter_dset(Operation):
    """Process a dataset based on FilterStream state (filter_range, flags, bad_flags)

    This is used in many applications to define dynamically selected `holoviews.Dataset`
    objects.
    """
    filter_range = param.Dict(default={}, doc="""
        Dictionary of filter bounds.""")
    flags = param.List(default=[], doc="""
        Flags to select.""")
    bad_flags = param.List(default=[], doc="""
        Flags to ignore""")
    def _process(self, dset, key=None):
        # Combine range bounds with boolean flag criteria: selected flags
        # must be True, bad flags must be False.
        filter_dict = self.p.filter_range.copy()
        filter_dict.update({f:True for f in self.p.flags})
        filter_dict.update({f:False for f in self.p.bad_flags})
        # NOTE(review): this guard tests filter_range only, yet the select
        # also applies the flag criteria; with the {} default it is always
        # True in practice — confirm the intended semantics.
        if self.p.filter_range is not None:
            dset = dset.select(**filter_dict)
        return dset
# Define Operation that filters based on FilterStream state (which provides the filter_range)
class filterpoints(Operation):
    """Process a dataset based on FilterStream state (filter_range, flags, bad_flags)

    This is used in many applications to define dynamically selected `holoviews.Points`
    objects.
    """
    filter_range = param.Dict(default={}, doc="""
        Dictionary of filter bounds.""")
    flags = param.List(default=[], doc="""
        Flags to select.""")
    bad_flags = param.List(default=[], doc="""
        Flags to ignore""")
    # BUG FIX: the doc strings below were passed positionally after a keyword
    # argument — a SyntaxError. Pass them via the `doc` keyword.
    xdim = param.String(default='x', doc="Name of x-dimension")
    ydim = param.String(default='y', doc="Name of y-dimension")
    set_title = param.Boolean(default=False)
    def _process(self, dset, key=None):
        # Apply range/flag filtering first, then build Points with the
        # requested kdims; all remaining dimensions become vdims.
        dset = filter_dset(dset, flags=self.p.flags, bad_flags=self.p.bad_flags,
                           filter_range=self.p.filter_range)
        kdims = [dset.get_dimension(self.p.xdim), dset.get_dimension(self.p.ydim)]
        vdims = [dim for dim in dset.dimensions() if dim.name not in kdims]
        pts = hv.Points(dset, kdims=kdims, vdims=vdims)
        if self.p.set_title:
            # Annotate with summary statistics of the y data under selection
            ydata = dset.data[self.p.ydim]
            title = 'mean = {:.3f}, std = {:.3f} ({:.0f})'.format(ydata.mean(),
                                                                  ydata.std(),
                                                                  len(ydata))
            pts = pts.relabel(title)
        return pts
class summary_table(Operation):
    """Table of count/mean/std for the (filtered) dataset.

    If `ydim` is given, only that column is summarised; otherwise all
    value dimensions are.
    """
    ydim = param.String(default=None)
    filter_range = param.Dict(default={}, doc="""
        Dictionary of filter bounds.""")
    flags = param.List(default=[], doc="""
        Flags to select.""")
    bad_flags = param.List(default=[], doc="""
        Flags to ignore""")
    def _process(self, dset, key=None):
        # Apply the current selection before summarising.
        # NOTE(review): the local `ds` shadows the module-level datashader
        # alias within this method.
        ds = filter_dset(dset, filter_range=self.p.filter_range,
                         flags=self.p.flags, bad_flags=self.p.bad_flags)
        if self.p.ydim is None:
            cols = [dim.name for dim in dset.vdims]
        else:
            cols = [self.p.ydim]
        df = ds.data[cols]
        return hv.Table(df.describe().loc[['count', 'mean', 'std']])
def notify_stream(bounds, filter_stream, xdim, ydim):
    """
    Function to attach to bounds stream as subscriber to notify FilterStream.

    `bounds` is a (left, bottom, right, top) box selection; its x- and
    y-ranges are written into a copy of the stream's current filter_range,
    which is then pushed out via `filter_stream.event`.
    """
    left, bottom, right, top = bounds
    filter_range = dict(filter_stream.filter_range)
    # Each box selection *replaces* the previous range for both dimensions.
    # (To intersect with a prior selection instead, combine the old and new
    # bounds here before assigning.)
    filter_range[xdim] = (left, right)
    filter_range[ydim] = (bottom, top)
    filter_stream.event(filter_range=filter_range)
def reset_stream(filter_stream):
    """Clear every selection on *filter_stream*: ranges and both flag lists."""
    cleared = dict(filter_range={}, flags=[], bad_flags=[])
    filter_stream.event(**cleared)
class scattersky(ParameterizedFunction):
    """
    Creates two datashaded views from a Dataset.

    First plot is an x-y scatter plot, with colormap according to density
    of points; second plot is a sky plot where the colormap corresponds
    to the average y values of the first plot in each datashaded pixel.
    Both are linked through `filter_stream` so box selections and resets
    propagate between them; a small count/mean/std table is included too.
    """
    xdim = param.String(default='x', doc="""
        Dimension of the dataset to use as x-coordinate""")
    ydim = param.String(default='y0', doc="""
        Dimension of the dataset to use as y-coordinate""")
    scatter_cmap = param.String(default='fire', doc="""
        Colormap to use for the scatter plot""")
    sky_cmap = param.String(default='coolwarm', doc="""
        Colormap to use for the sky plot""")
    height = param.Number(default=300, doc="""
        Height in pixels of the combined layout""")
    width = param.Number(default=900, doc="""
        Width in pixels of the combined layout""")
    filter_stream = param.ClassSelector(default=FilterStream(), class_=FilterStream,
                                        doc="Stream to which selection ranges get added.")
    show_rawsky = param.Boolean(default=False, doc="""
        Whether to show the "unselected" sky points in greyscale when there is a selection.""")
    def __call__(self, dset, **params):
        self.p = ParamOverrides(self, params)
        if self.p.ydim not in dset.dimensions():
            raise ValueError('{} not in Dataset.'.format(self.p.ydim))
        # Set up scatter plot: filtered points, datashaded by density
        scatter_filterpoints = filterpoints.instance(xdim=self.p.xdim, ydim=self.p.ydim)
        scatter_pts = hv.util.Dynamic(dset, operation=scatter_filterpoints,
                                      streams=[self.p.filter_stream])
        scatter_opts = dict(plot={'height':self.p.height, 'width':self.p.width - self.p.height},
                            norm=dict(axiswise=True))
        scatter_shaded = datashade(scatter_pts, cmap=cc.palette[self.p.scatter_cmap])
        scatter = dynspread(scatter_shaded).opts(**scatter_opts)
        # Set up sky plot: ra/dec positions coloured by the mean of ydim
        sky_filterpoints = filterpoints.instance(xdim='ra', ydim='dec', set_title=False)
        sky_pts = hv.util.Dynamic(dset, operation=sky_filterpoints,
                                  streams=[self.p.filter_stream])
        sky_opts = dict(plot={'height':self.p.height, 'width':self.p.height},
                        norm=dict(axiswise=True))
        sky_shaded = datashade(sky_pts, cmap=cc.palette[self.p.sky_cmap],
                               aggregator=ds.mean(self.p.ydim), height=self.p.height,
                               width=self.p.width)
        sky = dynspread(sky_shaded).opts(**sky_opts)
        # Set up summary table (count/mean/std of ydim under the selection)
        table = hv.util.Dynamic(dset, operation=summary_table.instance(ydim=self.p.ydim),
                                streams=[self.p.filter_stream])
        table = table.opts(plot={'width':200})
        # Set up BoundsXY streams to listen to box_select events and notify FilterStream
        scatter_select = BoundsXY(source=scatter)
        scatter_notifier = partial(notify_stream, filter_stream=self.p.filter_stream,
                                   xdim=self.p.xdim, ydim=self.p.ydim)
        scatter_select.add_subscriber(scatter_notifier)
        sky_select = BoundsXY(source=sky)
        sky_notifier = partial(notify_stream, filter_stream=self.p.filter_stream,
                               xdim='ra', ydim='dec')
        sky_select.add_subscriber(sky_notifier)
        # The bokeh reset tool clears the whole selection
        reset = Reset(source=scatter)
        reset.add_subscriber(partial(reset_stream, self.p.filter_stream))
        # Greyscale layer of all (unfiltered) points drawn underneath
        raw_scatter = datashade(scatter_filterpoints(dset), cmap=Greys9[::-1][:5])
        if self.p.show_rawsky:
            raw_sky = datashade(sky_filterpoints(dset), cmap=Greys9[::-1][:5])
            return (table + raw_scatter*scatter + raw_sky*sky)
        else:
            return (table + raw_scatter*scatter + sky)
class multi_scattersky(ParameterizedFunction):
    """Layout of multiple scattersky plots, one for each vdim in dset,
    all linked through a shared FilterStream.
    """
    filter_stream = param.ClassSelector(default=FilterStream(), class_=FilterStream)
    ignored_dimensions = param.List(default=['x', 'ra', 'dec', 'label', 'ccdId', 'patchId'])
    height = param.Number(default=300)
    width = param.Number(default=900)
    def _get_ydims(self, dset):
        # Get dimensions from first Dataset type found in input
        return [dim.name for dim in dset.traverse(lambda x: x, [hv.Dataset])[0].vdims]
    def __call__(self, dset, **params):
        self.p = param.ParamOverrides(self, params)
        # One scattersky group per value dimension, laid out 3 per row
        return hv.Layout([scattersky(dset, filter_stream=self.p.filter_stream,
                                     ydim=ydim, height=self.p.height, width=self.p.width)
                          for ydim in self._get_ydims(dset)]).cols(3).opts(plot={'merge_tools':False})
class skypoints(Operation):
    """Creates Points with ra, dec as kdims, and interesting stuff as vdims
    """
    filter_range = param.Dict(default={}, doc="""
        Dictionary of filter bounds.""")
    flags = param.List(default=[], doc="""
        Flags to select.""")
    bad_flags = param.List(default=[], doc="""
        Flags to ignore""")
    def _process(self, dset, key=None):
        # Apply the current selection, then expose sky position as kdims and
        # every value dimension (plus 'label') as vdims.
        dset = filter_dset(dset, filter_range=self.p.filter_range,
                           flags=self.p.flags, bad_flags=self.p.bad_flags)
        return hv.Points(dset, kdims=['ra', 'dec'], vdims=dset.vdims + ['label'])
class skyplot(ParameterizedFunction):
    """Datashaded + decimated RA/dec plot, with colormap of third dimension
    """
    cmap = param.String(default='coolwarm', doc="""
        Colormap to use.""")
    aggregator = param.ObjectSelector(default='mean', objects=['mean', 'std', 'count'], doc="""
        Aggregator for datashading.""")
    vdim = param.String(default=None, doc="""
        Dimension to use for colormap.""")
    width = param.Number(default=None)
    height = param.Number(default=None)
    decimate_size = param.Number(default=5, doc="""
        Size of (invisible) decimated points.""")
    filter_stream = param.ClassSelector(default=FilterStream(), class_=FilterStream)
    flags = param.List(default=[], doc="""
        Flags to select.""")
    bad_flags = param.List(default=[], doc="""
        Flags to ignore""")
    def __call__(self, dset, **params):
        self.p = ParamOverrides(self, params)
        # Default to the first value dimension if none was requested
        if self.p.vdim is None:
            vdim = dset.vdims[0].name
        else:
            vdim = self.p.vdim
        pts = hv.util.Dynamic(dset, operation=skypoints,
                              streams=[self.p.filter_stream])
        # Map the selected aggregator name onto a datashader reduction
        if self.p.aggregator == 'mean':
            aggregator = ds.mean(vdim)
        elif self.p.aggregator == 'std':
            aggregator = ds.std(vdim)
        elif self.p.aggregator == 'count':
            aggregator = ds.count()
        kwargs = dict(cmap=cc.palette[self.p.cmap],
                      aggregator=aggregator)
        if self.p.width is not None:
            kwargs.update(width=self.p.width, height=self.p.height)
        # Invisible decimated points are overlaid on the shaded image so
        # hover and box_select still work on (a subset of) the real points.
        decimate_opts = dict(plot={'tools':['hover', 'box_select']},
                             style={'alpha':0, 'size':self.p.decimate_size,
                                    'nonselection_alpha':0})
        decimated = decimate(pts).opts(**decimate_opts)
        sky_shaded = datashade(pts, **kwargs)
        return dynspread(sky_shaded) * decimated
class skyplot_layout(ParameterizedFunction):
    """Layout of skyplots with linked crosshair
    """
    crosshair = param.Boolean(default=True)
    def __call__(self, skyplots, **params):
        self.p = param.ParamOverrides(self, params)
        # One shared pointer stream drives a crosshair overlay on every
        # plot, so the same position is highlighted across the layout.
        pointer = hv.streams.PointerXY(x=0, y=0)
        cross_opts = dict(style={'line_width':1, 'color':'black'})
        cross_dmap = hv.DynamicMap(lambda x, y: (hv.VLine(x).opts(**cross_opts) *
                                                 hv.HLine(y).opts(**cross_opts)), streams=[pointer])
        if self.p.crosshair:
            plots = [(s*cross_dmap).relabel(s.label) for s in skyplots]
        else:
            plots = list(skyplots)
        return hv.Layout(plots)
class skyshade(Operation):
    """Experimental

    Datashade an element with a selectable aggregator over `vdim`.
    A decimated hover layer (using decimate_size/max_samples) is
    currently disabled.
    """
    cmap = param.String(default='coolwarm')
    aggregator = param.ObjectSelector(default='mean', objects=['mean', 'std', 'count'])
    width = param.Number(default=None)
    height = param.Number(default=None)
    vdim = param.String(default='y')
    decimate_size = param.Number(default=5)
    max_samples = param.Number(default=10000)
    def _process(self, element, key=None):
        vdim = self.p.vdim
        # Map the selected aggregator name onto a datashader reduction
        if self.p.aggregator == 'mean':
            aggregator = ds.mean(vdim)
        elif self.p.aggregator == 'std':
            aggregator = ds.std(vdim)
        elif self.p.aggregator == 'count':
            aggregator = ds.count()
        kwargs = dict(cmap=cc.palette[self.p.cmap],
                      aggregator=aggregator)
        if self.p.width is not None:
            kwargs.update(width=self.p.width, height=self.p.height,
                          streams=[hv.streams.RangeXY])
        datashaded = dynspread(datashade(element, **kwargs))
        return datashaded  # (decimated hover overlay disabled)
| {
"repo_name": "timothydmorton/qa_explorer",
"path": "explorer/plots.py",
"copies": "1",
"size": "17724",
"license": "mit",
"hash": -6058055430368270000,
"line_mean": 39.2818181818,
"line_max": 104,
"alpha_frac": 0.5960279847,
"autogenerated": false,
"ratio": 3.793664383561644,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9832022875998696,
"avg_score": 0.011533898452589423,
"num_lines": 440
} |
from functools import partial
import pytest
from bs4 import BeautifulSoup
from flask import url_for
from freezegun import freeze_time
from app.main.forms import FieldWithNoneOption
from tests.conftest import SERVICE_ONE_ID, normalize_spaces, sample_uuid
def test_non_logged_in_user_can_see_homepage(
    client,
    mock_get_service_and_organisation_counts,
):
    # Anonymous users get the marketing homepage: headline, sign-up button,
    # search-engine description, and live service/organisation counts.
    response = client.get(url_for('main.index'))
    assert response.status_code == 200
    page = BeautifulSoup(response.data.decode('utf-8'), 'html.parser')
    assert page.h1.text.strip() == (
        'Send emails, text messages and letters to your users'
    )
    # The call-to-action button links to registration
    assert page.select_one('a[role=button][draggable=false]')['href'] == url_for(
        'main.register'
    )
    assert page.select_one('meta[name=description]')['content'].strip() == (
        'GOV.UK Notify lets you send emails, text messages and letters '
        'to your users. Try it now if you work in central government, a '
        'local authority, or the NHS.'
    )
    # Counts come from mock_get_service_and_organisation_counts
    assert normalize_spaces(page.select_one('#whos-using-notify').text) == (
        'Who’s using GOV.UK Notify '
        'There are 111 organisations and 9,999 services using Notify. '
        'See the list of services and organisations.'
    )
    assert page.select_one('#whos-using-notify a')['href'] == url_for(
        'main.performance'
    )
def test_logged_in_user_redirects_to_choose_account(
    client_request,
    api_user_active,
    mock_get_user,
    mock_get_user_by_email,
    mock_login,
):
    """Signed-in users are bounced away from the homepage and sign-in page."""
    client_request.get('main.index', _expected_status=302)
    accounts_url = url_for('main.show_accounts_or_dashboard', _external=True)
    client_request.get(
        'main.sign_in',
        _expected_status=302,
        _expected_redirect=accounts_url,
    )
def test_robots(client_request):
    """There is deliberately no robots.txt, so it should 404."""
    robots_path = '/robots.txt'
    client_request.get_url(robots_path, _expected_status=404)
@pytest.mark.parametrize('endpoint, kwargs', (
    ('sign_in', {}),
    ('support', {}),
    ('support_public', {}),
    ('triage', {}),
    ('feedback', {'ticket_type': 'ask-question-give-feedback'}),
    ('feedback', {'ticket_type': 'general'}),
    ('feedback', {'ticket_type': 'report-problem'}),
    ('bat_phone', {}),
    ('thanks', {}),
    ('register', {}),
    ('features_email', {}),
    # The homepage should NOT be hidden, so it is expected to fail
    pytest.param('index', {}, marks=pytest.mark.xfail(raises=AssertionError)),
))
@freeze_time('2012-12-12 12:12')  # So we don’t go out of business hours
def test_hiding_pages_from_search_engines(
    client,
    mock_get_service_and_organisation_counts,
    endpoint,
    kwargs,
):
    # Pages are hidden both via the X-Robots-Tag header and a meta tag
    response = client.get(url_for(f'main.{endpoint}', **kwargs))
    assert 'X-Robots-Tag' in response.headers
    assert response.headers['X-Robots-Tag'] == 'noindex'
    page = BeautifulSoup(response.data.decode('utf-8'), 'html.parser')
    assert page.select_one('meta[name=robots]')['content'] == 'noindex'
@pytest.mark.parametrize('view', [
    'cookies', 'privacy', 'pricing', 'terms', 'roadmap',
    'features', 'documentation', 'security',
    'message_status', 'features_email', 'features_sms',
    'features_letters', 'how_to_pay', 'get_started',
    'guidance_index', 'branding_and_customisation',
    'create_and_send_messages', 'edit_and_format_messages',
    'send_files_by_email', 'upload_a_letter', 'who_can_use_notify',
])
def test_static_pages(
    client_request,
    mock_get_organisation_by_domain,
    view,
):
    # Every static page must render regardless of sign-in/service state
    request = partial(client_request.get, 'main.{}'.format(view))
    # Check the page loads when user is signed in
    page = request()
    assert not page.select_one('meta[name=description]')
    # Check it still works when they don’t have a recent service
    with client_request.session_transaction() as session:
        session['service_id'] = None
    request()
    # Check it still works when they sign out
    client_request.logout()
    with client_request.session_transaction() as session:
        session['service_id'] = None
        session['user_id'] = None
    request()
def test_guidance_pages_link_to_service_pages_when_signed_in(
    client_request,
):
    # The in-page link to the template picker should only appear when the
    # user has a current service in their session.
    request = partial(client_request.get, 'main.edit_and_format_messages')
    selector = '.list-number li a'
    # Check the page loads when user is signed in
    page = request()
    assert page.select_one(selector)['href'] == url_for(
        'main.choose_template',
        service_id=SERVICE_ONE_ID,
    )
    # Check it still works when they don’t have a recent service
    with client_request.session_transaction() as session:
        session['service_id'] = None
    page = request()
    assert not page.select_one(selector)
    # Check it still works when they sign out
    client_request.logout()
    with client_request.session_transaction() as session:
        session['service_id'] = None
        session['user_id'] = None
    page = request()
    assert not page.select_one(selector)
@pytest.mark.parametrize('view, expected_view', [
    ('information_risk_management', 'security'),
    ('old_integration_testing', 'integration_testing'),
    ('old_roadmap', 'roadmap'),
    ('information_risk_management', 'security'),
    ('old_terms', 'terms'),
    ('information_security', 'using_notify'),
    ('old_using_notify', 'using_notify'),
    ('delivery_and_failure', 'message_status'),
    ('callbacks', 'documentation'),
    ('who_its_for', 'who_can_use_notify'),
])
def test_old_static_pages_redirect(
    client,
    view,
    expected_view
):
    """Legacy URLs permanently redirect to their replacement pages."""
    old_url = url_for('main.{}'.format(view))
    new_url = url_for('main.{}'.format(expected_view), _external=True)
    response = client.get(old_url)
    assert response.status_code == 301
    assert response.location == new_url
def test_message_status_page_contains_message_status_ids(client_request):
    # The 'email-statuses' and 'sms-statuses' id are linked to when we display a message status,
    # so this test ensures we don't accidentally remove them
    page = client_request.get('main.message_status')
    for anchor_id in ('email-statuses', 'sms-statuses'):
        assert page.find(id=anchor_id)
def test_old_using_notify_page(client_request):
    # The retired 'using notify' page must return 410 Gone
    client_request.get('main.using_notify', _expected_status=410)
def test_old_integration_testing_page(
    client_request,
):
    # Retired page: 410 Gone, but with a pointer to the new documentation
    page = client_request.get(
        'main.integration_testing',
        _expected_status=410,
    )
    assert normalize_spaces(page.select_one('.govuk-grid-row').text) == (
        'Integration testing '
        'This information has moved. '
        'Refer to the documentation for the client library you are using.'
    )
    assert page.select_one('.govuk-grid-row a')['href'] == url_for(
        'main.documentation'
    )
def test_terms_page_has_correct_content(client_request):
    # Smoke-test the opening paragraph of the terms of use
    terms_page = client_request.get('main.terms')
    assert normalize_spaces(terms_page.select('main p')[0].text) == (
        'These terms apply to your service’s use of GOV.UK Notify. '
        'You must be the service manager to accept them.'
    )
def test_css_is_served_from_correct_path(client_request):
    """Stylesheets come from the static asset domain with cache-busting query strings."""
    expected_prefixes = [
        'https://static.example.com/stylesheets/main.css?',
        'https://static.example.com/stylesheets/print.css?',
    ]
    page = client_request.get('main.documentation')  # easy static page
    stylesheet_links = page.select('link[rel=stylesheet]')
    for index, link in enumerate(stylesheet_links):
        assert link['href'].startswith(expected_prefixes[index])
def test_resources_that_use_asset_path_variable_have_correct_path(client_request):
    """The fallback crown logo image is loaded from the static asset domain."""
    page = client_request.get('main.documentation')  # easy static page
    fallback_image = page.select_one('.govuk-header__logotype-crown-fallback-image')
    expected_prefix = 'https://static.example.com/images/govuk-logotype-crown.png'
    assert fallback_image['src'].startswith(expected_prefix)
@pytest.mark.parametrize('extra_args, email_branding_retrieved', (
    (
        {},
        False,
    ),
    (
        {'branding_style': '__NONE__'},
        False,
    ),
    (
        {'branding_style': sample_uuid()},
        True,
    ),
))
def test_email_branding_preview(
    client_request,
    mock_get_email_branding,
    extra_args,
    email_branding_retrieved,
):
    # Branding is only fetched from the API when a real branding id is given
    page = client_request.get(
        'main.email_template',
        _test_page_title=False,
        **extra_args
    )
    assert page.title.text == 'Email branding preview'
    assert mock_get_email_branding.called is email_branding_retrieved
@pytest.mark.parametrize('branding_style, filename', [
    ('hm-government', 'hm-government'),
    (None, 'no-branding'),
    (FieldWithNoneOption.NONE_OPTION_VALUE, 'no-branding')
])
def test_letter_template_preview_links_to_the_correct_image(
    client_request,
    mocker,
    mock_get_letter_branding_by_id,
    branding_style,
    filename,
):
    # Missing/None branding falls back to the 'no-branding' preview image
    page = client_request.get(
        'main.letter_template',
        _test_page_title=False,
        # Letter HTML doesn’t use the Design System, so elements won’t have class attributes
        _test_for_elements_without_class=False,
        branding_style=branding_style
    )
    image_link = page.find('img')['src']
    assert image_link == url_for(
        'no_cookie.letter_branding_preview_image',
        filename=filename,
        page=1
    )
def test_letter_template_preview_headers(
    client,
    mock_get_letter_branding_by_id,
):
    """The letter preview page must only be frameable from the same origin."""
    response = client.get(
        url_for('main.letter_template', branding_style='hm-government')
    )
    frame_options = response.headers.get('X-Frame-Options')
    assert frame_options == 'SAMEORIGIN'
def test_letter_spec_redirect(client_request):
    """The letter spec page redirects to the versioned PDF on the docs site."""
    spec_pdf_url = (
        'https://docs.notifications.service.gov.uk'
        '/documentation/images/notify-pdf-letter-spec-v2.4.pdf'
    )
    client_request.get(
        'main.letter_spec',
        _expected_status=302,
        _expected_redirect=spec_pdf_url,
    )
def test_letter_spec_redirect_with_non_logged_in_user(client_request):
    """The letter spec redirect must also work for anonymous users."""
    spec_pdf_url = (
        'https://docs.notifications.service.gov.uk'
        '/documentation/images/notify-pdf-letter-spec-v2.4.pdf'
    )
    client_request.logout()
    client_request.get(
        'main.letter_spec',
        _expected_status=302,
        _expected_redirect=spec_pdf_url,
    )
def test_font_preload(
    client_request,
    mock_get_service_and_organisation_counts,
):
    """The homepage must preload exactly four .woff2 fonts from the static host."""
    client_request.logout()
    page = client_request.get('main.index', _test_page_title=False)

    preload_tags = page.select('link[rel=preload][as=font][type="font/woff2"][crossorigin]')

    assert len(preload_tags) == 4, 'Run `npm run build` to copy fonts into app/static/fonts/'
    for preload in preload_tags:
        href = preload['href']
        assert href.startswith('https://static.example.com/fonts/')
        assert href.endswith('.woff2')
| {
"repo_name": "alphagov/notifications-admin",
"path": "tests/app/main/views/test_index.py",
"copies": "1",
"size": "10613",
"license": "mit",
"hash": 6913061721507664000,
"line_mean": 29.6329479769,
"line_max": 108,
"alpha_frac": 0.6465704312,
"autogenerated": false,
"ratio": 3.5531344284277573,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9698276216238215,
"avg_score": 0.00028572867790818446,
"num_lines": 346
} |
from functools import partial
import pytest
from flask import url_for
# URL builders (still needing service_id) for every letter-only view under test
letters_urls = [
    partial(url_for, 'main.add_service_template', template_type='letter'),
]
@pytest.mark.parametrize('url', letters_urls)
@pytest.mark.parametrize('permissions, response_code', [
    (['letter'], 200),
    ([], 403)
])
def test_letters_access_restricted(
    client_request,
    platform_admin_user,
    mocker,
    permissions,
    response_code,
    mock_get_service_templates,
    url,
    service_one,
):
    """Letter-only pages return 403 unless the service has the 'letter' permission."""
    service_one['permissions'] = permissions
    client_request.login(platform_admin_user)

    target_url = url(service_id=service_one['id'])
    client_request.get_url(
        target_url,
        _follow_redirects=True,
        _expected_status=response_code,
    )
@pytest.mark.parametrize('url', letters_urls)
def test_letters_lets_in_without_permission(
    client,
    mocker,
    mock_login,
    mock_has_permissions,
    api_user_active,
    mock_get_service_templates,
    url,
    service_one
):
    """A user with no explicit permissions still gets in when the service allows letters."""
    service_one['permissions'] = ['letter']
    mocker.patch('app.service_api_client.get_service', return_value={"data": service_one})
    client.login(api_user_active)
    response = client.get(url(service_id=service_one['id']))
    # The user has no permissions of their own, yet the page is served
    assert api_user_active['permissions'] == {}
    assert response.status_code == 200
@pytest.mark.parametrize('permissions, choices', [
    (
        ['email', 'sms', 'letter'],
        ['Email', 'Text message', 'Letter', 'Copy an existing template']
    ),
    (
        ['email', 'sms'],
        ['Email', 'Text message', 'Copy an existing template']
    ),
])
def test_given_option_to_add_letters_if_allowed(
    client_request,
    service_one,
    mocker,
    mock_get_service_templates,
    mock_get_template_folders,
    mock_get_organisations_and_services_for_user,
    mock_get_api_keys,
    permissions,
    choices,
):
    """The 'add new template' radios offer letters only when the service may send them."""
    service_one['permissions'] = permissions
    page = client_request.get('main.choose_template', service_id=service_one['id'])

    radios = page.select('#add_new_template_form input[type=radio]')
    labels = page.select('#add_new_template_form label')

    assert len(radios) == len(choices)
    assert len(labels) == len(choices)
    # Radio values mirror the service permissions, in order
    for radio, permission in zip(radios, permissions):
        assert radio['value'] == permission
    # Label text shows the human-readable choices, in order
    for label_element, expected_text in zip(labels, choices):
        assert label_element.text.strip() == expected_text
| {
"repo_name": "alphagov/notifications-admin",
"path": "tests/app/main/views/test_letters.py",
"copies": "1",
"size": "2377",
"license": "mit",
"hash": -1073211712338258000,
"line_mean": 25.1208791209,
"line_max": 90,
"alpha_frac": 0.6512410602,
"autogenerated": false,
"ratio": 3.6124620060790273,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47637030662790275,
"avg_score": null,
"num_lines": null
} |
from functools import partial
import pytest
from plumbum.cmd import pg_dump
from pg_grant import parse_acl_item, FunctionInfo, PgObjectType
from pg_grant.query import (
get_all_function_acls, get_all_sequence_acls, get_all_table_acls,
get_all_type_acls)
# Apply the 'nocontainer' marker to every test in this module
pytestmark = pytest.mark.nocontainer
def _priv_acls(conn, acls, type_, revoke):
    """Replay every ACL entry in *acls* against *conn*.

    Each parsed ACL item is turned into GRANT statements, or REVOKE
    statements when *revoke* is true, and executed one by one. Functions
    carry their argument types so the statements can name the right overload.
    """
    for entry in acls:
        if entry.acl is None:
            continue
        # Only functions need their argument types in the statement
        arg_types = entry.arg_types if isinstance(entry, FunctionInfo) else None
        for acl_item in entry.acl:
            parsed = parse_acl_item(acl_item)
            make_statements = (
                parsed.as_revoke_statements if revoke
                else parsed.as_grant_statements
            )
            statements = make_statements(
                type_, entry.name, schema=entry.schema, arg_types=arg_types)
            for statement in statements:
                conn.execute(statement)
# Convenience wrappers around _priv_acls: one grant/revoke pair per object type
grant_table_acls = partial(_priv_acls, type_=PgObjectType.TABLE, revoke=False)
revoke_table_acls = partial(_priv_acls, type_=PgObjectType.TABLE, revoke=True)
grant_sequence_acls = partial(_priv_acls, type_=PgObjectType.SEQUENCE, revoke=False)
revoke_sequence_acls = partial(_priv_acls, type_=PgObjectType.SEQUENCE, revoke=True)
grant_type_acls = partial(_priv_acls, type_=PgObjectType.TYPE, revoke=False)
revoke_type_acls = partial(_priv_acls, type_=PgObjectType.TYPE, revoke=True)
grant_function_acls = partial(_priv_acls, type_=PgObjectType.FUNCTION, revoke=False)
revoke_function_acls = partial(_priv_acls, type_=PgObjectType.FUNCTION, revoke=True)
@pytest.mark.parametrize('get, revoke, grant', [
    (get_all_table_acls, revoke_table_acls, grant_table_acls),
    (get_all_sequence_acls, revoke_sequence_acls, grant_sequence_acls),
    (get_all_type_acls, revoke_type_acls, grant_type_acls),
    (get_all_function_acls, revoke_function_acls, grant_function_acls),
])
def test_revoke_grant_schema_relations(connection, postgres_url, get, revoke, grant):
    """Revoking then re-granting ACLs must round-trip the pg_dump output exactly."""
    dump_schema = pg_dump['--schema-only', postgres_url]

    code, baseline_dump, _ = dump_schema.run()
    assert code == 0

    acls = get(connection, 'public')

    revoke(connection, acls)
    code, revoked_dump, _ = dump_schema.run()
    assert code == 0
    # Stripping privileges must visibly change the dump
    assert baseline_dump != revoked_dump

    grant(connection, acls)
    code, restored_dump, _ = dump_schema.run()
    assert code == 0
    # Restoring them must bring the dump back to the baseline
    assert baseline_dump == restored_dump
| {
"repo_name": "RazerM/pg_grant",
"path": "tests/test_round_trip.py",
"copies": "1",
"size": "2417",
"license": "mit",
"hash": -8838675945675635000,
"line_mean": 33.5285714286,
"line_max": 85,
"alpha_frac": 0.6565990898,
"autogenerated": false,
"ratio": 3.4827089337175794,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9633352445765375,
"avg_score": 0.0011911155504406756,
"num_lines": 70
} |
from functools import partial
import pytest
import numpy as np
from sklearn.metrics.cluster import adjusted_mutual_info_score
from sklearn.metrics.cluster import adjusted_rand_score
from sklearn.metrics.cluster import rand_score
from sklearn.metrics.cluster import completeness_score
from sklearn.metrics.cluster import fowlkes_mallows_score
from sklearn.metrics.cluster import homogeneity_score
from sklearn.metrics.cluster import mutual_info_score
from sklearn.metrics.cluster import normalized_mutual_info_score
from sklearn.metrics.cluster import v_measure_score
from sklearn.metrics.cluster import silhouette_score
from sklearn.metrics.cluster import calinski_harabasz_score
from sklearn.metrics.cluster import davies_bouldin_score
from sklearn.utils._testing import assert_allclose
# Dictionaries of metrics
# ------------------------
# The goal of having those dictionaries is to have an easy way to call a
# particular metric and associate a name to each function:
# - SUPERVISED_METRICS: all supervised cluster metrics - (when given a
# ground truth value)
# - UNSUPERVISED_METRICS: all unsupervised cluster metrics
#
# Those dictionaries will be used to test systematically some invariance
# properties, e.g. invariance toward several input layout.
#
SUPERVISED_METRICS = {
    "adjusted_mutual_info_score": adjusted_mutual_info_score,
    "adjusted_rand_score": adjusted_rand_score,
    "rand_score": rand_score,
    "completeness_score": completeness_score,
    "homogeneity_score": homogeneity_score,
    "mutual_info_score": mutual_info_score,
    "normalized_mutual_info_score": normalized_mutual_info_score,
    "v_measure_score": v_measure_score,
    "fowlkes_mallows_score": fowlkes_mallows_score
}
UNSUPERVISED_METRICS = {
    "silhouette_score": silhouette_score,
    "silhouette_manhattan": partial(silhouette_score, metric='manhattan'),
    "calinski_harabasz_score": calinski_harabasz_score,
    "davies_bouldin_score": davies_bouldin_score
}
# Lists of metrics with common properties
# ---------------------------------------
# Lists of metrics with common properties are used to test systematically some
# functionalities and invariance, e.g. SYMMETRIC_METRICS lists all metrics
# that are symmetric with respect to their input argument y_true and y_pred.
#
# --------------------------------------------------------------------
# Symmetric with respect to their input arguments y_true and y_pred.
# Symmetric metrics only apply to supervised clusters.
SYMMETRIC_METRICS = [
    "adjusted_rand_score", "rand_score", "v_measure_score",
    "mutual_info_score", "adjusted_mutual_info_score",
    "normalized_mutual_info_score", "fowlkes_mallows_score"
]
NON_SYMMETRIC_METRICS = ["homogeneity_score", "completeness_score"]
# Metrics whose upper bound is 1
NORMALIZED_METRICS = [
    "adjusted_rand_score", "rand_score", "homogeneity_score",
    "completeness_score", "v_measure_score", "adjusted_mutual_info_score",
    "fowlkes_mallows_score", "normalized_mutual_info_score"
]
# Fixed random labellings (30 samples, 3 clusters) shared by the tests below
rng = np.random.RandomState(0)
y1 = rng.randint(3, size=30)
y2 = rng.randint(3, size=30)
def test_symmetric_non_symmetric_union():
    """Symmetric and non-symmetric lists together cover every supervised metric."""
    partitioned = sorted(SYMMETRIC_METRICS + NON_SYMMETRIC_METRICS)
    assert partitioned == sorted(SUPERVISED_METRICS)
# 0.22 AMI and NMI changes
@pytest.mark.filterwarnings('ignore::FutureWarning')
@pytest.mark.parametrize(
    'metric_name, y1, y2',
    [(name, y1, y2) for name in SYMMETRIC_METRICS]
)
def test_symmetry(metric_name, y1, y2):
    """Swapping y_true and y_pred must not change a symmetric metric's score."""
    metric = SUPERVISED_METRICS[metric_name]
    forward = metric(y1, y2)
    backward = metric(y2, y1)
    assert forward == pytest.approx(backward)
@pytest.mark.parametrize(
    'metric_name, y1, y2',
    [(name, y1, y2) for name in NON_SYMMETRIC_METRICS]
)
def test_non_symmetry(metric_name, y1, y2):
    """Non-symmetric metrics must score differently when arguments are swapped."""
    metric = SUPERVISED_METRICS[metric_name]
    forward = metric(y1, y2)
    backward = metric(y2, y1)
    assert forward != pytest.approx(backward)
# 0.22 AMI and NMI changes
@pytest.mark.filterwarnings('ignore::FutureWarning')
@pytest.mark.parametrize("metric_name", NORMALIZED_METRICS)
def test_normalized_output(metric_name):
    """Normalized metrics stay in [0, 1]: 1 for identical labellings, never negative.

    Fix: the original repeated the `< 1.0` assertion on identical inputs twice
    back-to-back; the duplicate line added no coverage and has been removed.
    """
    upper_bound_1 = [0, 0, 0, 1, 1, 1]
    upper_bound_2 = [0, 0, 0, 1, 1, 1]
    metric = SUPERVISED_METRICS[metric_name]
    assert metric([0, 0, 0, 1, 1], [0, 0, 0, 1, 2]) > 0.0
    assert metric([0, 0, 1, 1, 2], [0, 0, 1, 1, 1]) > 0.0
    assert metric([0, 0, 0, 1, 2], [0, 1, 1, 1, 1]) < 1.0
    # Identical labellings must reach the upper bound exactly
    assert metric(upper_bound_1, upper_bound_2) == pytest.approx(1.0)

    # Degenerate labellings (all-one-cluster vs all-singletons) must not go negative
    lower_bound_1 = [0, 0, 0, 0, 0, 0]
    lower_bound_2 = [0, 1, 2, 3, 4, 5]
    score = np.array([metric(lower_bound_1, lower_bound_2),
                      metric(lower_bound_2, lower_bound_1)])
    assert not (score < 0).any()
# 0.22 AMI and NMI changes
@pytest.mark.filterwarnings('ignore::FutureWarning')
@pytest.mark.parametrize(
    "metric_name", dict(SUPERVISED_METRICS, **UNSUPERVISED_METRICS)
)
def test_permute_labels(metric_name):
    # All clustering metrics do not change score due to permutations of labels
    # that is when 0 and 1 exchanged.
    y_label = np.array([0, 0, 0, 1, 1, 0, 1])
    y_pred = np.array([1, 0, 1, 0, 1, 1, 0])

    if metric_name in SUPERVISED_METRICS:
        metric = SUPERVISED_METRICS[metric_name]
        reference = metric(y_pred, y_label)
        # Flip predictions, labels, and both: the score must not move
        assert_allclose(reference, metric(1 - y_pred, y_label))
        assert_allclose(reference, metric(1 - y_pred, 1 - y_label))
        assert_allclose(reference, metric(y_pred, 1 - y_label))
    else:
        metric = UNSUPERVISED_METRICS[metric_name]
        X = np.random.randint(10, size=(7, 10))
        reference = metric(X, y_pred)
        assert_allclose(reference, metric(X, 1 - y_pred))
# 0.22 AMI and NMI changes
@pytest.mark.filterwarnings('ignore::FutureWarning')
@pytest.mark.parametrize(
    "metric_name", dict(SUPERVISED_METRICS, **UNSUPERVISED_METRICS)
)
# For all clustering metrics Input parameters can be both
# in the form of arrays lists, positive, negative or string
def test_format_invariance(metric_name):
    """Scores must be identical across equivalent label formats (list, array, str, shifted ints)."""
    y_true = [0, 0, 0, 0, 1, 1, 1, 1]
    y_pred = [0, 1, 2, 3, 4, 5, 6, 7]

    def generate_formats(y):
        # Yield (reformatted labels, description) pairs encoding the same partition
        y = np.array(y)
        yield y, 'array of ints'
        yield y.tolist(), 'list of ints'
        yield [str(x) + "-a" for x in y.tolist()], 'list of strs'
        yield (np.array([str(x) + "-a" for x in y.tolist()], dtype=object),
               'array of strs')
        yield y - 1, 'including negative ints'
        yield y + 1, 'strictly positive ints'

    if metric_name in SUPERVISED_METRICS:
        metric = SUPERVISED_METRICS[metric_name]
        score_1 = metric(y_true, y_pred)
        y_true_gen = generate_formats(y_true)
        y_pred_gen = generate_formats(y_pred)
        # Pair up the matching reformatting of y_true and y_pred
        for (y_true_fmt, fmt_name), (y_pred_fmt, _) in zip(y_true_gen,
                                                           y_pred_gen):
            assert score_1 == metric(y_true_fmt, y_pred_fmt)
    else:
        metric = UNSUPERVISED_METRICS[metric_name]
        X = np.random.randint(10, size=(8, 10))
        score_1 = metric(X, y_true)
        # Data dtype must not matter either
        assert score_1 == metric(X.astype(float), y_true)
        y_true_gen = generate_formats(y_true)
        for (y_true_fmt, fmt_name) in y_true_gen:
            assert score_1 == metric(X, y_true_fmt)
@pytest.mark.parametrize("metric", SUPERVISED_METRICS.values())
def test_single_sample(metric):
    # only the supervised metrics support single sample
    for true_label in (0, 1):
        for pred_label in (0, 1):
            metric([true_label], [pred_label])
@pytest.mark.parametrize(
    "metric_name, metric_func",
    dict(SUPERVISED_METRICS, **UNSUPERVISED_METRICS).items()
)
def test_inf_nan_input(metric_name, metric_func):
    """Every metric must raise ValueError for inputs containing NaN or infinity.

    Fix: the original wrapped the whole loop in one ``pytest.raises`` block,
    so the context manager exited as soon as the FIRST invalid input raised
    and the remaining cases were never exercised. Each case now gets its own
    ``pytest.raises`` context.
    """
    if metric_name in SUPERVISED_METRICS:
        invalids = [([0, 1], [np.inf, np.inf]),
                    ([0, 1], [np.nan, np.nan]),
                    ([0, 1], [np.nan, np.inf])]
    else:
        X = np.random.randint(10, size=(2, 10))
        invalids = [(X, [np.inf, np.inf]),
                    (X, [np.nan, np.nan]),
                    (X, [np.nan, np.inf])]
    for args in invalids:
        with pytest.raises(ValueError, match='contains NaN, infinity'):
            metric_func(*args)
| {
"repo_name": "anntzer/scikit-learn",
"path": "sklearn/metrics/cluster/tests/test_common.py",
"copies": "9",
"size": "8127",
"license": "bsd-3-clause",
"hash": -8730547908675925000,
"line_mean": 37.1549295775,
"line_max": 78,
"alpha_frac": 0.6489479513,
"autogenerated": false,
"ratio": 3.1126005361930296,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.826154848749303,
"avg_score": null,
"num_lines": null
} |
from functools import partial
import pytest
# Sentinel meaning "target object not created yet"
empty = object()


class cached_property(object):
    """Non-data descriptor: computes the value once per instance and stores it
    in the instance __dict__, which shadows the descriptor afterwards."""

    def __init__(self, func):
        self.func = func

    def __get__(self, obj, cls):
        result = self.func(obj)
        obj.__dict__[self.func.__name__] = result
        return result


class SimpleProxy(object):
    """Lazy proxy: builds its target with `factory` on first str()."""

    def __init__(self, factory):
        self.factory = factory
        self.object = empty

    def __str__(self):
        target = self.object
        if target is empty:
            target = self.object = self.factory()
        return str(target)


class CachedPropertyProxy(object):
    """Lazy proxy whose target is memoized via the cached_property descriptor."""

    def __init__(self, factory):
        self.factory = factory

    @cached_property
    def object(self):
        return self.factory()

    def __str__(self):
        return str(self.object)


class LocalsSimpleProxy(object):
    """Like SimpleProxy, but binds str() as a local default argument."""

    def __init__(self, factory):
        self.factory = factory
        self.object = empty

    def __str__(self, func=str):
        target = self.object
        if target is empty:
            target = self.object = self.factory()
        return func(target)


class LocalsCachedPropertyProxy(object):
    """Like CachedPropertyProxy, but binds str() as a local default argument."""

    def __init__(self, factory):
        self.factory = factory

    @cached_property
    def object(self):
        return self.factory()

    def __str__(self, func=str):
        return func(self.object)
@pytest.fixture(scope="module", params=["SimpleProxy", "CachedPropertyProxy", "LocalsSimpleProxy", "LocalsCachedPropertyProxy"])
def impl(request):
    """Yield each proxy class in turn, looked up by name in this module."""
    return globals()[request.param]
def test_proto(benchmark, impl):
    """Benchmark str() through each proxy flavour and check the rendered value."""
    target = "foobar"
    proxy = impl(lambda: target)
    rendered = benchmark(partial(str, proxy))
    assert rendered == target
| {
"repo_name": "ionelmc/pytest-benchmark",
"path": "tests/test_sample.py",
"copies": "1",
"size": "1613",
"license": "bsd-2-clause",
"hash": -7799659396215641000,
"line_mean": 21.4027777778,
"line_max": 128,
"alpha_frac": 0.6168629882,
"autogenerated": false,
"ratio": 3.9150485436893203,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00010766580534022394,
"num_lines": 72
} |
from functools import partial
import pytest
from asphalt.core.resource import (
Resource, ResourceEventType, ResourceCollection, ResourceConflict, ResourceNotFoundError,
ResourceEventListener)
class TestResource:
    """Checks Resource's repr() and str() output formats."""

    @pytest.fixture
    def resource(self):
        return Resource(6, ('int', 'object'), 'foo', 'bar.foo')

    def test_repr(self, resource: Resource):
        expected = ("Resource(types=('int', 'object'), alias='foo', "
                    "value=6, context_var='bar.foo')")
        assert repr(resource) == expected

    def test_str(self, resource: Resource):
        expected = ("types=('int', 'object'), alias='foo', "
                    "value=6, context_var='bar.foo'")
        assert str(resource) == expected
class TestResourceEventListener:
    """Checks that a listener can be removed directly or via the context manager."""

    @pytest.mark.parametrize('contextmanager', [False, True], ids=['unlisten', 'contextmanager'])
    def test_unlisten(self, contextmanager):
        callbacks = []
        listener = ResourceEventListener(callbacks, lambda evt: None)
        callbacks.append(listener)
        if contextmanager:
            # Exiting the with-block should unlisten automatically
            with listener:
                pass
        else:
            listener.unlisten()
        # Either path must remove the listener from the callback list
        assert listener not in callbacks
class TestResourceManager:
    """Tests for ResourceCollection: adding, removing and requesting resources."""

    @pytest.fixture
    def resources(self):
        # Fresh, empty collection for each test
        return ResourceCollection()

    @pytest.mark.asyncio
    @pytest.mark.parametrize('delay', [False, True], ids=['immediate', 'delayed'])
    def test_add(self, resources: ResourceCollection, event_loop, delay):
        """Tests that a resource is properly added to the collection and listeners are notified."""
        events = []
        resources.add_listener(ResourceEventType.added, events.append)
        if delay:
            # Schedule the add for a later loop iteration so request() has to wait
            call = partial(resources.add, 6, 'foo', 'foo.bar', extra_types=float)
            event_loop.call_soon(call)
        else:
            resources.add(6, 'foo', 'foo.bar', extra_types=float)
        value = yield from resources.request(int, 'foo', timeout=2)
        assert value == 6
        assert len(events) == 1
        resource = events[0].resource
        assert resource.alias == 'foo'
        assert resource.context_var == 'foo.bar'
        assert resource.value == 6
        assert resource.types == ('int', 'float')

    def test_add_name_conflict(self, resources: ResourceCollection):
        """Tests that add() won't let replace existing resources."""
        resource = resources.add(5, 'foo')
        exc = pytest.raises(ResourceConflict, resources.add, 4, 'foo')
        assert exc.value.resource is resource
        assert str(exc.value) == ('"foo" conflicts with Resource(types=(\'int\',), alias=\'foo\', '
                                  'value=5, context_var=None)')

    @pytest.mark.asyncio
    def test_add_event_conflict(self, resources: ResourceCollection):
        """
        Tests that a resource adding is cancelled if an event listener raises ResourceConflict.
        """
        def listener(event):
            raise ResourceConflict('conflict test', event.resource)

        resources.add_listener(ResourceEventType.added, listener)
        pytest.raises(ResourceConflict, resources.add, 5)
        # The resource must not have been added after the listener objected
        with pytest.raises(ResourceNotFoundError):
            yield from resources.request(int, timeout=0)

    @pytest.mark.asyncio
    def test_remove(self, resources: ResourceCollection):
        """Tests that resources can be removed and that the listeners are notified."""
        resource = resources.add(4)
        events = []
        resources.add_listener(ResourceEventType.removed, events.append)
        resources.remove(resource)
        assert len(events) == 1
        assert events[0].resource.value == 4
        with pytest.raises(ResourceNotFoundError):
            yield from resources.request(int, timeout=0)

    def test_remove_nonexistent(self, resources: ResourceCollection):
        """Removing a resource that was never added must raise LookupError."""
        resource = Resource(5, ('int',), 'default', None)
        exc = pytest.raises(LookupError, resources.remove, resource)
        assert str(exc.value) == ("Resource(types=('int',), alias='default', value=5, "
                                  "context_var=None) not found in this collection")

    @pytest.mark.asyncio
    def test_request_timeout(self, resources: ResourceCollection):
        """Requesting a missing resource must time out with ResourceNotFoundError."""
        with pytest.raises(ResourceNotFoundError) as exc:
            yield from resources.request(int, timeout=0.2)
        assert str(exc.value) == "no matching resource was found for type='int' alias='default'"

    @pytest.mark.asyncio
    @pytest.mark.parametrize('bad_arg, errormsg', [
        ('type', 'type must be a type or a nonempty string'),
        ('alias', 'alias must be a nonempty string')
    ], ids=['bad_type', 'bad_alias'])
    def test_bad_request(self, resources: ResourceCollection, bad_arg, errormsg):
        """Invalid type or alias arguments to request() must raise ValueError."""
        type_ = None if bad_arg == 'type' else 'foo'
        alias = None if bad_arg == 'alias' else 'foo'
        with pytest.raises(ValueError) as exc:
            yield from resources.request(type_, alias)
        assert str(exc.value) == errormsg
| {
"repo_name": "Siecje/asphalt",
"path": "tests/test_resource.py",
"copies": "1",
"size": "5006",
"license": "apache-2.0",
"hash": -968006743307447400,
"line_mean": 38.109375,
"line_max": 99,
"alpha_frac": 0.6292449061,
"autogenerated": false,
"ratio": 4.31551724137931,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.544476214747931,
"avg_score": null,
"num_lines": null
} |
from functools import partial
import pytest
from hiku.executors.queue import Queue
class DummyFuture:
    """Records a callable with its arguments; invokes them on demand via run()."""

    def __init__(self, fn, args, kwargs):
        self.fn, self.args, self.kwargs = fn, args, kwargs

    def run(self):
        self.fn(*self.args, **self.kwargs)


class DummyExecutor:
    """Executor stand-in: submit() defers the call into a DummyFuture instead of running it."""

    def submit(self, fn, *args, **kwargs):
        return DummyFuture(fn, args, kwargs)
def log_call(fn):
    """Wrap *fn* so every call brackets the results list with '-> name' / '<- name'."""
    def wrapper(results, *args, **kwargs):
        results.append(f'-> {fn.__name__}')
        outcome = fn(results, *args, **kwargs)
        results.append(f'<- {fn.__name__}')
        return outcome
    return wrapper
# Expected interleaving of decorator logs ('->'/'<-') and task bodies ('..')
# across the three nested levels; the test below must reproduce it exactly.
SCRIPT = [
    '-> level1_init',
    '<- level1_init',
    '.. level1_task1',
    '-> level1_task1_callback',
    '-> level2_init',
    '<- level2_init',
    '<- level1_task1_callback',
    '.. level2_task1',
    '-> level2_task1_callback',
    '-> level3_init',
    '<- level3_init',
    '<- level2_task1_callback',
    '.. level3_task1',
    '-> level3_task1_callback',
    '<- level3_task1_callback',
    '.. level3_task2',
    '-> level3_task2_callback',
    '<- level3_task2_callback',
    '-> level2_task_set3_callback',
    '<- level2_task_set3_callback',
    '.. level2_task2',
    '-> level2_task2_callback',
    '<- level2_task2_callback',
    '-> level1_task_set2_callback',
    '<- level1_task_set2_callback',
    '.. level1_task2',
    '-> level1_task2_callback',
    '<- level1_task2_callback',
    '-> level0_task_set1_callback',
    '<- level0_task_set1_callback',
]
def func(results, arg):
    """Record that the named task body ran by appending '.. <arg>' to results."""
    results.append(f'.. {arg}')
# The level*_init / *_callback functions below build a three-level tree of
# forked task sets; each callback submits the next task and wires up the next
# callback, reproducing the interleaving recorded in SCRIPT.
@log_call
def level1_init(results, queue):
    # Fork the root task set and submit the first level-1 task
    task_set1 = queue.fork(None)
    level1_task1 = task_set1.submit(func, results, 'level1_task1')
    queue.add_callback(level1_task1, partial(level1_task1_callback,
                                             results, queue, task_set1))
    queue.add_callback(task_set1, partial(level0_task_set1_callback,
                                          results))
@log_call
def level1_task1_callback(results, queue, task_set1):
    # First level-1 task done: descend into level 2
    level2_init(results, queue, task_set1)
@log_call
def level2_init(results, queue, task_set1):
    # Fork a child task set under level 1 and submit the first level-2 task
    task_set2 = queue.fork(task_set1)
    level2_task1 = task_set2.submit(func, results, 'level2_task1')
    queue.add_callback(level2_task1, partial(level2_task1_callback,
                                             results, queue, task_set2))
    queue.add_callback(task_set2, partial(level1_task_set2_callback,
                                          results, queue, task_set1))
@log_call
def level2_task1_callback(results, queue, task_set2):
    # First level-2 task done: descend into level 3
    level3_init(results, queue, task_set2)
@log_call
def level3_init(results, queue, task_set2):
    # Fork the deepest task set and submit the first level-3 task
    task_set3 = queue.fork(task_set2)
    level3_task1 = task_set3.submit(func, results, 'level3_task1')
    queue.add_callback(level3_task1, partial(level3_task1_callback,
                                             results, queue, task_set3))
    queue.add_callback(task_set3, partial(level2_task_set3_callback,
                                          results, queue, task_set2))
@log_call
def level3_task1_callback(results, queue, task_set3):
    # Chain a second level-3 task onto the same task set
    level3_task2 = task_set3.submit(func, results, 'level3_task2')
    queue.add_callback(level3_task2, partial(level3_task2_callback, results))
@log_call
def level3_task2_callback(results):
    pass
@log_call
def level2_task_set3_callback(results, queue, task_set2):
    # Level 3 fully drained: submit the second level-2 task
    level2_task2 = task_set2.submit(func, results, 'level2_task2')
    queue.add_callback(level2_task2, partial(level2_task2_callback, results))
@log_call
def level2_task2_callback(results):
    pass
@log_call
def level1_task_set2_callback(results, queue, task_set1):
    # Level 2 fully drained: submit the second level-1 task
    level1_task2 = task_set1.submit(func, results, 'level1_task2')
    queue.add_callback(level1_task2, partial(level1_task2_callback,
                                             results))
@log_call
def level1_task2_callback(results):
    pass
@log_call
def level0_task_set1_callback(results):
    pass
@pytest.fixture(name='queue')
def _queue():
    """Queue backed by DummyExecutor, so tasks only run when the test runs them."""
    return Queue(DummyExecutor())
@pytest.mark.parametrize('idx', [0, -1])
def test(idx, queue):
    """Drain the queue one future at a time (front or back) and compare the trace."""
    results = []
    level1_init(results, queue)
    # just to be sure that it is possible to pass empty list
    queue.progress([])
    # Run pending futures one at a time, taken from the front (0) or back (-1)
    while queue.__futures__:
        task = queue.__futures__[idx]
        task.run()
        queue.progress([task])
    assert results == SCRIPT
    # The queue must be fully drained afterwards
    assert not queue._futures
    assert not queue._forks
    assert not queue._callbacks
| {
"repo_name": "vmagamedov/hiku",
"path": "tests/test_executor_queue.py",
"copies": "1",
"size": "4543",
"license": "bsd-3-clause",
"hash": 5706106409444908000,
"line_mean": 24.96,
"line_max": 77,
"alpha_frac": 0.6123706802,
"autogenerated": false,
"ratio": 3.2357549857549857,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9348125665954985,
"avg_score": 0,
"num_lines": 175
} |
from functools import partial
import pytest
from notifications_utils.recipients import (
InvalidEmailError,
InvalidPhoneError,
allowed_to_send_to,
format_phone_number_human_readable,
format_recipient,
get_international_phone_info,
international_phone_info,
is_uk_phone_number,
normalise_phone_number,
try_validate_and_format_phone_number,
validate_and_format_phone_number,
validate_email_address,
validate_phone_number,
validate_recipient,
)
# Accepted spellings of the same UK mobile number (07123 456789)
valid_uk_phone_numbers = [
    '7123456789',
    '07123456789',
    '07123 456789',
    '07123-456-789',
    '00447123456789',
    '00 44 7123456789',
    '+447123456789',
    '+44 7123 456 789',
    '+44 (0)7123 456 789',
    '\u200B\t\t+44 (0)7123 \uFEFF 456 789 \r\n',
]
valid_international_phone_numbers = [
    '71234567890',  # Russia
    '1-202-555-0104',  # USA
    '+12025550104',  # USA
    '0012025550104',  # USA
    '+0012025550104',  # USA
    '23051234567',  # Mauritius,
    '+682 12345',  # Cook islands
    '+3312345678',
    '003312345678',
    '1-2345-12345-12345',  # 15 digits
]
valid_phone_numbers = valid_uk_phone_numbers + valid_international_phone_numbers
# (error message, numbers) groups flattened into (number, error) tuples
invalid_uk_phone_numbers = sum([
    [
        (phone_number, error) for phone_number in group
    ] for error, group in [
        ('Too many digits', (
            '712345678910',
            '0712345678910',
            '0044712345678910',
            # NOTE(review): duplicate of the previous entry — same case runs twice
            '0044712345678910',
            '+44 (0)7123 456 789 10',
        )),
        ('Not enough digits', (
            '0712345678',
            '004471234567',
            '00447123456',
            '+44 (0)7123 456 78',
        )),
        ('Not a UK mobile number', (
            '08081 570364',
            '+44 8081 570364',
            '0117 496 0860',
            '+44 117 496 0860',
            '020 7946 0991',
            '+44 20 7946 0991',
        )),
        ('Must not contain letters or symbols', (
            '07890x32109',
            '07123 456789...',
            '07123 ☟☜⬇⬆☞☝',
            '07123☟☜⬇⬆☞☝',
            '07";DROP TABLE;"',
            '+44 07ab cde fgh',
            'ALPHANUM3R1C',
        ))
    ]
], [])
# The UK failures reused internationally, minus numbers that could be valid abroad
invalid_phone_numbers = list(filter(
    lambda number: number[0] not in {
        '712345678910',  # Could be Russia
    },
    invalid_uk_phone_numbers
)) + [
    ('800000000000', 'Not a valid country prefix'),
    ('1234567', 'Not enough digits'),
    ('+682 1234', 'Not enough digits'),  # Cook Islands phone numbers can be 5 digits
    ('+12345 12345 12345 6', 'Too many digits'),
]
valid_email_addresses = (
    'email@domain.com',
    'email@domain.COM',
    'firstname.lastname@domain.com',
    'firstname.o\'lastname@domain.com',
    'email@subdomain.domain.com',
    'firstname+lastname@domain.com',
    '1234567890@domain.com',
    'email@domain-one.com',
    '_______@domain.com',
    'email@domain.name',
    'email@domain.superlongtld',
    'email@domain.co.jp',
    'firstname-lastname@domain.com',
    'info@german-financial-services.vermögensberatung',
    'info@german-financial-services.reallylongarbitrarytldthatiswaytoohugejustincase',
    'japanese-info@例え.テスト',
    'email@double--hyphen.com'
)
invalid_email_addresses = (
    'email@123.123.123.123',
    'email@[123.123.123.123]',
    'plainaddress',
    '@no-local-part.com',
    'Outlook Contact <outlook-contact@domain.com>',
    'no-at.domain.com',
    'no-tld@domain',
    ';beginning-semicolon@domain.co.uk',
    'middle-semicolon@domain.co;uk',
    'trailing-semicolon@domain.com;',
    '"email+leading-quotes@domain.com',
    'email+middle"-quotes@domain.com',
    '"quoted-local-part"@domain.com',
    '"quoted@domain.com"',
    'lots-of-dots@domain..gov..uk',
    'two-dots..in-local@domain.com',
    'multiple@domains@domain.com',
    'spaces in local@domain.com',
    'spaces-in-domain@dom ain.com',
    'underscores-in-domain@dom_ain.com',
    'pipe-in-domain@example.com|gov.uk',
    'comma,in-local@gov.uk',
    'comma-in-domain@domain,gov.uk',
    'pound-sign-in-local£@domain.com',
    'local-with-’-apostrophe@domain.com',
    'local-with-”-quotes@domain.com',
    'domain-starts-with-a-dot@.domain.com',
    'brackets(in)local@domain.com',
    'email-too-long-{}@example.com'.format('a' * 320),
    'incorrect-punycode@xn---something.com'
)
@pytest.mark.parametrize("phone_number", valid_international_phone_numbers)
def test_detect_international_phone_numbers(phone_number):
    """Valid international numbers must not be classified as UK numbers."""
    classified_as_uk = is_uk_phone_number(phone_number)
    assert classified_as_uk is False
@pytest.mark.parametrize("phone_number", valid_uk_phone_numbers)
def test_detect_uk_phone_numbers(phone_number):
    """Every accepted UK spelling must be classified as a UK number."""
    classified_as_uk = is_uk_phone_number(phone_number)
    assert classified_as_uk is True
@pytest.mark.parametrize("phone_number, expected_info", [
    ('07900900123', international_phone_info(
        international=False,
        crown_dependency=False,
        country_prefix='44',  # UK
        billable_units=1,
    )),
    ('07700900123', international_phone_info(
        international=False,
        crown_dependency=False,
        country_prefix='44',  # Number in TV range
        billable_units=1,
    )),
    ('07700800123', international_phone_info(
        international=True,
        crown_dependency=True,
        country_prefix='44',  # UK Crown dependency, so prefix same as UK
        billable_units=1,
    )),
    ('20-12-1234-1234', international_phone_info(
        international=True,
        crown_dependency=False,
        country_prefix='20',  # Egypt
        billable_units=3,
    )),
    ('00201212341234', international_phone_info(
        international=True,
        crown_dependency=False,
        country_prefix='20',  # Egypt
        billable_units=3,
    )),
    ('1664000000000', international_phone_info(
        international=True,
        crown_dependency=False,
        country_prefix='1664',  # Montserrat
        billable_units=1,
    )),
    ('71234567890', international_phone_info(
        international=True,
        crown_dependency=False,
        country_prefix='7',  # Russia
        billable_units=1,
    )),
    ('1-202-555-0104', international_phone_info(
        international=True,
        crown_dependency=False,
        country_prefix='1',  # USA
        billable_units=1,
    )),
    ('+23051234567', international_phone_info(
        international=True,
        crown_dependency=False,
        country_prefix='230',  # Mauritius
        billable_units=2,
    ))
])
def test_get_international_info(phone_number, expected_info):
    """Each number maps to the expected international_phone_info tuple."""
    actual_info = get_international_phone_info(phone_number)
    assert actual_info == expected_info
@pytest.mark.parametrize('phone_number', [
    'abcd',
    '079OO900123',
    pytest.param('', marks=pytest.mark.xfail),
    pytest.param('12345', marks=pytest.mark.xfail),
    pytest.param('+12345', marks=pytest.mark.xfail),
    pytest.param('1-2-3-4-5', marks=pytest.mark.xfail),
    pytest.param('1 2 3 4 5', marks=pytest.mark.xfail),
    pytest.param('(1)2345', marks=pytest.mark.xfail),
])
def test_normalise_phone_number_raises_if_unparseable_characters(phone_number):
    """Inputs with unparseable characters must be rejected with InvalidPhoneError."""
    with pytest.raises(InvalidPhoneError):
        normalise_phone_number(phone_number)
@pytest.mark.parametrize('phone_number', [
    '+21 4321 0987',
    '00997 1234 7890',
    '801234-7890',
    '(8-0)-1234-7890',
])
def test_get_international_info_raises(phone_number):
    """Numbers with an unknown country prefix raise with the exact error message."""
    with pytest.raises(InvalidPhoneError) as error:
        get_international_phone_info(phone_number)
    assert str(error.value) == 'Not a valid country prefix'
@pytest.mark.parametrize("phone_number", valid_uk_phone_numbers)
@pytest.mark.parametrize("validator", [
    partial(validate_recipient, template_type='sms'),
    partial(validate_recipient, template_type='sms', allow_international_sms=False),
    partial(validate_phone_number),
    partial(validate_phone_number, international=False),
])
def test_phone_number_accepts_valid_values(validator, phone_number):
    """No UK-only validator should raise for a well-formed UK mobile number."""
    try:
        validator(phone_number)
    except InvalidPhoneError:
        # fail() (not a bare raise) so pytest reports a failure, not an error
        pytest.fail('Unexpected InvalidPhoneError')
@pytest.mark.parametrize("validator", [
    partial(validate_recipient, template_type='sms', allow_international_sms=True),
    partial(validate_phone_number, international=True),
])
@pytest.mark.parametrize("phone_number", valid_phone_numbers)
def test_phone_number_accepts_valid_international_values(validator, phone_number):
    """International-aware validators accept every valid phone number."""
    try:
        validator(phone_number)
    except InvalidPhoneError:
        pytest.fail('Unexpected InvalidPhoneError')
@pytest.mark.parametrize("phone_number", valid_uk_phone_numbers)
def test_valid_uk_phone_number_can_be_formatted_consistently(phone_number):
    """Every spelling of the same UK number normalises to one canonical form."""
    formatted = validate_and_format_phone_number(phone_number)
    assert formatted == '447123456789'
@pytest.mark.parametrize("phone_number, expected_formatted", [
    ('71234567890', '71234567890'),
    ('1-202-555-0104', '12025550104'),
    ('+12025550104', '12025550104'),
    ('0012025550104', '12025550104'),
    ('+0012025550104', '12025550104'),
    ('23051234567', '23051234567'),
])
def test_valid_international_phone_number_can_be_formatted_consistently(phone_number, expected_formatted):
    """Punctuation and international-dialling prefixes are stripped on format."""
    formatted = validate_and_format_phone_number(phone_number, international=True)
    assert formatted == expected_formatted
@pytest.mark.parametrize("validator", [
    partial(validate_recipient, template_type='sms'),
    partial(validate_recipient, template_type='sms', allow_international_sms=False),
    partial(validate_phone_number),
    partial(validate_phone_number, international=False),
])
@pytest.mark.parametrize("phone_number, error_message", invalid_uk_phone_numbers)
def test_phone_number_rejects_invalid_values(validator, phone_number, error_message):
    """UK-only validators reject invalid numbers with the expected message."""
    with pytest.raises(InvalidPhoneError) as excinfo:
        validator(phone_number)
    assert str(excinfo.value) == error_message
@pytest.mark.parametrize("validator", [
    partial(validate_recipient, template_type='sms', allow_international_sms=True),
    partial(validate_phone_number, international=True),
])
@pytest.mark.parametrize("phone_number, error_message", invalid_phone_numbers)
def test_phone_number_rejects_invalid_international_values(validator, phone_number, error_message):
    """International validators reject invalid numbers with the expected message."""
    with pytest.raises(InvalidPhoneError) as excinfo:
        validator(phone_number)
    assert str(excinfo.value) == error_message
@pytest.mark.parametrize("email_address", valid_email_addresses)
def test_validate_email_address_accepts_valid(email_address):
    """A valid email address is returned unchanged and never raises."""
    try:
        assert email_address == validate_email_address(email_address)
    except InvalidEmailError:
        pytest.fail('Unexpected InvalidEmailError')
@pytest.mark.parametrize('email', [
    ' email@domain.com ',
    '\temail@domain.com',
    '\temail@domain.com\n',
    '\u200Bemail@domain.com\u200B',
])
def test_validate_email_address_strips_whitespace(email):
    """Surrounding whitespace (including zero-width spaces) is stripped."""
    result = validate_email_address(email)
    assert result == 'email@domain.com'
@pytest.mark.parametrize("email_address", invalid_email_addresses)
def test_validate_email_address_raises_for_invalid(email_address):
    """Every invalid email address raises with the same fixed message."""
    with pytest.raises(InvalidEmailError) as excinfo:
        validate_email_address(email_address)
    assert 'Not a valid email address' == str(excinfo.value)
@pytest.mark.parametrize("phone_number", valid_uk_phone_numbers)
def test_validates_against_whitelist_of_phone_numbers(phone_number):
    """Allowlist matching treats all spellings of one number as equivalent."""
    matching_allowlist = ['07123456789', '07700900460', 'test@example.com']
    non_matching_allowlist = ['07700900460', '07700900461', 'test@example.com']
    assert allowed_to_send_to(phone_number, matching_allowlist)
    assert not allowed_to_send_to(phone_number, non_matching_allowlist)
@pytest.mark.parametrize('recipient_number, allowlist_number', [
    ['1-202-555-0104', '0012025550104'],
    ['0012025550104', '1-202-555-0104'],
])
def test_validates_against_whitelist_of_international_phone_numbers(recipient_number, allowlist_number):
    """Differently punctuated spellings of one number match in either direction."""
    allowlist = [allowlist_number]
    assert allowed_to_send_to(recipient_number, allowlist)
@pytest.mark.parametrize("email_address", valid_email_addresses)
def test_validates_against_whitelist_of_email_addresses(email_address):
    """An address that is not on the allowlist is never allowed."""
    allowlist = ['very_special_and_unique@example.com']
    assert not allowed_to_send_to(email_address, allowlist)
@pytest.mark.parametrize("phone_number, expected_formatted", [
    ('07900900123', '07900 900123'),  # UK
    ('+44(0)7900900123', '07900 900123'),  # UK
    ('447900900123', '07900 900123'),  # UK
    ('20-12-1234-1234', '+20 121 234 1234'),  # Egypt
    ('00201212341234', '+20 121 234 1234'),  # Egypt
    ('1664 0000000', '+1 664-000-0000'),  # Montserrat
    ('7 499 1231212', '+7 499 123-12-12'),  # Moscow (Russia)
    ('1-202-555-0104', '+1 202-555-0104'),  # Washington DC (USA)
    ('+23051234567', '+230 5123 4567'),  # Mauritius
    ('33(0)1 12345678', '+33 1 12 34 56 78'),  # Paris (France)
])
def test_format_uk_and_international_phone_numbers(phone_number, expected_formatted):
    """UK numbers render in national style; all others get a '+' country code."""
    human_readable = format_phone_number_human_readable(phone_number)
    assert human_readable == expected_formatted
@pytest.mark.parametrize("recipient, expected_formatted", [
    (True, ''),
    (False, ''),
    (0, ''),
    (0.1, ''),
    (None, ''),
    ('foo', 'foo'),
    ('TeSt@ExAmPl3.com', 'test@exampl3.com'),
    ('+4407900 900 123', '447900900123'),
    ('+1 800 555 5555', '18005555555'),
])
def test_format_recipient(recipient, expected_formatted):
    """Non-strings become '', emails are lower-cased, numbers are normalised."""
    assert expected_formatted == format_recipient(recipient)
def test_try_format_recipient_doesnt_throw():
    """An unparseable alphanumeric value is passed through unmodified."""
    result = try_validate_and_format_phone_number('ALPHANUM3R1C')
    assert result == 'ALPHANUM3R1C'
# NOTE(review): "doenst" in the test name is a typo for "doesn't"; left
# unchanged to avoid renaming a collected test.
def test_format_phone_number_human_readable_doenst_throw():
    """An unparseable value is returned unchanged rather than raising."""
    assert format_phone_number_human_readable('ALPHANUM3R1C') == 'ALPHANUM3R1C'
| {
"repo_name": "alphagov/notifications-utils",
"path": "tests/test_recipient_validation.py",
"copies": "1",
"size": "13469",
"license": "mit",
"hash": -6431273802895706000,
"line_mean": 32.3225806452,
"line_max": 106,
"alpha_frac": 0.6598406434,
"autogenerated": false,
"ratio": 3.259466019417476,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4419306662817476,
"avg_score": null,
"num_lines": null
} |
from functools import partial
import pytest
from stp_core.loop.eventually import eventually
from plenum.common.messages.node_messages import PrePrepare
from plenum.common.util import adict
from plenum.server.suspicion_codes import Suspicions
from plenum.test.helper import getNodeSuspicions
from plenum.test.instances.helper import sentPrepare
from plenum.test.malicious_behaviors_node import makeNodeFaulty, \
sendDuplicate3PhaseMsg
from plenum.test.test_node import getNonPrimaryReplicas, getPrimaryReplica
from plenum.test import waits
# Passing scope positionally (pytest.fixture("module")) is deprecated and
# rejected by modern pytest; use the explicit keyword form instead.
@pytest.fixture(scope="module")
def setup(txnPoolNodeSet):
    """Make the primary replica of instance 0 send duplicate PRE-PREPAREs.

    Returns an ``adict`` holding the primary replica and the non-primary
    replicas of instance 0 so the test can inspect the resulting suspicions.
    """
    primaryRep, nonPrimaryReps = getPrimaryReplica(txnPoolNodeSet, 0), \
        getNonPrimaryReplicas(txnPoolNodeSet, 0)
    # The primary replica would send 3 duplicate PRE-PREPARE requests to
    # non primary replicas
    makeNodeFaulty(primaryRep.node, partial(sendDuplicate3PhaseMsg,
                                            msgType=PrePrepare, count=3))
    # The node of the primary replica above should not be blacklisted by any
    # other node since we are simulating multiple PRE-PREPARE messages and
    # want to check for a particular suspicion
    return adict(primaryRep=primaryRep, nonPrimaryReps=nonPrimaryReps)
# noinspection PyIncorrectDocstring
def testMultiplePrePrepareWithSameSeqNo(setup, looper, sent1):
    """
    A primary replica sends duplicate PRE-PREPARE messages to the non primary
    replicas but non primary replicas should raise suspicion on encountering
    each duplicate PRE-PREPARE. Also it should send only one PREPARE
    """
    primaryRep = setup.primaryRep
    nonPrimaryReps = setup.nonPrimaryReps
    def checkSuspicions():
        for replica in nonPrimaryReps:
            # Each node with a non-primary replica of instance 0 raises
            # suspicion twice — once per extra PRE-PREPARE request.
            suspectingNodes = getNodeSuspicions(
                replica.node, Suspicions.DUPLICATE_PPR_SENT.code)
            assert len(suspectingNodes) == 2
            # Each non-primary replica should have sent exactly one PREPARE.
            assert len(sentPrepare(replica)) == 1
    nodeCount = len(primaryRep.node.nodeReg)
    timeout = waits.expectedTransactionExecutionTime(nodeCount)
    looper.run(eventually(checkSuspicions, retryWait=1, timeout=timeout))
| {
"repo_name": "evernym/zeno",
"path": "plenum/test/instances/test_multiple_pre_prepare.py",
"copies": "2",
"size": "2359",
"license": "apache-2.0",
"hash": 161156013980216640,
"line_mean": 38.9830508475,
"line_max": 77,
"alpha_frac": 0.7253073336,
"autogenerated": false,
"ratio": 3.912106135986733,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5637413469586734,
"avg_score": null,
"num_lines": null
} |
from functools import partial
import pytest
from stp_core.loop.eventually import eventually
from plenum.common.messages.node_messages import PrePrepare
from stp_core.common.util import adict
from plenum.server.suspicion_codes import Suspicions
from plenum.test.helper import getNodeSuspicions
from plenum.test.instances.helper import sentPrepare
from plenum.test.malicious_behaviors_node import makeNodeFaulty, \
send3PhaseMsgWithIncorrectDigest
from plenum.test.test_node import getNonPrimaryReplicas, getPrimaryReplica
from plenum.test import waits
# Passing scope positionally (pytest.fixture("module")) is deprecated and
# rejected by modern pytest; use the explicit keyword form instead.
@pytest.fixture(scope="module")
def setup(txnPoolNodeSet):
    """Make the primary of instance 0 send PRE-PREPAREs with a bad digest.

    Returns an ``adict`` holding the primary replica and the non-primary
    replicas of instance 0.
    """
    primaryRep, nonPrimaryReps = getPrimaryReplica(txnPoolNodeSet, 0), \
        getNonPrimaryReplicas(txnPoolNodeSet, 0)
    # The primary replica would send PRE-PREPARE messages with incorrect digest
    makeNodeFaulty(primaryRep.node, partial(send3PhaseMsgWithIncorrectDigest,
                                            msgType=PrePrepare))
    return adict(primaryRep=primaryRep, nonPrimaryReps=nonPrimaryReps)
# noinspection PyIncorrectDocstring
def testPrePrepareDigest(setup, looper, sent1):
    """
    A primary replica sends PRE-PREPARE message with incorrect digest to the
    non primary replicas but non primary replicas should raise suspicion on
    encountering the PRE-PREPARE. Also it should send no PREPARE
    """
    primaryRep = setup.primaryRep
    nonPrimaryReps = setup.nonPrimaryReps
    def checkSuspicions():
        for replica in nonPrimaryReps:
            # The faulty node may become primary of some backup instance
            # after view changes and again send a PRE-PREPARE with an
            # incorrect digest, so a node may raise suspicion more than once.
            raised = getNodeSuspicions(
                replica.node, Suspicions.PPR_DIGEST_WRONG.code)
            assert len(raised) >= 1
            # No non-primary replica should have sent any PREPARE.
            assert len(sentPrepare(replica, viewNo=0, ppSeqNo=1)) == 0
    nodeCount = len(primaryRep.node.nodeReg)
    timeout = waits.expectedTransactionExecutionTime(nodeCount)
    looper.run(eventually(checkSuspicions, retryWait=1, timeout=timeout))
| {
"repo_name": "evernym/plenum",
"path": "plenum/test/instances/test_pre_prepare_digest.py",
"copies": "2",
"size": "2335",
"license": "apache-2.0",
"hash": -1076531585056593800,
"line_mean": 42.2407407407,
"line_max": 79,
"alpha_frac": 0.7156316916,
"autogenerated": false,
"ratio": 3.9112227805695143,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 54
} |
from functools import partial
import pytest
from sympy import symbols, sqrt, exp, I, Rational, IndexedBase
from qnet import (
CircuitSymbol, CIdentity, CircuitZero, CPermutation, SeriesProduct,
Feedback, SeriesInverse, circuit_identity as cid, Beamsplitter,
OperatorSymbol, IdentityOperator, ZeroOperator, Create, Destroy, Jz, Jplus,
Jminus, Phase, Displace, Squeeze, LocalSigma, LocalProjector, tr, Adjoint,
PseudoInverse, NullSpaceProjector, Commutator, LocalSpace, TrivialSpace,
FullSpace, Matrix, KetSymbol, ZeroKet, TrivialKet, BasisKet,
CoherentStateKet, UnequalSpaces, ScalarTimesKet, OperatorTimesKet, Bra,
OverlappingSpaces, SpaceTooLargeError, BraKet, KetBra, SuperOperatorSymbol,
IdentitySuperOperator, ZeroSuperOperator, SuperAdjoint, SPre, SPost,
SuperOperatorTimesOperator, FockIndex, StrLabel, IdxSym, latex,
configure_printing, QuantumDerivative, Scalar, ScalarExpression, SpinSpace,
SpinBasisKet, Eq)
from qnet.printing.latexprinter import QnetLatexPrinter
def test_ascii_scalar():
    """Test rendering of scalar values"""
    assert latex(2) == '2'
    # Clear the cache before rendering 2.0: since 2 == 2.0, a cached entry
    # for 2 could otherwise be reused and introduce non-reproducible output.
    # We always want 2.0 to be printed as '2'.
    latex.printer.cache = {}
    assert latex(2.0) == '2'
    assert latex(1j) == '1i'
    assert latex('foo') == 'foo'
    idx = IdxSym('i')
    base = IndexedBase('alpha')
    assert latex(idx) == 'i'
    assert latex(base[idx]) == r'\alpha_{i}'
def test_tex_render_string():
    """Test rendering of ascii to latex strings"""
    printer = QnetLatexPrinter()
    # Table of input string -> expected LaTeX rendering. Greek names map to
    # TeX macros where a distinct glyph exists, and to the look-alike Latin
    # letter otherwise; subscripts after '_' are wrapped in braces.
    expected = {
        'a': r'a',
        'A': r'A',
        'longword': r'\text{longword}',
        'alpha': r'\alpha',
        'Alpha': r'A',
        'Beta': r'B',
        'Gamma': r'\Gamma',
        'Delta': r'\Delta',
        'Epsilon': r'E',
        'Zeta': r'Z',
        'Eta': r'H',
        'Theta': r'\Theta',
        'Iota': r'I',
        'Kappa': r'K',
        'Lambda': r'\Lambda',
        'Mu': r'M',
        'Nu': r'N',
        'Xi': r'\Xi',
        'Omicron': r'O',
        'Pi': r'\Pi',
        'Rho': r'P',
        'Sigma': r'\Sigma',
        'Tau': r'T',
        'Ypsilon': r'\Upsilon',
        'Upsilon': r'\Upsilon',
        'ypsilon': r'\upsilon',
        'upsilon': r'\upsilon',
        'Phi': r'\Phi',
        'Chi': r'X',
        'Psi': r'\Psi',
        'Omega': r'\Omega',
        'xi_1': r'\xi_{1}',
        'xi_1^2': r'\xi_{1}^{2}',
        'Xi_1': r'\Xi_{1}',
        'Xi_long': r'\Xi_{\text{long}}',
        'Xi_1+2': r'\Xi_{1+2}',
        'Lambda_i,j': r'\Lambda_{i,j}',
        'epsilon_mu,nu': r'\epsilon_{\mu,\nu}',
    }
    for source, rendered in expected.items():
        assert printer._render_str(source) == rendered
    # The top-level latex() entry point uses the same string renderer.
    assert latex('alpha') == r'\alpha'
def test_tex_circuit_elements():
    """Test the tex representation of "atomic" circuit algebra elements"""
    alpha, t = symbols('alpha, t')
    theta = symbols('theta', positive=True)
    assert latex(CircuitSymbol("C", cdim=2)) == 'C'
    assert latex(CircuitSymbol("C_1", cdim=2)) == 'C_{1}'
    # Greek-letter names and text subscripts are converted to TeX macros
    assert latex(CircuitSymbol("Xi_2", cdim=2)) == r'\Xi_{2}'
    assert latex(CircuitSymbol("Xi_full", cdim=2)) == r'\Xi_{\text{full}}'
    # symbolic arguments render in parentheses after the symbol name
    assert (
        latex(CircuitSymbol("C", alpha, t, cdim=2)) ==
        r'C\left(\alpha, t\right)')
    assert latex(CIdentity) == r'{\rm cid}(1)'
    assert latex(cid(4)) == r'{\rm cid}(4)'
    assert latex(CircuitZero) == r'{\rm cid}(0)'
    # default beamsplitter renders with the pi/4 mixing angle
    assert latex(Beamsplitter()) == r'{\rm BS}\left(\frac{\pi}{4}\right)'
    assert (
        latex(Beamsplitter(mixing_angle=theta)) ==
        r'{\rm BS}\left(\theta\right)')
def test_tex_circuit_operations():
    """Test the tex representation of circuit algebra operations"""
    A = CircuitSymbol("A_test", cdim=2)
    B = CircuitSymbol("B_test", cdim=2)
    C = CircuitSymbol("C_test", cdim=2)
    beta = CircuitSymbol("beta", cdim=1)
    gamma = CircuitSymbol("gamma", cdim=1)
    perm = CPermutation.create((2, 1, 0, 3))
    # series product (<<) renders as \lhd
    assert (latex(A << B << C) ==
            r'A_{\text{test}} \lhd B_{\text{test}} \lhd C_{\text{test}}')
    # concatenation (+) renders as \boxplus
    assert (latex(A + B + C) ==
            r'A_{\text{test}} \boxplus B_{\text{test}} '
            r'\boxplus C_{\text{test}}')
    # nested operations are parenthesized
    assert (latex(A << (beta + gamma)) ==
            r'A_{\text{test}} \lhd \left(\beta \boxplus \gamma\right)')
    assert (latex(A + (B << C)) ==
            r'A_{\text{test}} \boxplus '
            r'\left(B_{\text{test}} \lhd C_{\text{test}}\right)')
    # channel permutations render as a two-row matrix (from -> to)
    assert (latex(perm) ==
            r'\mathbf{P}_{\sigma}\begin{pmatrix} 0 & 1 & 2 & 3 \\ '
            r'2 & 1 & 0 & 3 \end{pmatrix}')
    assert (latex(SeriesProduct(perm, (A+B))) ==
            r'\mathbf{P}_{\sigma}\begin{pmatrix} 0 & 1 & 2 & 3 \\ '
            r'2 & 1 & 0 & 3 \end{pmatrix} '
            r'\lhd \left(A_{\text{test}} \boxplus B_{\text{test}}\right)')
    # feedback shows the out->in port routing as a subscript
    assert (latex(Feedback((A+B), out_port=3, in_port=0)) ==
            r'\left\lfloor{A_{\text{test}} \boxplus B_{\text{test}}}'
            r'\right\rfloor_{3\rightarrow{}0}')
    assert (latex(SeriesInverse(A+B)) ==
            r'\left[A_{\text{test}} \boxplus B_{\text{test}}\right]^{\rhd}')
def test_tex_hilbert_elements():
    """Test the tex representation of "atomic" Hilbert space algebra
    elements"""
    assert latex(LocalSpace(1)) == r'\mathcal{H}_{1}'
    # dimension and basis metadata do not change the rendered label
    assert latex(LocalSpace(1, dimension=2)) == r'\mathcal{H}_{1}'
    assert latex(LocalSpace(1, basis=(r'g', 'e'))) == r'\mathcal{H}_{1}'
    assert latex(LocalSpace('local')) == r'\mathcal{H}_{\text{local}}'
    assert latex(LocalSpace('kappa')) == r'\mathcal{H}_{\kappa}'
    assert latex(TrivialSpace) == r'\mathcal{H}_{\text{null}}'
    assert latex(FullSpace) == r'\mathcal{H}_{\text{total}}'
    # symbolic (indexed) labels render as the index symbol
    assert latex(LocalSpace(StrLabel(IdxSym('i')))) == r'\mathcal{H}_{i}'
def test_tex_hilbert_operations():
    """Test the tex representation of Hilbert space algebra operations"""
    space_one = LocalSpace(1)
    space_two = LocalSpace(2)
    # a tensor product of spaces renders with \otimes
    product_space = space_one * space_two
    assert latex(product_space) == r'\mathcal{H}_{1} \otimes \mathcal{H}_{2}'
def test_tex_matrix():
    """Test tex representation of the Matrix class"""
    A = OperatorSymbol("A", hs=1)
    B = OperatorSymbol("B", hs=1)
    C = OperatorSymbol("C", hs=1)
    D = OperatorSymbol("D", hs=1)
    assert latex(OperatorSymbol("A", hs=1)) == r'\hat{A}^{(1)}'
    # 2x2 matrix of operator entries
    assert (latex(Matrix([[A, B], [C, D]])) ==
            r'\begin{pmatrix}\hat{A}^{(1)} & \hat{B}^{(1)} \\'
            r'\hat{C}^{(1)} & \hat{D}^{(1)}\end{pmatrix}')
    # a flat list renders as a column vector
    assert (latex(Matrix([A, B, C, D])) ==
            r'\begin{pmatrix}\hat{A}^{(1)} \\\hat{B}^{(1)} \\'
            r'\hat{C}^{(1)} \\\hat{D}^{(1)}\end{pmatrix}')
    # a nested single row renders as a row vector
    assert (latex(Matrix([[A, B, C, D]])) ==
            r'\begin{pmatrix}\hat{A}^{(1)} & \hat{B}^{(1)} & '
            r'\hat{C}^{(1)} & \hat{D}^{(1)}\end{pmatrix}')
    # scalar entries are rendered directly
    assert (latex(Matrix([[0, 1], [-1, 0]])) ==
            r'\begin{pmatrix}0 & 1 \\-1 & 0\end{pmatrix}')
    # degenerate/empty matrices still produce a (mostly empty) pmatrix
    assert latex(Matrix([[], []])) == r'\begin{pmatrix} \\\end{pmatrix}'
    assert latex(Matrix([])) == r'\begin{pmatrix} \\\end{pmatrix}'
def test_tex_equation():
    """Test printing of the Eq class"""
    # base equation: H = a^dagger a on Hilbert space 0
    eq_1 = Eq(
        lhs=OperatorSymbol('H', hs=0),
        rhs=Create(hs=0) * Destroy(hs=0))
    # Build a chain of derivation steps; cont=True keeps the history so the
    # chain renders as one multi-line align environment, and tag=... attaches
    # equation tags to individual steps.
    eq = (
        eq_1
        .apply_to_lhs(lambda expr: expr + 1, cont=True)
        .apply_to_rhs(lambda expr: expr + 1, cont=True)
        .apply_to_rhs(lambda expr: expr**2, cont=True, tag=3)
        .apply(lambda expr: expr + 1, cont=True, tag=4)
        .apply_mtd_to_rhs('expand', cont=True)
        .apply_to_lhs(lambda expr: expr**2, cont=True, tag=5)
        .apply_mtd('expand', cont=True)
        .apply_to_lhs(lambda expr: expr**2, cont=True, tag=6)
        .apply_mtd_to_lhs('expand', cont=True)
        .apply_to_rhs(lambda expr: expr + 1, cont=True)
    )
    # a single, untagged equation renders as an {equation} environment
    assert (
        latex(eq_1).split("\n") == [
            r'\begin{equation}',
            r'  \hat{H}^{(0)} = \hat{a}^{(0)\dagger} \hat{a}^{(0)}',
            r'\end{equation}',
            ''])
    # set_tag adds a \tag{...} before \end{equation}
    assert (
        latex(eq_1.set_tag(1)).split("\n") == [
            r'\begin{equation}',
            r'  \hat{H}^{(0)} = \hat{a}^{(0)\dagger} \hat{a}^{(0)}',
            r'\tag{1}\end{equation}',
            ''])
    # the full chain renders as an {align} environment; show_hs_label and
    # tex_op_macro customize how operators are printed
    tex_lines = (
        latex(eq, show_hs_label=False, tex_op_macro=r'\Op{{{name}}}')
        .split("\n"))
    expected = [
        r'\begin{align}',
        r'  \Op{H} &= \Op{a}^{\dagger} \Op{a}\\',
        r'  \mathbb{1} + \Op{H} &= \Op{a}^{\dagger} \Op{a}\\',
        r'  &= \mathbb{1} + \Op{a}^{\dagger} \Op{a}\\',
        r'  &= \left(\mathbb{1} + \Op{a}^{\dagger} \Op{a}\right) \left(\mathbb{1} + \Op{a}^{\dagger} \Op{a}\right)\tag{3}\\',
        r'  2 + \Op{H} &= \mathbb{1} + \left(\mathbb{1} + \Op{a}^{\dagger} \Op{a}\right) \left(\mathbb{1} + \Op{a}^{\dagger} \Op{a}\right)\tag{4}\\',
        r'  &= 2 + \Op{a}^{\dagger} \Op{a}^{\dagger} \Op{a} \Op{a} + 3 \Op{a}^{\dagger} \Op{a}\\',
        r'  \left(2 + \Op{H}\right) \left(2 + \Op{H}\right) &= 2 + \Op{a}^{\dagger} \Op{a}^{\dagger} \Op{a} \Op{a} + 3 \Op{a}^{\dagger} \Op{a}\tag{5}\\',
        r'  4 + 4 \Op{H} + \Op{H} \Op{H} &= 2 + \Op{a}^{\dagger} \Op{a}^{\dagger} \Op{a} \Op{a} + 3 \Op{a}^{\dagger} \Op{a}\\',
        r'  \left(4 + 4 \Op{H} + \Op{H} \Op{H}\right) \left(4 + 4 \Op{H} + \Op{H} \Op{H}\right) &= 2 + \Op{a}^{\dagger} \Op{a}^{\dagger} \Op{a} \Op{a} + 3 \Op{a}^{\dagger} \Op{a}\tag{6}\\',
        r'  16 + 32 \Op{H} + \Op{H} \Op{H} \Op{H} \Op{H} + 8 \Op{H} \Op{H} \Op{H} + 24 \Op{H} \Op{H} &= 2 + \Op{a}^{\dagger} \Op{a}^{\dagger} \Op{a} \Op{a} + 3 \Op{a}^{\dagger} \Op{a}\\',
        r'  &= 3 + \Op{a}^{\dagger} \Op{a}^{\dagger} \Op{a} \Op{a} + 3 \Op{a}^{\dagger} \Op{a}',
        r'\end{align}',
        r'']
    # compare line by line so a failure pinpoints the offending line
    for i, line in enumerate(tex_lines):
        assert line == expected[i]
def test_tex_operator_elements():
    """Test the tex representation of "atomic" operator algebra elements"""
    hs1 = LocalSpace('q1', dimension=2)
    hs2 = LocalSpace('q2', dimension=2)
    alpha, beta = symbols('alpha, beta')
    # local_identifiers override the default operator symbols for a space
    fock1 = LocalSpace(
        1, local_identifiers={'Create': 'b', 'Destroy': 'b', 'Phase': 'Phi'})
    spin1 = SpinSpace(
        1, spin=1, local_identifiers={'Jz': 'Z', 'Jplus': 'Jp', 'Jminus': 'Jm'})
    # operator symbols render with a hat and the Hilbert space as superscript
    assert latex(OperatorSymbol("A", hs=hs1)) == r'\hat{A}^{(q_{1})}'
    assert (latex(OperatorSymbol("A_1", hs=hs1*hs2)) ==
            r'\hat{A}_{1}^{(q_{1} \otimes q_{2})}')
    assert (latex(OperatorSymbol("Xi_2", hs=(r'q1', 'q2'))) ==
            r'\hat{\Xi}_{2}^{(q_{1} \otimes q_{2})}')
    assert (latex(OperatorSymbol("Xi_full", hs=1)) ==
            r'\hat{\Xi}_{\text{full}}^{(1)}')
    # symbolic arguments render in parentheses after the symbol
    assert latex(OperatorSymbol("Xi", alpha, beta, hs=1)) == (
        r'\hat{\Xi}^{(1)}\left(\alpha, \beta\right)')
    assert latex(IdentityOperator) == r'\mathbb{1}'
    # the identity symbol is configurable per call
    assert latex(IdentityOperator, tex_identity_sym='I') == 'I'
    assert latex(ZeroOperator) == r'\mathbb{0}'
    # default vs. locally-overridden ladder-operator identifiers
    assert latex(Create(hs=1)) == r'\hat{a}^{(1)\dagger}'
    assert latex(Create(hs=fock1)) == r'\hat{b}^{(1)\dagger}'
    assert latex(Destroy(hs=1)) == r'\hat{a}^{(1)}'
    assert latex(Destroy(hs=fock1)) == r'\hat{b}^{(1)}'
    assert latex(Jz(hs=SpinSpace(1, spin=1))) == r'\hat{J}_{z}^{(1)}'
    assert latex(Jz(hs=spin1)) == r'\hat{Z}^{(1)}'
    assert latex(Jplus(hs=SpinSpace(1, spin=1))) == r'\hat{J}_{+}^{(1)}'
    assert latex(Jplus(hs=spin1)) == r'\text{Jp}^{(1)}'
    assert latex(Jminus(hs=SpinSpace(1, spin=1))) == r'\hat{J}_{-}^{(1)}'
    assert latex(Jminus(hs=spin1)) == r'\text{Jm}^{(1)}'
    # numeric arguments: exact rationals render as fractions, floats as-is
    assert (latex(Phase(Rational(1, 2), hs=1)) ==
            r'\text{Phase}^{(1)}\left(\frac{1}{2}\right)')
    assert (latex(Phase(0.5, hs=1)) ==
            r'\text{Phase}^{(1)}\left(0.5\right)')
    assert (latex(Phase(0.5, hs=fock1)) ==
            r'\hat{\Phi}^{(1)}\left(0.5\right)')
    assert (latex(Displace(0.5, hs=1)) ==
            r'\hat{D}^{(1)}\left(0.5\right)')
    assert (latex(Squeeze(0.5, hs=1)) ==
            r'\text{Squeeze}^{(1)}\left(0.5\right)')
    hs_tls = LocalSpace('1', basis=('g', 'e'))
    # LocalSigma renders either as sigma_{j,k} or as a ket-bra, depending on
    # the sig_as_ketbra option (ket-bra is the default)
    sig_e_g = LocalSigma('e', 'g', hs=hs_tls)
    assert (
        latex(sig_e_g, sig_as_ketbra=False) ==
        r'\hat{\sigma}_{e,g}^{(1)}')
    assert (
        latex(sig_e_g) ==
        r'\left\lvert e \middle\rangle\!\middle\langle g \right\rvert^{(1)}')
    # multi-letter basis labels are wrapped in \text{...}
    hs_tls = LocalSpace('1', basis=('excited', 'ground'))
    sig_excited_ground = LocalSigma('excited', 'ground', hs=hs_tls)
    assert (
        latex(sig_excited_ground, sig_as_ketbra=False) ==
        r'\hat{\sigma}_{\text{excited},\text{ground}}^{(1)}')
    assert (
        latex(sig_excited_ground) ==
        r'\left\lvert \text{excited} \middle\rangle\!'
        r'\middle\langle \text{ground} \right\rvert^{(1)}')
    # labels that are Greek-letter names render as Greek letters
    hs_tls = LocalSpace('1', basis=('mu', 'nu'))
    sig_mu_nu = LocalSigma('mu', 'nu', hs=hs_tls)
    assert (
        latex(sig_mu_nu) ==
        r'\left\lvert \mu \middle\rangle\!'
        r'\middle\langle \nu \right\rvert^{(1)}')
    # projectors render as \Pi with the projected state as subscript
    hs_tls = LocalSpace('1', basis=('excited', 'ground'))
    sig_excited_excited = LocalProjector('excited', hs=hs_tls)
    assert (
        latex(sig_excited_excited, sig_as_ketbra=False) ==
        r'\hat{\Pi}_{\text{excited}}^{(1)}')
    hs_tls = LocalSpace('1', basis=('g', 'e'))
    sig_e_e = LocalProjector('e', hs=hs_tls)
    assert (
        latex(sig_e_e, sig_as_ketbra=False) == r'\hat{\Pi}_{e}^{(1)}')
def test_tex_operator_operations():
    """Test the tex representation of operator algebra operations"""
    hs1 = LocalSpace('q_1', dimension=2)
    hs2 = LocalSpace('q_2', dimension=2)
    A = OperatorSymbol("A", hs=hs1)
    B = OperatorSymbol("B", hs=hs1)
    C = OperatorSymbol("C", hs=hs2)
    psi = KetSymbol('Psi', hs=hs1)
    gamma = symbols('gamma', positive=True)
    assert latex(A.dag()) == r'\hat{A}^{(q_{1})\dagger}'
    assert latex(A + B) == r'\hat{A}^{(q_{1})} + \hat{B}^{(q_{1})}'
    assert latex(A * B) == r'\hat{A}^{(q_{1})} \hat{B}^{(q_{1})}'
    assert latex(A * C) == r'\hat{A}^{(q_{1})} \hat{C}^{(q_{2})}'
    # scalar coefficients: ints, complex numbers, and sympy expressions
    assert latex(2 * A) == r'2 \hat{A}^{(q_{1})}'
    assert latex(2j * A) == r'2i \hat{A}^{(q_{1})}'
    assert latex((1+2j) * A) == r'(1+2i) \hat{A}^{(q_{1})}'
    assert latex(gamma**2 * A) == r'\gamma^{2} \hat{A}^{(q_{1})}'
    assert (
        latex(-gamma**2/2 * A) == r'- \frac{\gamma^{2}}{2} \hat{A}^{(q_{1})}')
    # a partial trace renders with the traced-over space as subscript
    assert (
        latex(tr(A * C, over_space=hs2)) ==
        r'{\rm tr}_{q_{2}}\left[\hat{C}^{(q_{2})}\right] '
        r'\hat{A}^{(q_{1})}')
    # adjoints of atomic symbols use a dagger superscript; adjoints of
    # composite expressions are parenthesized first
    assert latex(Adjoint(A)) == r'\hat{A}^{(q_{1})\dagger}'
    assert (
        latex(Adjoint(A**2)) ==
        r'\left(\hat{A}^{(q_{1})} \hat{A}^{(q_{1})}\right)^\dagger')
    assert (
        latex(Adjoint(A)**2) ==
        r'\hat{A}^{(q_{1})\dagger} \hat{A}^{(q_{1})\dagger}')
    # the adjoint of a creation operator simplifies to the annihilator
    assert latex(Adjoint(Create(hs=1))) == r'\hat{a}^{(1)}'
    assert (
        latex(Adjoint(A + B)) ==
        r'\left(\hat{A}^{(q_{1})} + \hat{B}^{(q_{1})}\right)^\dagger')
    assert latex(PseudoInverse(A)) == r'\left(\hat{A}^{(q_{1})}\right)^+'
    assert (
        latex(PseudoInverse(A)**2) ==
        r'\left(\hat{A}^{(q_{1})}\right)^+ \left(\hat{A}^{(q_{1})}\right)^+')
    assert (latex(NullSpaceProjector(A)) ==
            r'\hat{P}_{Ker}\left(\hat{A}^{(q_{1})}\right)')
    assert latex(A - B) == r'\hat{A}^{(q_{1})} - \hat{B}^{(q_{1})}'
    assert (latex(A - B + C) ==
            r'\hat{A}^{(q_{1})} - \hat{B}^{(q_{1})} + \hat{C}^{(q_{2})}')
    assert (latex(2 * A - sqrt(gamma) * (B + C)) ==
            r'2 \hat{A}^{(q_{1})} - \sqrt{\gamma} \left(\hat{B}^{(q_{1})} + '
            r'\hat{C}^{(q_{2})}\right)')
    # commutators render with square brackets
    assert (latex(Commutator(A, B)) ==
            r'\left[\hat{A}^{(q_{1})}, \hat{B}^{(q_{1})}\right]')
    expr = (Commutator(A, B) * psi).dag()
    assert (
        latex(expr, show_hs_label=False) ==
        r'\left\langle \Psi \right\rvert \left[\hat{A}, '
        r'\hat{B}\right]^{\dagger}')
def test_tex_ket_elements():
    """Test the tex representation of "atomic" kets"""
    hs1 = LocalSpace('q1', basis=('g', 'e'))
    hs2 = LocalSpace('q2', basis=('g', 'e'))
    alpha, beta = symbols('alpha, beta')
    psi = KetSymbol('Psi', hs=hs1)
    assert (latex(psi) == r'\left\lvert \Psi \right\rangle^{(q_{1})}')
    # symbolic arguments render in parentheses inside the ket
    assert (
        latex(KetSymbol('Psi', alpha, beta, hs=1)) ==
        r'\left\lvert \Psi\left(\alpha, \beta\right) \right\rangle^{(1)}')
    # tex_use_braket switches to the braket-package \Ket macro;
    # show_hs_label controls where (or whether) the space label appears
    assert (latex(psi, tex_use_braket=True) == r'\Ket{\Psi}^{(q_{1})}')
    assert (
        latex(psi, tex_use_braket=True, show_hs_label='subscript') ==
        r'\Ket{\Psi}_{(q_{1})}')
    assert (
        latex(psi, tex_use_braket=True, show_hs_label=False) == r'\Ket{\Psi}')
    assert (latex(KetSymbol('Psi', hs=1)) ==
            r'\left\lvert \Psi \right\rangle^{(1)}')
    assert (latex(KetSymbol('Psi', hs=(1, 2))) ==
            r'\left\lvert \Psi \right\rangle^{(1 \otimes 2)}')
    assert (latex(KetSymbol('Psi', hs=hs1*hs2)) ==
            r'\left\lvert \Psi \right\rangle^{(q_{1} \otimes q_{2})}')
    assert (latex(KetSymbol('Psi', hs=1)) ==
            r'\left\lvert \Psi \right\rangle^{(1)}')
    # the zero and trivial kets render as plain scalars
    assert latex(ZeroKet) == '0'
    assert latex(TrivialKet) == '1'
    assert (latex(BasisKet('e', hs=hs1)) ==
            r'\left\lvert e \right\rangle^{(q_{1})}')
    # multi-letter basis labels are wrapped in \text{...}
    hs_tls = LocalSpace('1', basis=('excited', 'ground'))
    assert (latex(BasisKet('excited', hs=hs_tls)) ==
            r'\left\lvert \text{excited} \right\rangle^{(1)}')
    assert (latex(BasisKet(1, hs=1)) ==
            r'\left\lvert 1 \right\rangle^{(1)}')
    # spin basis kets label states by m-value, as "n/2" fractions by default
    # or as TeX fractions with tex_frac_for_spin_labels
    spin = SpinSpace('s', spin=(3, 2))
    assert (
        latex(SpinBasisKet(-3, 2, hs=spin)) ==
        r'\left\lvert -3/2 \right\rangle^{(s)}')
    assert (
        latex(SpinBasisKet(1, 2, hs=spin)) ==
        r'\left\lvert +1/2 \right\rangle^{(s)}')
    assert (
        latex(SpinBasisKet(-3, 2, hs=spin), tex_frac_for_spin_labels=True) ==
        r'\left\lvert -\frac{3}{2} \right\rangle^{(s)}')
    assert (
        latex(SpinBasisKet(1, 2, hs=spin), tex_frac_for_spin_labels=True) ==
        r'\left\lvert +\frac{1}{2} \right\rangle^{(s)}')
    # coherent states render as |alpha=...>
    assert (latex(CoherentStateKet(2.0, hs=1)) ==
            r'\left\lvert \alpha=2 \right\rangle^{(1)}')
def test_tex_symbolic_labels():
    """Test tex representation of symbols with symbolic labels"""
    i = IdxSym('i')
    j = IdxSym('j')
    hs0 = LocalSpace(0)
    hs1 = LocalSpace(1)
    Psi = IndexedBase('Psi')
    # use the braket-package macros for the whole block
    with configure_printing(tex_use_braket=True):
        # symbolic Fock labels render as their sympy expression
        assert (
            latex(BasisKet(FockIndex(2 * i), hs=hs0)) ==
            r'\Ket{2 i}^{(0)}')
        assert (latex(
            KetSymbol(StrLabel(2 * i), hs=hs0)) ==
            r'\Ket{2 i}^{(0)}')
        assert (
            latex(KetSymbol(StrLabel(Psi[i, j]), hs=hs0*hs1)) ==
            r'\Ket{\Psi_{i j}}^{(0 \otimes 1)}')
        # a product of basis kets renders as a single multi-label ket
        expr = BasisKet(FockIndex(i), hs=hs0) * BasisKet(FockIndex(j), hs=hs1)
        assert latex(expr) == r'\Ket{i,j}^{(0 \otimes 1)}'
        assert (
            latex(Bra(BasisKet(FockIndex(2 * i), hs=hs0))) ==
            r'\Bra{2 i}^{(0)}')
        assert (
            latex(LocalSigma(FockIndex(i), FockIndex(j), hs=hs0)) ==
            r'\Ket{i}\!\Bra{j}^{(0)}')
        # symbolic sums: the coherent state expanded in the Fock basis
        alpha = symbols('alpha')
        expr = CoherentStateKet(alpha, hs=1).to_fock_representation()
        assert (
            latex(expr) ==
            r'e^{- \frac{\alpha \overline{\alpha}}{2}} '
            r'\left(\sum_{n \in \mathcal{H}_{1}} '
            r'\frac{\alpha^{n}}{\sqrt{n!}} \Ket{n}^{(1)}\right)')
        # conjg_style switches \overline{...} to a {...}^* star notation
        assert (
            latex(expr, conjg_style='star') ==
            r'e^{- \frac{\alpha {\alpha}^*}{2}} '
            r'\left(\sum_{n \in \mathcal{H}_{1}} '
            r'\frac{\alpha^{n}}{\sqrt{n!}} \Ket{n}^{(1)}\right)')
    # outside the context manager: an operator with an indexed symbolic label
    tls = SpinSpace(label='s', spin='1/2', basis=('down', 'up'))
    Sig = IndexedBase('sigma')
    n = IdxSym('n')
    Sig_n = OperatorSymbol(StrLabel(Sig[n]), hs=tls)
    assert latex(Sig_n, show_hs_label=False) == r'\hat{\sigma}_{n}'
def test_tex_bra_elements():
    """Test the tex representation of "atomic" bras"""
    hs1 = LocalSpace('q1', basis=('g', 'e'))
    hs2 = LocalSpace('q2', basis=('g', 'e'))
    alpha, beta = symbols('alpha, beta')
    bra = Bra(KetSymbol('Psi', hs=hs1))
    assert (latex(bra) == r'\left\langle \Psi \right\rvert^{(q_{1})}')
    # symbolic arguments render in parentheses inside the bra
    assert latex(Bra(KetSymbol('Psi', alpha, beta, hs=hs1))) == (
        r'\left\langle \Psi\left(\alpha, \beta\right) \right\rvert^{(q_{1})}')
    # tex_use_braket switches to the braket-package \Bra macro;
    # show_hs_label controls where (or whether) the space label appears
    assert (latex(bra, tex_use_braket=True) == r'\Bra{\Psi}^{(q_{1})}')
    assert (
        latex(bra, tex_use_braket=True, show_hs_label='subscript') ==
        r'\Bra{\Psi}_{(q_{1})}')
    assert (
        latex(bra, tex_use_braket=True, show_hs_label=False) ==
        r'\Bra{\Psi}')
    assert (
        latex(Bra(KetSymbol('Psi', hs=1))) ==
        r'\left\langle \Psi \right\rvert^{(1)}')
    assert (
        latex(Bra(KetSymbol('Psi', hs=(1, 2)))) ==
        r'\left\langle \Psi \right\rvert^{(1 \otimes 2)}')
    assert (
        latex(Bra(KetSymbol('Psi', hs=hs1*hs2))) ==
        r'\left\langle \Psi \right\rvert^{(q_{1} \otimes q_{2})}')
    # .dag() / .adjoint() on a ket produce the same rendering as Bra(...)
    assert (
        latex(KetSymbol('Psi', hs=1).dag()) ==
        r'\left\langle \Psi \right\rvert^{(1)}')
    # bras of the zero and trivial kets render as plain scalars
    assert latex(Bra(ZeroKet)) == '0'
    assert latex(Bra(TrivialKet)) == '1'
    assert (
        latex(BasisKet('e', hs=hs1).adjoint()) ==
        r'\left\langle e \right\rvert^{(q_{1})}')
    assert (
        latex(BasisKet(1, hs=1).adjoint()) ==
        r'\left\langle 1 \right\rvert^{(1)}')
    assert (
        latex(CoherentStateKet(2.0, hs=1).dag()) ==
        r'\left\langle \alpha=2 \right\rvert^{(1)}')
def test_tex_ket_operations():
    """Test the tex representation of ket operations (sums, products,
    scalar multiples, bra-kets and ket-bras)."""
    hs1 = LocalSpace('q_1', basis=('g', 'e'))
    hs2 = LocalSpace('q_2', basis=('g', 'e'))
    ket_g1 = BasisKet('g', hs=hs1)
    ket_e1 = BasisKet('e', hs=hs1)
    ket_g2 = BasisKet('g', hs=hs2)
    ket_e2 = BasisKet('e', hs=hs2)
    psi1 = KetSymbol("Psi_1", hs=hs1)
    # Removed a redundant duplicate assignment of psi2 that appeared here.
    psi2 = KetSymbol("Psi_2", hs=hs1)
    psi3 = KetSymbol("Psi_3", hs=hs1)
    phi = KetSymbol("Phi", hs=hs2)
    A = OperatorSymbol("A_0", hs=hs1)
    gamma = symbols('gamma', positive=True)
    alpha = symbols('alpha')
    beta = symbols('beta')
    phase = exp(-I * gamma)
    i = IdxSym('i')
    assert (
        latex(psi1 + psi2) ==
        r'\left\lvert \Psi_{1} \right\rangle^{(q_{1})} + '
        r'\left\lvert \Psi_{2} \right\rangle^{(q_{1})}')
    assert (
        latex(psi1 - psi2 + psi3) ==
        r'\left\lvert \Psi_{1} \right\rangle^{(q_{1})} - '
        r'\left\lvert \Psi_{2} \right\rangle^{(q_{1})} + '
        r'\left\lvert \Psi_{3} \right\rangle^{(q_{1})}')
    assert (
        latex(psi1 * phi) ==
        r'\left\lvert \Psi_{1} \right\rangle^{(q_{1})} \otimes '
        r'\left\lvert \Phi \right\rangle^{(q_{2})}')
    assert (
        latex(phase * psi1) ==
        r'e^{- i \gamma} \left\lvert \Psi_{1} \right\rangle^{(q_{1})}')
    assert (
        latex((alpha + 1) * KetSymbol('Psi', hs=0)) ==
        r'\left(\alpha + 1\right) \left\lvert \Psi \right\rangle^{(0)}')
    assert (
        latex(A * psi1) ==
        r'\hat{A}_{0}^{(q_{1})} \left\lvert \Psi_{1} \right\rangle^{(q_{1})}')
    braket = BraKet(psi1, psi2)
    assert (
        latex(braket, show_hs_label='subscript') ==
        r'\left\langle \Psi_{1} \middle\vert \Psi_{2} \right\rangle_{(q_{1})}')
    assert (
        latex(braket, show_hs_label=False) ==
        r'\left\langle \Psi_{1} \middle\vert \Psi_{2} \right\rangle')
    expr = BraKet(
        KetSymbol('Psi_1', alpha, hs=hs1), KetSymbol('Psi_2', beta, hs=hs1))
    assert (
        latex(expr) ==
        r'\left\langle \Psi_{1}\left(\alpha\right) \middle\vert '
        r'\Psi_{2}\left(\beta\right) \right\rangle^{(q_{1})}')
    assert (
        latex(ket_e1 * ket_e2) ==
        r'\left\lvert ee \right\rangle^{(q_{1} \otimes q_{2})}')
    # Bra-kets between identical / orthogonal basis states collapse to scalars
    assert latex(ket_e1.dag() * ket_e1) == r'1'
    assert latex(ket_g1.dag() * ket_e1) == r'0'
    ketbra = KetBra(psi1, psi2)
    assert (
        latex(ketbra) ==
        r'\left\lvert \Psi_{1} \middle\rangle\!'
        r'\middle\langle \Psi_{2} \right\rvert^{(q_{1})}')
    assert (
        latex(ketbra, show_hs_label='subscript') ==
        r'\left\lvert \Psi_{1} \middle\rangle\!'
        r'\middle\langle \Psi_{2} \right\rvert_{(q_{1})}')
    assert (
        latex(ketbra, show_hs_label=False) ==
        r'\left\lvert \Psi_{1} \middle\rangle\!'
        r'\middle\langle \Psi_{2} \right\rvert')
    expr = KetBra(
        KetSymbol('Psi_1', alpha, hs=hs1), KetSymbol('Psi_2', beta, hs=hs1))
    assert (
        latex(expr) ==
        r'\left\lvert \Psi_{1}\left(\alpha\right) \middle\rangle\!'
        r'\middle\langle \Psi_{2}\left(\beta\right) \right\rvert^{(q_{1})}')
    bell1 = (ket_e1 * ket_g2 - I * ket_g1 * ket_e2) / sqrt(2)
    bell2 = (ket_e1 * ket_e2 - ket_g1 * ket_g2) / sqrt(2)
    assert (
        latex(bell1) ==
        r'\frac{1}{\sqrt{2}} \left(\left\lvert eg \right\rangle^{(q_{1} '
        r'\otimes q_{2})} - i \left\lvert ge \right\rangle'
        r'^{(q_{1} \otimes q_{2})}\right)')
    assert (
        latex(bell2) ==
        r'\frac{1}{\sqrt{2}} \left(\left\lvert ee \right\rangle^{(q_{1} '
        r'\otimes q_{2})} - \left\lvert gg \right\rangle'
        r'^{(q_{1} \otimes q_{2})}\right)')
    assert (
        latex(bell2, show_hs_label=False) ==
        r'\frac{1}{\sqrt{2}} \left(\left\lvert ee \right\rangle - '
        r'\left\lvert gg \right\rangle\right)')
    assert BraKet.create(bell1, bell2).expand() == 0
    assert (
        latex(BraKet.create(bell1, bell2)) ==
        r'\frac{1}{2} \left(\left\langle eg \right\rvert'
        r'^{(q_{1} \otimes q_{2})} + i \left\langle ge \right\rvert'
        r'^{(q_{1} \otimes q_{2})}\right) '
        r'\left(\left\lvert ee \right\rangle^{(q_{1} \otimes q_{2})} '
        r'- \left\lvert gg \right\rangle^{(q_{1} \otimes q_{2})}\right)')
    assert (
        latex(KetBra.create(bell1, bell2)) ==
        r'\frac{1}{2} \left(\left\lvert eg \right\rangle'
        r'^{(q_{1} \otimes q_{2})} - i \left\lvert ge \right\rangle'
        r'^{(q_{1} \otimes q_{2})}\right)\left(\left\langle ee \right\rvert'
        r'^{(q_{1} \otimes q_{2})} - \left\langle gg \right\rvert'
        r'^{(q_{1} \otimes q_{2})}\right)')
    with configure_printing(tex_use_braket=True):
        expr = KetBra(KetSymbol('Psi', hs=0), BasisKet(FockIndex(i), hs=0))
        assert latex(expr) == r'\Ket{\Psi}\!\Bra{i}^{(0)}'
        expr = KetBra(BasisKet(FockIndex(i), hs=0), KetSymbol('Psi', hs=0))
        assert latex(expr) == r'\Ket{i}\!\Bra{\Psi}^{(0)}'
        expr = BraKet(KetSymbol('Psi', hs=0), BasisKet(FockIndex(i), hs=0))
        # NOTE(review): the '^(0)' below (no braces around the exponent)
        # mirrors the printer's current braket-mode output — confirm intended.
        assert latex(expr) == r'\Braket{\Psi | i}^(0)'
        expr = BraKet(BasisKet(FockIndex(i), hs=0), KetSymbol('Psi', hs=0))
        assert latex(expr) == r'\Braket{i | \Psi}^(0)'
def test_tex_bra_operations():
    """Test the tex representation of bra operations (sums, tensor products,
    scalar multiples, adjoints)."""
    hs1 = LocalSpace('q_1', dimension=2)
    hs2 = LocalSpace('q_2', dimension=2)
    psi1 = KetSymbol("Psi_1", hs=hs1)
    # Removed redundant duplicate assignments of psi2 and bra_psi2 that
    # appeared here.
    psi2 = KetSymbol("Psi_2", hs=hs1)
    bra_psi1 = KetSymbol("Psi_1", hs=hs1).dag()
    bra_psi2 = KetSymbol("Psi_2", hs=hs1).dag()
    bra_psi3 = KetSymbol("Psi_3", hs=hs1).dag()
    bra_phi = KetSymbol("Phi", hs=hs2).dag()
    A = OperatorSymbol("A_0", hs=hs1)
    gamma = symbols('gamma', positive=True)
    phase = exp(-I * gamma)
    assert (
        latex((psi1 + psi2).dag()) ==
        r'\left\langle \Psi_{1} \right\rvert^{(q_{1})} + '
        r'\left\langle \Psi_{2} \right\rvert^{(q_{1})}')
    assert (
        latex((psi1 + psi2).dag(), tex_use_braket=True) ==
        r'\Bra{\Psi_{1}}^{(q_{1})} + \Bra{\Psi_{2}}^{(q_{1})}')
    assert (
        latex(bra_psi1 + bra_psi2) ==
        r'\left\langle \Psi_{1} \right\rvert^{(q_{1})} + '
        r'\left\langle \Psi_{2} \right\rvert^{(q_{1})}')
    assert (
        latex(bra_psi1 - bra_psi2 + bra_psi3) ==
        r'\left\langle \Psi_{1} \right\rvert^{(q_{1})} - '
        r'\left\langle \Psi_{2} \right\rvert^{(q_{1})} + '
        r'\left\langle \Psi_{3} \right\rvert^{(q_{1})}')
    assert (
        latex(bra_psi1 * bra_phi) ==
        r'\left\langle \Psi_{1} \right\rvert^{(q_{1})} \otimes '
        r'\left\langle \Phi \right\rvert^{(q_{2})}')
    assert (
        latex(bra_psi1 * bra_phi, tex_use_braket=True) ==
        r'\Bra{\Psi_{1}}^{(q_{1})} \otimes \Bra{\Phi}^{(q_{2})}')
    # Taking the bra conjugates the scalar prefactor: e^{-i gamma} -> e^{i gamma}
    assert (
        latex(Bra(phase * psi1)) ==
        r'e^{i \gamma} \left\langle \Psi_{1} \right\rvert^{(q_{1})}')
    assert (
        latex((A * psi1).dag()) ==
        r'\left\langle \Psi_{1} \right\rvert^{(q_{1})} '
        r'\hat{A}_{0}^{(q_{1})\dagger}')
def test_tex_sop_elements():
    """Test the tex representation of "atomic" Superoperators"""
    hs1 = LocalSpace('q1', dimension=2)
    hs2 = LocalSpace('q2', dimension=2)
    alpha, beta = symbols('alpha, beta')
    # Superoperator symbols render upright (\mathrm), unlike operators (\hat)
    assert latex(SuperOperatorSymbol("A", hs=hs1)) == r'\mathrm{A}^{(q_{1})}'
    assert (latex(SuperOperatorSymbol("A_1", hs=hs1*hs2)) ==
            r'\mathrm{A}_{1}^{(q_{1} \otimes q_{2})}')
    assert (latex(SuperOperatorSymbol("Xi", alpha, beta, hs=hs1)) ==
            r'\mathrm{\Xi}^{(q_{1})}\left(\alpha, \beta\right)')
    assert (latex(SuperOperatorSymbol("Xi_2", hs=('q1', 'q2'))) ==
            r'\mathrm{\Xi}_{2}^{(q_{1} \otimes q_{2})}')
    # Multi-letter subscripts are wrapped in \text
    assert (latex(SuperOperatorSymbol("Xi_full", hs=1)) ==
            r'\mathrm{\Xi}_{\text{full}}^{(1)}')
    assert latex(IdentitySuperOperator) == r'\mathbb{1}'
    assert latex(ZeroSuperOperator) == r'\mathbb{0}'
def test_tex_sop_operations():
    """Test the tex representation of super operator algebra operations"""
    hs1 = LocalSpace('q_1', dimension=2)
    hs2 = LocalSpace('q_2', dimension=2)
    A = SuperOperatorSymbol("A", hs=hs1)
    B = SuperOperatorSymbol("B", hs=hs1)
    C = SuperOperatorSymbol("C", hs=hs2)
    L = SuperOperatorSymbol("L", hs=1)
    M = SuperOperatorSymbol("M", hs=1)
    A_op = OperatorSymbol("A", hs=1)
    gamma = symbols('gamma', positive=True)
    # Sums, products, and scalar coefficients
    assert latex(A + B) == r'\mathrm{A}^{(q_{1})} + \mathrm{B}^{(q_{1})}'
    assert latex(A * B) == r'\mathrm{A}^{(q_{1})} \mathrm{B}^{(q_{1})}'
    assert latex(A * C) == r'\mathrm{A}^{(q_{1})} \mathrm{C}^{(q_{2})}'
    assert latex(2 * A) == r'2 \mathrm{A}^{(q_{1})}'
    assert latex(2j * A) == r'2i \mathrm{A}^{(q_{1})}'
    assert latex((1+2j) * A) == r'(1+2i) \mathrm{A}^{(q_{1})}'
    assert latex(gamma**2 * A) == r'\gamma^{2} \mathrm{A}^{(q_{1})}'
    assert (latex(-gamma**2/2 * A) ==
            r'- \frac{\gamma^{2}}{2} \mathrm{A}^{(q_{1})}')
    # Adjoints: atomic symbols get a dagger superscript, sums are parenthesized
    assert latex(SuperAdjoint(A)) == r'\mathrm{A}^{(q_{1})\dagger}'
    assert (latex(SuperAdjoint(A + B)) ==
            r'\left(\mathrm{A}^{(q_{1})} + '
            r'\mathrm{B}^{(q_{1})}\right)^\dagger')
    assert latex(A - B) == r'\mathrm{A}^{(q_{1})} - \mathrm{B}^{(q_{1})}'
    assert (latex(A - B + C) ==
            r'\mathrm{A}^{(q_{1})} - \mathrm{B}^{(q_{1})} + '
            r'\mathrm{C}^{(q_{2})}')
    assert (latex(2 * A - sqrt(gamma) * (B + C)) ==
            r'2 \mathrm{A}^{(q_{1})} - \sqrt{\gamma} '
            r'\left(\mathrm{B}^{(q_{1})} + \mathrm{C}^{(q_{2})}\right)')
    # Pre-/post-multiplication superoperators and application to an operator
    assert latex(SPre(A_op)) == r'\mathrm{SPre}\left(\hat{A}^{(1)}\right)'
    assert latex(SPost(A_op)) == r'\mathrm{SPost}\left(\hat{A}^{(1)}\right)'
    assert (latex(SuperOperatorTimesOperator(L, A_op)) ==
            r'\mathrm{L}^{(1)}\left[\hat{A}^{(1)}\right]')
    assert (latex(SuperOperatorTimesOperator(L, sqrt(gamma) * A_op)) ==
            r'\mathrm{L}^{(1)}\left[\sqrt{\gamma} \hat{A}^{(1)}\right]')
    assert (latex(SuperOperatorTimesOperator((L + 2*M), A_op)) ==
            r'\left(\mathrm{L}^{(1)} + 2 \mathrm{M}^{(1)}\right)'
            r'\left[\hat{A}^{(1)}\right]')
def test_tex_spin_arrows():
    """Test the representation of spin-1/2 spaces with special labels "down",
    "up" as arrows"""
    tls1 = SpinSpace('1', spin='1/2', basis=("down", "up"))
    tls2 = SpinSpace('2', spin='1/2', basis=("down", "up"))
    tls3 = SpinSpace('3', spin='1/2', basis=("down", "up"))
    down1 = BasisKet('down', hs=tls1)
    up1 = BasisKet('up', hs=tls1)
    down2 = BasisKet('down', hs=tls2)
    up3 = BasisKet('up', hs=tls3)
    # "down"/"up" labels render as \downarrow / \uparrow
    assert latex(down1) == r'\left\lvert \downarrow \right\rangle^{(1)}'
    assert latex(up1) == r'\left\lvert \uparrow \right\rangle^{(1)}'
    # Arrow labels also apply inside tensor-product kets ...
    ket = down1 * down2 * up3
    assert (
        latex(ket) ==
        r'\left\lvert \downarrow\downarrow\uparrow \right\rangle'
        r'^{(1 \otimes 2 \otimes 3)}')
    # ... and inside a single local sigma (ket-bra) operator
    sig = LocalSigma("up", "down", hs=tls1)
    assert (
        latex(sig) ==
        r'\left\lvert \uparrow \middle\rangle\!'
        r'\middle\langle \downarrow \right\rvert^{(1)}')
@pytest.mark.xfail
def test_tex_spin_arrows_multi_sigma():
    """Arrow rendering for a product of sigmas on multiple spin spaces.

    Currently known to fail; the expected string below is a placeholder.
    """
    # when fixed, combine with test_tex_spin_arrows
    tls1 = SpinSpace('1', spin='1/2', basis=("down", "up"))
    tls2 = SpinSpace('2', spin='1/2', basis=("down", "up"))
    tls3 = SpinSpace('3', spin='1/2', basis=("down", "up"))
    sig1 = LocalSigma("up", "down", hs=tls1)
    sig2 = LocalSigma("up", "up", hs=tls2)
    sig3 = LocalSigma("down", "down", hs=tls3)
    # Placeholder expectation (r'') — to be filled in once the printer is fixed
    assert latex(sig1 * sig2 * sig3) == r''
def test_repr_latex():
    """Test the automatic representation in the notebook"""
    A = OperatorSymbol("A", hs=1)
    B = OperatorSymbol("B", hs=1)
    # _repr_latex_ (used by Jupyter) must wrap latex() output in $...$
    assert A._repr_latex_() == "$%s$" % latex(A)
    assert (A + B)._repr_latex_() == "$%s$" % latex(A + B)
@pytest.fixture
def MyScalarFunc():
    """Fixture providing a minimal scalar-function class with a working
    ``_diff`` so that derivative printing can be tested."""

    class MyScalarDerivative(QuantumDerivative, Scalar):
        # Marker derivative type combining the generic derivative machinery
        # with the Scalar interface
        pass

    class ScalarFunc(ScalarExpression):
        # Symbolic function of sympy arguments, printed as "name(args...)"

        def __init__(self, name, *sym_args):
            self._name = name
            self._sym_args = sym_args
            super().__init__(name, *sym_args)

        def _adjoint(self):
            # presumably assumed real/self-adjoint for test purposes
            return self

        @property
        def args(self):
            return (self._name, *self._sym_args)

        def _diff(self, sym):
            return MyScalarDerivative(self, derivs={sym: 1})

        def _latex(self, *args, **kwargs):
            return "%s(%s)" % (
                self._name, ", ".join(
                    [latex(sym) for sym in self._sym_args]))

    return ScalarFunc
def test_tex_derivative(MyScalarFunc):
    """Test the tex representation of abstract derivatives, evaluation
    points, series expansions, and nested derivatives."""
    s, s0, t, t0, gamma = symbols('s, s_0, t, t_0, gamma', real=True)
    m = IdxSym('m')
    n = IdxSym('n')
    S = IndexedBase('s')
    T = IndexedBase('t')
    f = partial(MyScalarFunc, "f")
    g = partial(MyScalarFunc, "g")
    # Single and mixed partial derivatives
    expr = f(s, t).diff(t)
    assert latex(expr) == r'\frac{\partial}{\partial t} f(s, t)'
    expr = f(s, t).diff(s, n=2).diff(t)
    assert latex(expr) == (
        r'\frac{\partial^{3}}{\partial s^{2} \partial t} f(s, t)')
    # Derivatives evaluated at a point render with \left. ... \right\vert
    expr = f(s, t).diff(s, n=2).diff(t).evaluate_at({s: s0})
    assert latex(expr) == (
        r'\left. \frac{\partial^{3}}{\partial s^{2} \partial t} f(s, t) '
        r'\right\vert_{s=s_{0}}')
    expr = f(S[m], T[n]).diff(S[m], n=2).diff(T[n]).evaluate_at({S[m]: s0})
    assert latex(expr) == (
        r'\left. \frac{\partial^{3}}{\partial s_{m}^{2} \partial t_{n}} '
        r'f(s_{m}, t_{n}) \right\vert_{s_{m}=s_{0}}')
    expr = f(s, t).diff(s, n=2).diff(t).evaluate_at({s: 0})
    assert latex(expr) == (
        r'\left. \frac{\partial^{3}}{\partial s^{2} \partial t} f(s, t) '
        r'\right\vert_{s=0}')
    expr = f(gamma, t).diff(gamma, n=2).diff(t).evaluate_at({gamma: 0})
    assert latex(expr) == (
        r'\left. \frac{\partial^{3}}{\partial \gamma^{2} \partial t} '
        r'f(\gamma, t) \right\vert_{\gamma=0}')
    expr = f(s, t).diff(s, n=2).diff(t).evaluate_at({s: s0, t: t0})
    assert latex(expr) == (
        r'\left. \frac{\partial^{3}}{\partial s^{2} \partial t} f(s, t) '
        r'\right\vert_{s=s_{0}, t=t_{0}}')
    # Constructing the derivative class directly: sums and scalar multiples
    # of the differentiated expression are parenthesized
    D = expr.__class__
    expr = D(f(s, t) + g(s, t), derivs={s: 2, t: 1}, vals={s: s0, t: t0})
    assert latex(expr) == (
        r'\left. \frac{\partial^{3}}{\partial s^{2} \partial t} '
        r'\left(f(s, t) + g(s, t)\right) \right\vert_{s=s_{0}, t=t_{0}}')
    expr = D(2 * f(s, t), derivs={s: 2, t: 1}, vals={s: s0, t: t0})
    assert latex(expr) == (
        r'\left. \frac{\partial^{3}}{\partial s^{2} \partial t} '
        r'\left(2 f(s, t)\right) \right\vert_{s=s_{0}, t=t_{0}}')
    # Derivatives appearing as factors/terms in larger expressions
    expr = f(s, t).diff(t) * g(s, t)
    assert latex(expr) == (
        r'\left(\frac{\partial}{\partial t} f(s, t)\right) g(s, t)')
    expr = f(s, t).diff(t).evaluate_at({t: 0}) * g(s, t)
    assert latex(expr) == (
        r'\left(\left. \frac{\partial}{\partial t} f(s, t) '
        r'\right\vert_{t=0}\right) g(s, t)')
    expr = f(s, t).diff(t) + g(s, t)
    assert latex(expr) == r'\frac{\partial}{\partial t} f(s, t) + g(s, t)'
    # Series expansions produce tuples of (evaluated) derivatives
    f = MyScalarFunc("f", S[m], T[n])
    series = f.series_expand(T[n], about=0, order=3)
    assert latex(series) == (
        r'\left(f(s_{m}, 0), \left. \frac{\partial}{\partial t_{n}} '
        r'f(s_{m}, t_{n}) \right\vert_{t_{n}=0}, \frac{1}{2} \left(\left. '
        r'\frac{\partial^{2}}{\partial t_{n}^{2}} f(s_{m}, t_{n}) '
        r'\right\vert_{t_{n}=0}\right), \frac{1}{6} \left(\left. '
        r'\frac{\partial^{3}}{\partial t_{n}^{3}} f(s_{m}, t_{n}) '
        r'\right\vert_{t_{n}=0}\right)\right)')
    f = MyScalarFunc("f", s, t)
    series = f.series_expand(t, about=0, order=2)
    assert (
        latex(series) ==
        r'\left(f(s, 0), \left. \frac{\partial}{\partial t} f(s, t) '
        r'\right\vert_{t=0}, \frac{1}{2} \left(\left. '
        r'\frac{\partial^{2}}{\partial t^{2}} f(s, t) '
        r'\right\vert_{t=0}\right)\right)')
    expr = (  # nested derivative
        MyScalarFunc("f", s, t)
        .diff(s, n=2)
        .diff(t)
        .evaluate_at({t: t0})
        .diff(t0))
    assert latex(expr) == (
        r'\frac{\partial}{\partial t_{0}} \left(\left. '
        r'\frac{\partial^{3}}{\partial s^{2} \partial t} f(s, t) '
        r'\right\vert_{t=t_{0}}\right)')
| {
"repo_name": "mabuchilab/QNET",
"path": "tests/printing/test_tex_printing.py",
"copies": "1",
"size": "38862",
"license": "mit",
"hash": 6791401997737617000,
"line_mean": 42.6161616162,
"line_max": 189,
"alpha_frac": 0.5304410478,
"autogenerated": false,
"ratio": 2.6585032152141195,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.36889442630141195,
"avg_score": null,
"num_lines": null
} |
from functools import partial
import pytest
# Sentinel marking "value not yet computed"; object() guarantees an identity
# that no factory result can collide with (unlike None).
empty = object()
class cached_property(object):
    """Non-data descriptor that computes a value once and caches it.

    On first attribute access, ``func(obj)`` is called and the result is
    stored in the instance ``__dict__`` under the function's name.  Because
    this is a non-data descriptor (no ``__set__``), subsequent lookups hit
    the instance dict directly and never reach ``__get__`` again.
    """

    def __init__(self, func):
        self.func = func

    def __get__(self, obj, cls):
        if obj is None:
            # Fix: class-level access (e.g. C.attr) previously crashed with
            # AttributeError on obj.__dict__; return the descriptor instead.
            return self
        value = obj.__dict__[self.func.__name__] = self.func(obj)
        return value
class SimpleProxy(object):
    """Proxy that lazily builds its target on the first ``str()`` call.

    The target is created by ``factory`` and memoized in ``self.object``,
    using the module-level ``empty`` sentinel to mean "not built yet".
    """

    def __init__(self, factory):
        self.factory = factory
        self.object = empty

    def __str__(self):
        target = self.object
        if target is empty:
            target = self.factory()
            self.object = target
        return str(target)
class CachedPropertyProxy(object):
    """Proxy that lazily builds its target via a ``cached_property``.

    The first access to ``self.object`` invokes the factory; the descriptor
    then caches the result in the instance dict.
    """

    def __init__(self, factory):
        self.factory = factory

    @cached_property
    def object(self):
        return self.factory()

    def __str__(self):
        target = self.object
        return str(target)
class LocalsSimpleProxy(object):
    """Like SimpleProxy, but ``str`` is bound as a local default argument
    (``func=str``) to benchmark local-name lookup speed."""

    def __init__(self, factory):
        self.factory = factory
        self.object = empty

    def __str__(self, func=str):
        target = self.object
        if target is empty:
            target = self.factory()
            self.object = target
        return func(target)
class LocalsCachedPropertyProxy(object):
    """Like CachedPropertyProxy, but with ``str`` bound as a local default
    argument (``func=str``) to benchmark local-name lookup speed."""

    def __init__(self, factory):
        self.factory = factory

    @cached_property
    def object(self):
        return self.factory()

    def __str__(self, func=str):
        return func(self.object)
@pytest.fixture(scope="module", params=["SimpleProxy", "CachedPropertyProxy", "LocalsSimpleProxy", "LocalsCachedPropertyProxy"])
def impl(request):
    """Parametrized fixture yielding each proxy class defined in this module
    (looked up by name in the module globals)."""
    return globals()[request.param]
def test_proto(benchmark, impl):
    """Benchmark str() through each proxy implementation and check the
    proxied result is the underlying string."""
    obj = "foobar"
    proxied = impl(lambda: obj)
    # benchmark() calls the target repeatedly and returns one result
    result = benchmark(partial(str, proxied))
    assert result == obj
| {
"repo_name": "thedrow/pytest-benchmark",
"path": "tests/test_sample.py",
"copies": "3",
"size": "1614",
"license": "bsd-2-clause",
"hash": 6076887617199112000,
"line_mean": 21.1095890411,
"line_max": 128,
"alpha_frac": 0.6164807931,
"autogenerated": false,
"ratio": 3.9174757281553396,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00010619093129446744,
"num_lines": 73
} |
from functools import partial
import simplejson
from django.http import HttpResponseNotFound, HttpResponseForbidden, \
HttpResponse, HttpResponseBadRequest
from rip import error_types
# HTTP status code to use for a *successful* response, keyed by request
# method.  NOTE(review): methods outside this mapping (e.g. PUT, HEAD) would
# raise KeyError in build_http_response — confirm that is intended upstream.
http_status_code_mapping = dict(
    GET=200,
    PATCH=202,
    POST=201,
    DELETE=204
)
class HttpAuthenticationFailed(HttpResponse):
    """401 Unauthorized response (django provides no built-in 401 class)."""
    status_code = 401
# Maps rip error types to the django response class used to report them.
http_response_mapping = {
    error_types.ObjectNotFound: HttpResponseNotFound,          # 404
    error_types.ActionForbidden: HttpResponseForbidden,        # 403
    error_types.AuthenticationFailed: HttpAuthenticationFailed,  # 401
    error_types.InvalidData: HttpResponseBadRequest,           # 400
    error_types.MethodNotAllowed: partial(HttpResponse, status=405)
}
def build_http_response(http_request, response):
    """Translate a rip ``response`` into the corresponding django response.

    Successful responses get a status code derived from the request method
    (see ``http_status_code_mapping``); failures are mapped through
    ``http_response_mapping`` based on the failure reason.  In both cases
    the payload is the JSON-serialized response data.
    """
    body = simplejson.dumps(response.data)
    if not response.is_success:
        response_cls = http_response_mapping[response.reason]
        return response_cls(content=body, content_type="application/json")
    status = http_status_code_mapping[http_request.method]
    return HttpResponse(
        status=status, content=body, content_type='application/json')
| {
"repo_name": "Aplopio/django_rip",
"path": "rip/django_adapter/django_response_builder.py",
"copies": "2",
"size": "1192",
"license": "mit",
"hash": 7743148259519306000,
"line_mean": 28.0731707317,
"line_max": 70,
"alpha_frac": 0.7206375839,
"autogenerated": false,
"ratio": 4.382352941176471,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 41
} |
from functools import partial
import six
from graphql_relay import from_global_id, to_global_id
from ..types import ID, Field, Interface, ObjectType
from ..types.interface import InterfaceMeta
def is_node(objecttype):
    '''
    Check if the given objecttype has Node as an interface
    '''
    assert issubclass(objecttype, ObjectType), (
        'Only ObjectTypes can have a Node interface. Received %s'
    ) % objecttype

    # True as soon as any declared interface is (a subclass of) Node
    return any(
        issubclass(interface, Node)
        for interface in objecttype._meta.interfaces)
def get_default_connection(cls):
    """Create a default relay Connection type named "<Cls>Connection" for the
    given ObjectType."""
    # Imported here to avoid a circular import with the connection module
    from .connection import Connection
    assert issubclass(cls, ObjectType), (
        'Can only get connection type on implemented Nodes.'
    )

    class Meta:
        node = cls

    # Dynamically build the Connection subclass with the Meta above
    return type('{}Connection'.format(cls.__name__), (Connection,), {'Meta': Meta})
class GlobalID(Field):
    """ID field whose resolver wraps the raw id into a relay global ID."""

    def __init__(self, node, *args, **kwargs):
        super(GlobalID, self).__init__(ID, *args, **kwargs)
        self.node = node

    @staticmethod
    def id_resolver(parent_resolver, node, root, args, context, info):
        # Resolve the plain id first, then encode it with the parent type name
        id = parent_resolver(root, args, context, info)
        return node.to_global_id(info.parent_type.name, id)  # root._meta.name

    def get_resolver(self, parent_resolver):
        return partial(self.id_resolver, parent_resolver, self.node)
class NodeMeta(InterfaceMeta):
    """Interface metaclass that injects a required 'id' GlobalID field into
    every class using it."""

    def __new__(cls, name, bases, attrs):
        cls = InterfaceMeta.__new__(cls, name, bases, attrs)
        cls._meta.fields['id'] = GlobalID(cls, required=True, description='The ID of the object.')
        return cls
class NodeField(Field):
    """Root field that looks up any Node implementor by its global ID."""

    def __init__(self, node, type=False, deprecation_reason=None,
                 name=None, **kwargs):
        assert issubclass(node, Node), 'NodeField can only operate in Nodes'
        type = type or node
        # NOTE(review): deprecation_reason, name and **kwargs are accepted
        # but never forwarded to the base Field — confirm this is intentional.
        super(NodeField, self).__init__(
            type,
            description='The ID of the object',
            id=ID(required=True),
            resolver=node.node_resolver
        )
class Node(six.with_metaclass(NodeMeta, Interface)):
    '''An object with an ID'''

    @classmethod
    def Field(cls, *args, **kwargs):  # noqa: N802
        """Convenience constructor for a NodeField rooted at this Node."""
        return NodeField(cls, *args, **kwargs)

    @classmethod
    def node_resolver(cls, root, args, context, info):
        return cls.get_node_from_global_id(args.get('id'), context, info)

    @classmethod
    def get_node_from_global_id(cls, global_id, context, info):
        """Decode a global ID and fetch the corresponding object.

        Returns None for any malformed or unknown ID, or for a type that
        does not implement this Node interface.
        """
        try:
            _type, _id = cls.from_global_id(global_id)
            graphene_type = info.schema.get_type(_type).graphene_type
            # We make sure the ObjectType implements the "Node" interface
            assert cls in graphene_type._meta.interfaces
        except Exception:
            # Narrowed from a bare ``except:`` so that KeyboardInterrupt and
            # SystemExit are no longer swallowed; any lookup failure still
            # resolves to None as before.
            return None
        get_node = getattr(graphene_type, 'get_node', None)
        if get_node:
            return get_node(_id, context, info)

    @classmethod
    def from_global_id(cls, global_id):
        return from_global_id(global_id)

    @classmethod
    def to_global_id(cls, type, id):
        return to_global_id(type, id)

    @classmethod
    def implements(cls, objecttype):
        # Attach a Connection type, using the objecttype's own factory when
        # one is defined, the default connection otherwise.
        get_connection = getattr(objecttype, 'get_connection', None)
        if not get_connection:
            get_connection = partial(get_default_connection, objecttype)

        objecttype.Connection = get_connection()
| {
"repo_name": "sjhewitt/graphene",
"path": "graphene/relay/node.py",
"copies": "1",
"size": "3371",
"license": "mit",
"hash": -4550672987266002400,
"line_mean": 29.3693693694,
"line_max": 98,
"alpha_frac": 0.6291901513,
"autogenerated": false,
"ratio": 3.883640552995392,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0007613132613132613,
"num_lines": 111
} |
from functools import partial
import slim
import tensorflow as tf
import data_provider
import utils
from slim import ops
from slim import scopes
def align_reference_shape(reference_shape, reference_shape_bb, im, bb):
    """Scale/translate the reference shape onto bounding box `bb` and resize
    the image by the inverse scale factor.

    Returns (resized_image, aligned_mean_shape, scale_ratio).
    """
    def norm(x):
        # Frobenius-style spread of the points around their centroid
        return tf.sqrt(tf.reduce_sum(tf.square(x - tf.reduce_mean(x, 0))))

    # Ratio of the target box spread to the reference box spread
    ratio = norm(bb) / norm(reference_shape_bb)
    align_mean_shape = (reference_shape - tf.reduce_mean(reference_shape_bb, 0)) * ratio + tf.reduce_mean(bb, 0)
    new_size = tf.to_int32(tf.to_float(tf.shape(im)[:2]) / ratio)
    # Both image and shape are rescaled by 1/ratio so the face has a
    # canonical size in the returned image
    return tf.image.resize_bilinear(tf.expand_dims(im, 0), new_size)[0, :, :, :], align_mean_shape / ratio, ratio
def normalized_rmse(pred, gt_truth):
    """Per-example point-to-point error, normalized by inter-point distance.

    Assumes the 68-landmark annotation scheme: indices 36 and 45 are used as
    the normalization pair (presumably the outer eye corners of the iBUG
    68-point markup — confirm), and the error is averaged over 68 points.
    """
    norm = tf.sqrt(tf.reduce_sum(((gt_truth[:, 36, :] - gt_truth[:, 45, :])**2), 1))

    return tf.reduce_sum(tf.sqrt(tf.reduce_sum(tf.square(pred - gt_truth), 2)), 1) / (norm * 68)
def conv_model(inputs, is_training=True, scope=''):
    """Two-layer conv feature extractor applied to landmark patches.

    Returns a dict of named endpoints; the final feature is 'concat', the
    channel-wise concatenation of a central crop of conv_2 with pool_2.
    """
    # summaries or losses.
    net = {}

    with tf.op_scope([inputs], scope, 'mdm_conv'):
        with scopes.arg_scope([ops.conv2d, ops.fc], is_training=is_training):
            with scopes.arg_scope([ops.conv2d], activation=tf.nn.relu, padding='VALID'):
                net['conv_1'] = ops.conv2d(inputs, 32, [3, 3], scope='conv_1')
                net['pool_1'] = ops.max_pool(net['conv_1'], [2, 2])
                net['conv_2'] = ops.conv2d(net['pool_1'], 32, [3, 3], scope='conv_2')
                net['pool_2'] = ops.max_pool(net['conv_2'], [2, 2])

                # Crop conv_2 to pool_2's spatial size so the two tensors can
                # be concatenated along the channel axis (axis 3)
                crop_size = net['pool_2'].get_shape().as_list()[1:3]
                net['conv_2_cropped'] = utils.get_central_crop(net['conv_2'], box=crop_size)
                net['concat'] = tf.concat(3, [net['conv_2_cropped'], net['pool_2']])
    return net
def model(images, inits, num_iterations=4, num_patches=68, patch_shape=(26, 26), num_channels=3):
    """Mnemonic Descent Method: iteratively refine landmark positions.

    Starting from `inits`, each iteration extracts patches at the current
    landmark estimates, runs them through the shared conv model, and lets a
    recurrent fully-connected layer predict a delta `dx` for all landmarks.

    Returns (final_landmarks, list_of_per_step_deltas, endpoints).
    """
    batch_size = images.get_shape().as_list()[0]
    hidden_state = tf.zeros((batch_size, 512))
    dx = tf.zeros((batch_size, num_patches, 2))
    endpoints = {}
    dxs = []

    for step in range(num_iterations):
        with tf.device('/cpu:0'):
            # Patch extraction is pinned to CPU (extract_patches op)
            patches = tf.image.extract_patches(images, tf.constant(patch_shape), inits+dx)
        # Fold the patch axis into the batch axis for the conv net
        patches = tf.reshape(patches, (batch_size * num_patches, patch_shape[0], patch_shape[1], num_channels))
        endpoints['patches'] = patches

        # reuse=step>0: all iterations share the same conv/rnn weights
        with tf.variable_scope('convnet', reuse=step>0):
            net = conv_model(patches)
            ims = net['concat']

        ims = tf.reshape(ims, (batch_size, -1))

        with tf.variable_scope('rnn', reuse=step>0) as scope:
            hidden_state = slim.ops.fc(tf.concat(1, [ims, hidden_state]), 512, activation=tf.tanh)
            prediction = slim.ops.fc(hidden_state, num_patches * 2, scope='pred', activation=None)
            endpoints['prediction'] = prediction
        prediction = tf.reshape(prediction, (batch_size, num_patches, 2))
        # Accumulate the predicted displacement
        dx += prediction
        dxs.append(dx)

    return inits + dx, dxs, endpoints
| {
"repo_name": "trigeorgis/mdm",
"path": "mdm_model.py",
"copies": "1",
"size": "2876",
"license": "bsd-3-clause",
"hash": -2132770990607596500,
"line_mean": 38.397260274,
"line_max": 113,
"alpha_frac": 0.6276077886,
"autogenerated": false,
"ratio": 3.00836820083682,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.413597598943682,
"avg_score": null,
"num_lines": null
} |
from functools import partial
import sqlalchemy as sa
from sqlalchemy.orm import Query
from sqlalchemy.orm import sessionmaker
from enkiblog.workflow import P, Allow
# Shorthand for SQLAlchemy's mapped-column attribute class; used to tell
# relational agents (ORM attributes) apart from plain principal strings.
InstrumentedAttribute = sa.orm.attributes.InstrumentedAttribute
def resolve_callable_props(context, permission):
    """Return *permission* as a ``P`` with its ``agents`` resolved.

    ``agents`` may be a chain of callables, each taking *context*; at most
    ten calls are made before giving up with a RuntimeError.
    """
    # TODO: move to workflow?
    agents = permission.agents
    calls_made = 0
    while callable(agents) and calls_made < 10:
        agents = agents(context)
        calls_made += 1
    if calls_made == 10:
        # Same behavior as the original for/else: exhausting all ten calls
        # raises, even if the final result is no longer callable.
        raise RuntimeError("Could not resolve '%s' callable property " % permission.agents)
    return P(permission.allowance, agents, permission.actions)
def get_allowance_permissions_per_state(context, request):
    """Yield ``(state_name, permission)`` for every Allow permission in the
    workflow's per-state ACLs, with callable agent properties resolved."""
    # pylint: disable=invalid-name
    resolve = partial(resolve_callable_props, context)
    for state in request.workflow.state_info(context, request):
        state_name = state['name']
        for raw_permission in state['data']['acl']:
            permission = resolve(raw_permission)
            if permission.allowance == Allow:
                yield state_name, permission
def get_relational_and_principale_states(allowance_per_state, actions):
    """Split allowing (state, agent) pairs by agent kind.

    Pairs whose agent is an ORM attribute (``InstrumentedAttribute``) go into
    the relational list; all other agents (plain principals) go into the
    principal list.  Only permissions sharing at least one of *actions* are
    considered.
    """
    # pylint: disable=invalid-name
    wanted_actions = set(actions)
    relational_states = []
    principale_states = []
    for state, permission in allowance_per_state:
        if wanted_actions.isdisjoint(permission.actions):
            continue
        for agent in permission.agents:
            if isinstance(agent, InstrumentedAttribute):
                relational_states.append((state, agent))
            else:
                principale_states.append((state, agent))
    return relational_states, principale_states
def acl_query_params_builder(cls, request, actions):
    """Build a SQLAlchemy filter expression restricting rows of `cls` to
    those the requesting user may perform `actions` on, based on workflow
    state ACLs.
    """
    effective_principals = set(request.effective_principals)
    user = request.user
    # TODO: add tests (or is it added ?)
    # !!!: doesn't allow to have localy stored custom acl!!!
    allowance_per_state = get_allowance_permissions_per_state(cls, request)
    relational_states, principale_states = get_relational_and_principale_states(
        allowance_per_state, actions)
    # States granted to any of the requester's principals (roles, groups, ...)
    allowing_states_for_principals = tuple(
        state for (state, agent) in principale_states
        if agent in effective_principals
    )
    params = cls.state.in_(allowing_states_for_principals)
    if user:
        # Additionally allow rows whose relational agent column (e.g. an
        # "author" relationship) matches the current user in that state
        acl_allowed_posts_queries = [
            sa.and_(cls.state == state, agent == user)
            for (state, agent) in relational_states]
        params = sa.or_(params, *acl_allowed_posts_queries)
    return params
class ACLFilteringQuery(Query):
    """Query subclass adding ACL-based row filtering for every queried
    entity."""

    def acl_filter(self, request, actions=('view',)):
        query = self
        # NOTE(review): iterates the private Query._entities attribute of
        # SQLAlchemy — may break across SQLAlchemy versions; confirm pinning.
        for entity in self._entities:
            params = acl_query_params_builder(entity.mapper.class_, request, actions)
            query = query.filter(params)
        return query
def create_session_maker(engine):
    """Return a sessionmaker bound to *engine* whose sessions produce
    ACL-filtering queries."""
    session_factory = sessionmaker()
    session_factory.configure(bind=engine, query_cls=ACLFilteringQuery)
    return session_factory
| {
"repo_name": "enkidulan/enkiblog",
"path": "src/enkiblog/core/meta.py",
"copies": "1",
"size": "2989",
"license": "apache-2.0",
"hash": -4651077371934993000,
"line_mean": 31.4891304348,
"line_max": 91,
"alpha_frac": 0.6717965875,
"autogenerated": false,
"ratio": 3.8767833981841764,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5048579985684176,
"avg_score": null,
"num_lines": null
} |
from functools import partial
import sublime
import sublime_plugin
from .lib import debug, manager, settings, util
from .lib.command import Command
class ToolRunner(sublime_plugin.WindowCommand):
    """Main command: run a specific tool, or a profile within a group.

    Whatever is not supplied as a command argument (tool / group / profile)
    is requested interactively through quick panels; the chained
    ``_ask_*`` / ``_on_ask_*`` methods implement that callback flow.
    """

    def run(self, tool=None, group=None, profile=None, default_profile=False, **kwargs):
        # Remaining kwargs are passed through to the Command wrapper
        command = Command(self.window, kwargs)

        if tool is not None:
            command.run_tool(tool)
        elif group is not None:
            if default_profile:
                # Resolve the configured default profile for this group
                profile = settings.get_setting("default_profiles").get(group)

            if profile is not None:
                command.run_profile(group, profile)
            else:
                self._ask_profile_and_run_command(
                    group, partial(self._on_ask_profile_done, command)
                )
        else:
            # Nothing specified: ask whether to run a tool or a group
            self._ask_type_to_run(partial(self._on_ask_type_done, command))

    def _ask_type_to_run(self, callback):
        self.window.show_quick_panel(["Tool", "Group"], callback, 0, 0, None)

    def _on_ask_type_done(self, command, selected_index):
        # Quick panel callbacks must re-open panels via set_timeout
        if selected_index == 0:
            sublime.set_timeout(
                partial(
                    self._ask_tool_to_run, partial(self._on_ask_tool_done, command)
                ),
                0,
            )
        elif selected_index == 1:
            sublime.set_timeout(
                partial(
                    self._ask_group_and_profile_to_run,
                    partial(self._on_ask_group_done, command),
                ),
                0,
            )

    def _ask_tool_to_run(self, callback):
        """Show a quick panel listing all configured tools."""
        tool_list = []
        tool_selection_list = []
        def_tool_list = settings.get_tools()

        if len(def_tool_list) <= 0:
            sublime.error_message("There are no tools configured")
            return

        debug.log("Creating Tools item list for Quick Panel", def_tool_list)

        for single_tool in def_tool_list:
            debug.log("Appending ", single_tool)
            # Fall back to the command string when no name is configured
            tool_name = single_tool.get("name", single_tool.get("cmd"))
            tool_list.append(tool_name)
            desc = single_tool.get("desc")
            if desc is not None:
                tool_selection_list.append(desc + " (" + tool_name + ")")
            else:
                tool_selection_list.append(tool_name)

        callback = partial(callback, tool_list)
        self.window.show_quick_panel(tool_selection_list, callback, 0, 0, None)

    def _on_ask_tool_done(self, command, tool_list, selected_index):
        tool_selected = tool_list[selected_index]
        # selected_index is -1 when the panel was cancelled
        if selected_index > -1:
            command.run_tool(tool_selected)

    def _ask_group_and_profile_to_run(self, callback):
        group_list = [single_group["name"] for single_group in settings.get_groups()]

        if len(group_list) <= 0:
            sublime.error_message("There are no groups configured")
        else:
            callback = partial(callback, group_list)
            self.window.show_quick_panel(group_list, callback, 0, 0, None)

    def _on_ask_group_done(self, command, group_list, selected_index):
        group_selected = group_list[selected_index]
        if selected_index >= 0:
            callback = partial(self._on_ask_profile_done, command)
            sublime.set_timeout(
                partial(self._ask_profile_and_run_command, group_selected, callback), 0
            )

    def _ask_profile_and_run_command(self, group_selected, callback):
        """Show a quick panel with the profiles of the chosen group."""
        profiles = settings.get_profiles(group_selected)
        if len(profiles) <= 0:
            sublime.error_message("This group has no profiles configured")
            return
        profile_list = [profile["name"] for profile in profiles]
        self.window.show_quick_panel(
            profile_list, partial(callback, group_selected, profile_list), 0, 0, None
        )

    def _on_ask_profile_done(
        self, command, group_selected, profile_list, selected_index
    ):
        if selected_index >= 0:
            selected_profile = profile_list[selected_index]
            command.run_profile(group_selected, selected_profile)
class ToolRunnerCancelCurrent(sublime_plugin.WindowCommand):
    """Cancel the tool command running for the currently active view."""

    def run(self):
        manager.cancel_command_for_view_id(self.window.active_view().id())
class ToolRunnerFocusOutput(sublime_plugin.WindowCommand):
    """Bring the output view associated with the active view into focus."""

    def run(self):
        source_view = self.window.active_view()
        target_view = manager.get_target_view_for_source_view(source_view)
        if target_view is not None:
            manager.ensure_visible_view(target_view, focus=True)
        else:
            # Fixed grammar in the user-facing notification
            # (was: "This view don't have an output")
            util.notify("This view doesn't have an output")
class ToolRunnerFocusSource(sublime_plugin.WindowCommand):
    """From an output view, jump back to the source view that produced it."""

    def run(self):
        target_view = self.window.active_view()
        source_view = manager.get_source_view_for_target_view(target_view)
        if source_view is not None:
            manager.ensure_visible_view(source_view, focus=True)
        else:
            util.notify("This view is not an output")
class ToolRunnerSwitchDefaultProfile(sublime_plugin.WindowCommand):
    """Change the default profile for a group, asking for the group first
    when it is not given.

    Quick-panel state is kept on the instance (``self.groups``,
    ``self.profile_list``, ``self.profile_group``) between the chained
    panel callbacks, and cleared once a profile was picked.
    """

    def run(self, profile_group=None):
        debug.log("Switching command for profile group: " + str(profile_group))
        if profile_group is None:
            self.ask_group_and_switch_profile()
        else:
            self.switch_profile(profile_group)

    def ask_group_and_switch_profile(self):
        self.groups = [group["name"] for group in settings.get_groups()]
        if len(self.groups) <= 0:
            sublime.error_message("There are no groups configured")
            return
        self.window.show_quick_panel(
            self.groups,
            partial(self.on_ask_group_done, self.switch_profile),
            0,
            0,
            None,
        )

    def on_ask_group_done(self, callback, selected_index):
        # selected_index < 0 means the panel was cancelled
        if selected_index < 0:
            return
        group_selected = self.groups[selected_index]
        if selected_index > -1:
            sublime.set_timeout(partial(callback, group_selected), 0)

    def switch_profile(self, profile_group):
        profiles = settings.get_profiles(profile_group)
        self.profile_group = profile_group
        self.profile_list = [profile["name"] for profile in profiles]
        self.window.show_quick_panel(self.profile_list, self.on_ask_profile, 0, 0, None)

    def on_ask_profile(self, selected_index):
        if selected_index > -1:
            # Persist the selection in the "default_profiles" setting
            selected_profile_name = self.profile_list[selected_index]
            current_settings = settings.get_setting("default_profiles", {})
            current_settings[self.profile_group] = selected_profile_name
            settings.set_setting("default_profiles", current_settings)
        # Drop the quick-panel state either way
        self.profile_list = None
        self.groups = None
class ToolRunnerOpenSettings(sublime_plugin.WindowCommand):
    """Open the ToolRunner settings file for the given scope.

    Fix: removed the pass-through ``__init__`` override, which only
    called the base-class constructor and added nothing.
    """

    def run(self, scope=None):
        settings.open_settings(self.window, scope)
class ToolRunnerListener(sublime_plugin.EventListener):
    """Global event hooks keeping the manager's view bookkeeping in sync."""

    def on_close(self, view):
        # A closed view can no longer serve as a source or as an output.
        manager.remove_source_view(view)
        manager.remove_target_view(view)

    def on_post_save(self, view):
        # debug.log("Saved view: %s" % view.id())
        source_view = manager.get_source_view_for_target_view(view)
        if source_view is None:
            # debug.log("The view %s is not an output view" % view.id())
            return
        # Once an output view is saved to disk it stops being managed:
        # detach it and make it behave like a normal, editable file.
        manager.remove_target_view(view)
        view.set_scratch(False)
        view.set_read_only(False)
def plugin_loaded():
    """Sublime Text entry point: initialise plugin settings on load."""
    debug.log("Plugin Loading")
    settings.on_loaded()
    debug.log("Plugin Loaded")
    # In development mode, drop cached submodules so edits are picked up.
    if settings.get_setting("devel"):
        debug.forget_modules()
def plugin_unloaded():
    """Sublime Text entry point: release settings hooks on unload."""
    settings.on_unloaded()
    debug.log("Plugin Unloaded")
| {
"repo_name": "KuttKatrea/sublime-toolrunner",
"path": "ToolRunner.py",
"copies": "1",
"size": "7862",
"license": "mit",
"hash": 3119225143096177700,
"line_mean": 32.0336134454,
"line_max": 88,
"alpha_frac": 0.6054439074,
"autogenerated": false,
"ratio": 3.8595974472263133,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49650413546263134,
"avg_score": null,
"num_lines": null
} |
from functools import partial
import sys, re
# `identifier` matches a full string that is a valid Python identifier
# (segment IDs are emitted as attribute names in generated code).
# On Python 3, re.UNICODE makes \w/\W Unicode-aware.
if sys.version_info[0] == 3:
    identifier = re.compile(r"^[^\d\W]\w*\Z", re.UNICODE)
else:
    identifier = re.compile(r"^[^\d\W]\w*\Z")
class CodeState(object):
    """Accumulates the pieces of a generated bee worker module.

    ``precode`` holds module-level code (imports plus operator bodies),
    ``classcode`` the worker class body, and ``placecode`` the body of
    the optional ``place()`` method.
    """

    def __init__(self, name):
        self.name = name
        self.precode = """import bee
from bee.segments import *
import libcontext
from libcontext.socketclasses import *
from libcontext.pluginclasses import *
"""
        self.class_statement = "class {0}(bee.worker):".format(name)
        self.place_statement = "def place(self):"
        self.classcode = ""
        self.placecode = ""
def cstrip(code):
    """Trim trailing spaces, then newlines, then '#' padding from code."""
    trimmed = code.rstrip(" ")
    trimmed = trimmed.rstrip("\n")
    return trimmed.rstrip("#")
def gen_transistor2(type_, segid, params, metaparams, codestate, m):
    """Emit a `transistor(type)` segment plus an optional triggerfunc."""
    # Tuple types are emitted verbatim; plain names get single-quoted.
    if not type_.startswith("("):
        type_ = "'" + type_ + "'"
    codestate.classcode += "%s = transistor(%s)\n\n" % (segid, type_)
    trigger = params.get("triggerfunc")
    if trigger is not None and trigger.strip():
        codestate.classcode += "%s = triggerfunc(%s)\n\n" % (trigger, segid)
def gen_transistor(segid, params, metaparams, codestate, m):
    """Emit a transistor whose type comes from the "type" metaparameter."""
    declared_type = metaparams["type"].strip()
    gen_transistor2(declared_type, segid, params, metaparams, codestate, m)
def gen_weaver(segid, params, metaparams, codestate, m):
    """Emit a `weaver(types, inputs...)` segment wired in declared order."""
    # Map each weaver input slot (inp1, inp2, ...) to the feeding segment.
    inputs = {}
    for con in m.connections:
        if con.end.segid == segid:
            inp = con.end.io
            other = con.start.segid
            if inp in inputs:
                raise Exception("Weaver %s.%s cannot have more than one input" % (segid, inp))
            inputs[inp] = other
    # Collect declared input types type1, type2, ... until the first gap.
    count = 0
    intype = []
    while 1:
        count += 1
        k = "type%d" % count
        if k not in metaparams: break
        t = metaparams[k].strip()
        if len(t):
            intype.append(t)
    # Every declared type slot must have a matching incoming connection.
    targets = []
    for nr, n in enumerate(range(len(intype))):
        inp = "inp%d" % (nr + 1)
        if inp not in inputs:
            raise Exception("Weaver %s.%s must have an input" % (segid, inp))
        targets.append(inputs[inp])
    # NOTE: %s of the tuple relies on its repr, e.g. "('int', 'float')".
    line = "%s = weaver(%s, %s)\n\n" % (segid, tuple(intype), ", ".join(targets))
    codestate.classcode += line
def gen_unweaver(segid, params, metaparams, codestate, m):
    """Emit an `unweaver(types, outputs...)` segment wired in declared order."""
    # Map each unweaver output slot (outp1, ...) to the consuming segment.
    outputs = {}
    for con in m.connections:
        if con.start.segid == segid:
            outp = con.start.io
            other = con.end.segid
            if outp in outputs:
                raise Exception("Unweaver %s.%s cannot have more than one output" % (segid, outp))
            outputs[outp] = other
    # Collect declared output types type1, type2, ... until the first gap.
    count = 0
    outtype = []
    while 1:
        count += 1
        k = "type%d" % count
        if k not in metaparams: break
        t = metaparams[k].strip()
        if len(t):
            outtype.append(t)
    # Every declared type slot must have a matching outgoing connection.
    targets = []
    for nr, n in enumerate(range(len(outtype))):
        outp = "outp%d" % (nr + 1)
        if outp not in outputs:
            raise Exception("Unweaver %s.%s must have an output" % (segid, outp))
        targets.append(outputs[outp])
    # NOTE: %s of the tuple relies on its repr, e.g. "('int', 'float')".
    line = "%s = unweaver(%s, %s)\n\n" % (segid, tuple(outtype), ", ".join(targets))
    codestate.classcode += line
def gen_antenna2(mode, type_, segid, params, metaparams, codestate, m):
    """Emit an `antenna(mode, type)` segment declaration."""
    if not type_.startswith("("):
        type_ = "'" + type_ + "'"
    codestate.classcode += "%s = antenna('%s', %s)\n\n" % (segid, mode, type_)
def gen_push_antenna(segid, params, metaparams, codestate, m):
    """Push-mode antenna; type taken from the "type" metaparameter."""
    declared_type = metaparams["type"].strip()
    gen_antenna2("push", declared_type, segid, params, metaparams, codestate, m)
def gen_pull_antenna(segid, params, metaparams, codestate, m):
    """Pull-mode antenna; type taken from the "type" metaparameter."""
    declared_type = metaparams["type"].strip()
    gen_antenna2("pull", declared_type, segid, params, metaparams, codestate, m)
def gen_output2(mode, type_, segid, params, metaparams, codestate, m):
    """Emit an `output(mode, type)` segment; triggers may get a triggerfunc."""
    if not type_.startswith("("):
        type_ = "'" + type_ + "'"
    codestate.classcode += "%s = output('%s', %s)\n" % (segid, mode, type_)
    if type_ == "'trigger'":
        trigger = params.get("triggerfunc")
        if trigger is not None and trigger.strip():
            codestate.classcode += "%s = triggerfunc(%s)\n\n" % (trigger, segid)
    codestate.classcode += '\n'
def gen_push_output(segid, params, metaparams, codestate, m):
    """Push-mode output; type taken from the "type" metaparameter."""
    declared_type = metaparams["type"].strip()
    gen_output2("push", declared_type, segid, params, metaparams, codestate, m)
def gen_pull_output(segid, params, metaparams, codestate, m):
    """Pull-mode output; type taken from the "type" metaparameter."""
    declared_type = metaparams["type"].strip()
    gen_output2("pull", declared_type, segid, params, metaparams, codestate, m)
def gen_variable2(type_, segid, params, metaparams, codestate, m):
    """Emit a `variable(type)` segment plus startvalue/parameter setup."""
    if not type_.startswith("("):
        type_ = "'" + type_ + "'"
    codestate.classcode += "%s = variable(%s)\n" % (segid, type_)
    is_param = params.get("is_parameter") == "True"
    if params.get("val") not in ("", None):
        # An initial value becomes startvalue(...) or parameter(...).
        setter = "parameter" if is_param else "startvalue"
        codestate.classcode += "%s(%s, %s)\n\n" % (setter, segid, params["val"])
    elif is_param:
        codestate.classcode += "parameter(%s)\n\n" % segid
    else:
        codestate.classcode += "\n"
def gen_variable(segid, params, metaparams, codestate, m):
    """Variable segment; type taken from the "type" metaparameter."""
    declared_type = metaparams["type"].strip()
    gen_variable2(declared_type, segid, params, metaparams, codestate, m)
def gen_push_buffer2(type_, segid, params, metaparams, codestate, m):
    """Emit a push-mode `buffer(...)` plus value/parameter/trigger setup."""
    if not type_.startswith("("):
        type_ = "'" + type_ + "'"
    codestate.classcode += "%s = buffer('push', %s)\n\n" % (segid, type_)
    is_param = params.get("is_parameter") == "True"
    if params.get("val") not in ("", None):
        setter = "parameter" if is_param else "startvalue"
        codestate.classcode += "%s(%s, %s)\n\n" % (setter, segid, params["val"])
    elif is_param:
        codestate.classcode += "parameter(%s)\n\n" % segid
    else:
        codestate.classcode += "\n"
    trigger = params.get("triggerfunc")
    if trigger is not None and trigger.strip():
        codestate.classcode += "%s = triggerfunc(%s)\n\n" % (trigger, segid)
def gen_pull_buffer(segid, params, metaparams, codestate, m):
    """Pull-mode buffer; type taken from the "type" metaparameter."""
    declared_type = metaparams["type"].strip()
    gen_pull_buffer2(declared_type, segid, params, metaparams, codestate, m)
def gen_pull_buffer2(type_, segid, params, metaparams, codestate, m):
    """Emit a pull-mode `buffer(...)` plus value/parameter/trigger setup."""
    if not type_.startswith("("):
        type_ = "'" + type_ + "'"
    codestate.classcode += "%s = buffer('pull', %s)\n\n" % (segid, type_)
    is_param = params.get("is_parameter") == "True"
    if params.get("val") not in ("", None):
        setter = "parameter" if is_param else "startvalue"
        codestate.classcode += "%s(%s, %s)\n\n" % (setter, segid, params["val"])
    elif is_param:
        codestate.classcode += "parameter(%s)\n\n" % segid
    else:
        codestate.classcode += "\n"
    trigger = params.get("triggerfunc")
    if trigger is not None and trigger.strip():
        codestate.classcode += "%s = triggerfunc(%s)\n\n" % (trigger, segid)
def gen_push_buffer(segid, params, metaparams, codestate, m):
    """Push-mode buffer; type taken from the "type" metaparameter."""
    declared_type = metaparams["type"].strip()
    gen_push_buffer2(declared_type, segid, params, metaparams, codestate, m)
def gen_modifier(segid, params, metaparams, codestate, m):
    """Emit a @modifier method whose body is the user-supplied code."""
    code = params.get("code", "pass")
    if code is None or not code.strip():
        code = "pass"
    # Re-indent the body by two spaces under the generated def.
    joiner = "\n" + "  "
    body = joiner.join(cstrip(code).split("\n"))
    text = "@modifier\ndef %s(self):\n  %s" % (segid, body)
    codestate.classcode += text + "\n\n"
def gen_operator(segid, params, metaparams, codestate, m):
    """Emit an operator segment backed by a module-level function."""
    intype = metaparams["intype"].strip()
    if not intype.startswith("("):
        intype = '"' + intype + '"'
    outtype = metaparams["outtype"].strip()
    if not outtype.startswith("("):
        outtype = '"' + outtype + '"'
    code = params.get("code", "pass")
    if code is None or not code.strip():
        code = "pass"
    funcname = "operator_func_%s" % segid
    # Function body lives in precode; the class only references its name.
    joiner = "\n" + "  "
    body = joiner.join(cstrip(code).split("\n"))
    codestate.precode += "def %s(self):\n  %s\n\n" % (funcname, body)
    codestate.classcode += "%s = operator(%s, %s, %s)\n\n" % (segid, funcname, intype, outtype)
def gen_custom_import_code(segid, params, metaparams, codestate, m):
    """Append verbatim user code to the module-level (import) section."""
    code = params.get("code", "")
    if code and code.strip():
        codestate.precode += "\n" + cstrip(code) + "\n\n"
def gen_custom_class_code(segid, params, metaparams, codestate, m):
    """Append verbatim user code to the worker class body."""
    code = params.get("code", "")
    if code and code.strip():
        codestate.classcode += "\n" + cstrip(code) + "\n\n"
def gen_custom_place_code(segid, params, metaparams, codestate, m):
    """Append verbatim user code to the place() section."""
    code = params.get("code", "")
    if code and code.strip():
        codestate.placecode += "\n" + cstrip(code) + "\n\n"
# Dispatch table: segment type name -> code-generator function.
# The partial(...) entries pre-bind the mode and/or type for the common
# primitive variants; the bare entries read the type from metaparameters.
generators = {
    "transistor.transistor": gen_transistor,
    "transistor.transistor_int": partial(gen_transistor2, "int"),
    "transistor.transistor_float": partial(gen_transistor2, "float"),
    "transistor.transistor_bool": partial(gen_transistor2, "bool"),
    "transistor.transistor_str": partial(gen_transistor2, "str"),
    "transistor.transistor_id": partial(gen_transistor2, "id"),
    "unweaver.unweaver": gen_unweaver,
    "weaver.weaver": gen_weaver,
    "antenna.push_antenna": gen_push_antenna,
    "antenna.push_antenna_trigger": partial(gen_antenna2, "push", "trigger"),
    "antenna.push_antenna_int": partial(gen_antenna2, "push", "int"),
    "antenna.push_antenna_float": partial(gen_antenna2, "push", "float"),
    "antenna.push_antenna_bool": partial(gen_antenna2, "push", "bool"),
    "antenna.push_antenna_str": partial(gen_antenna2, "push", "str"),
    "antenna.push_antenna_id": partial(gen_antenna2, "push", "id"),
    "antenna.pull_antenna": gen_pull_antenna,
    "antenna.pull_antenna_int": partial(gen_antenna2, "pull", "int"),
    "antenna.pull_antenna_float": partial(gen_antenna2, "pull", "float"),
    "antenna.pull_antenna_bool": partial(gen_antenna2, "pull", "bool"),
    "antenna.pull_antenna_str": partial(gen_antenna2, "pull", "str"),
    "antenna.pull_antenna_id": partial(gen_antenna2, "pull", "id"),
    "output.push_output": gen_push_output,
    "output.push_output_trigger": partial(gen_output2, "push", "trigger"),
    "output.push_output_int": partial(gen_output2, "push", "int"),
    "output.push_output_float": partial(gen_output2, "push", "float"),
    "output.push_output_bool": partial(gen_output2, "push", "bool"),
    "output.push_output_str": partial(gen_output2, "push", "str"),
    "output.push_output_id": partial(gen_output2, "push", "id"),
    "output.pull_output": gen_pull_output,
    "output.pull_output_int": partial(gen_output2, "pull", "int"),
    "output.pull_output_float": partial(gen_output2, "pull", "float"),
    "output.pull_output_bool": partial(gen_output2, "pull", "bool"),
    "output.pull_output_str": partial(gen_output2, "pull", "str"),
    "output.pull_output_id": partial(gen_output2, "pull", "id"),
    "variable.variable": gen_variable,
    "variable.variable_int": partial(gen_variable2, "int"),
    "variable.variable_float": partial(gen_variable2, "float"),
    "variable.variable_bool": partial(gen_variable2, "bool"),
    "variable.variable_str": partial(gen_variable2, "str"),
    "variable.variable_id": partial(gen_variable2, "id"),
    "modifier.modifier": gen_modifier,
    "operator.operator": gen_operator,
    "buffer.push_buffer": gen_push_buffer,
    "buffer.push_buffer_int": partial(gen_push_buffer2, "int"),
    "buffer.push_buffer_float": partial(gen_push_buffer2, "float"),
    "buffer.push_buffer_bool": partial(gen_push_buffer2, "bool"),
    "buffer.push_buffer_str": partial(gen_push_buffer2, "str"),
    "buffer.push_buffer_id": partial(gen_push_buffer2, "id"),
    "buffer.pull_buffer": gen_pull_buffer,
    "buffer.pull_buffer_int": partial(gen_pull_buffer2, "int"),
    "buffer.pull_buffer_float": partial(gen_pull_buffer2, "float"),
    "buffer.pull_buffer_bool": partial(gen_pull_buffer2, "bool"),
    "buffer.pull_buffer_str": partial(gen_pull_buffer2, "str"),
    "buffer.pull_buffer_id": partial(gen_pull_buffer2, "id"),
    "custom_code.custom_import_code": gen_custom_import_code,
    "custom_code.custom_class_code": gen_custom_class_code,
    "custom_code.custom_place_code": gen_custom_place_code,
}
def _find_segment(segid, segments):
    """Return the segment whose .segid equals `segid`; KeyError if absent."""
    for candidate in segments:
        if candidate.segid == segid:
            return candidate
    raise KeyError(segid)
def _add_segment(seg, segments, allsegments, segids=None):
    """Append `seg` to `segments`, pulling in (un)weaver dependencies first.

    Segments feeding a weaver are inserted before it; segments consuming
    an unweaver are inserted before it as well, so generated code defines
    dependencies before use. `segids` tracks what is already placed.

    NOTE(review): `m` is referenced below but is neither a parameter nor
    defined at module level in this file — the weaver/unweaver branches
    would raise NameError unless a global `m` is injected elsewhere.
    Confirm and thread `m` through as a parameter (the caller in
    workergen() must pass it too).
    """
    if segids is None: segids = []
    segid = seg.segid
    if segid in segids: return
    if seg.segtype.endswith(".weaver"):
        for con in m.connections:
            if con.end.segid == segid:
                other = allsegments[con.start.segid]
                if other not in segids: _add_segment(other, segments, allsegments, segids)
    elif seg.segtype.endswith(".unweaver"):
        for con in m.connections:
            if con.start.segid == segid:
                other = allsegments[con.end.segid]
                if other not in segids: _add_segment(other, segments, allsegments, segids)
    segids.append(segid)
    segments.append(seg)
def workergen(name, m):
    """Generate the Python source of a bee worker class named `name`.

    `m` is the model: it must expose `docstring`, `segments` (each with
    segid/segtype/parameters/metaparameters) and `connections` (each
    with .start/.end carrying segid and io). Returns the full module
    source as a string.
    """
    codestate = CodeState(name)
    if m.docstring is not None:
        codestate.classcode += "\"\"\"\n" + m.docstring + "\"\"\"\n"
    # Check for duplicate segids
    segids0 = set()
    #if m.docstring is not None:
    #    codestate.classcode += "\"\"\"\n" + m.docstring + "\"\"\"\n"
    def check_segid(segid):
        # Segment IDs become attribute names, so they must be valid
        # Python identifiers and unique within the worker.
        if re.match(identifier, segid) is None:
            raise Exception("Invalid segment ID '%s'" % segid)
        if segid in segids0:
            raise Exception("Duplicate segment ID '%s'" % segid)
        segids0.add(segid)
    for seg in m.segments:
        segid = seg.segid
        check_segid(segid)
        # triggerfunc values also become identifiers; validate them too.
        if seg.parameters is not None:
            for par in seg.parameters:
                if par.pname == "triggerfunc":
                    v = par.pvalue
                    if v is not None and len(v.strip()):
                        check_segid(v)
    #Reshuffle segment order in case of weavers/unweavers
    allsegments = dict([(seg.segid, seg) for seg in m.segments])
    segments = []
    for seg in m.segments:
        _add_segment(seg, segments, allsegments)
    #Generate code for segments
    for seg in segments:
        assert seg.segtype.startswith("segments.")
        segtype = seg.segtype[len("segments."):]
        params = {}
        if seg.parameters is not None:
            params = dict([(p.pname, p.pvalue) for p in seg.parameters])
        metaparams = {}
        if seg.metaparameters is not None:
            metaparams = dict([(p.pname, p.pvalue) for p in seg.metaparameters])
        generators[segtype](seg.segid, params, metaparams, codestate, m)
    #segid-to-type dict
    tdic = {}
    for seg in segments:
        type_ = seg.segtype.split(".")[-1]
        tdic[seg.segid] = type_
    #Generate code for connections
    # Pick the connection statement (connect/trigger/pretrigger) based on
    # the type of the source segment and, where relevant, its io name.
    for connection_name in m.connections:
        start_segmnent_id = connection_name.start.segid
        end_segment_id = connection_name.end.segid
        t1 = tdic[start_segmnent_id]
        t2 = tdic[end_segment_id]
        seg1 = allsegments[start_segmnent_id]
        seg2 = allsegments[end_segment_id]
        source, target = start_segmnent_id, end_segment_id
        arg = None
        k = "connect"
        if t1.startswith("push_antenna"):
            if t1 == "push_antenna_trigger" or t1 == "push_antenna" and seg1.type == "trigger":
                #Special case: ("push", "trigger") may send triggers
                k = "trigger"
            else:
                pass
        elif t1.startswith("pull_antenna"):
            pass
        elif t1.startswith("transistor"):
            pass
        elif t1 == "weaver":
            pass
        elif t1 == "unweaver":
            # unweaver wiring is emitted by its own generator; skip here.
            continue
        elif t1 == "test":
            k = "trigger"
        elif t1 == "modifier":
            k = "trigger"
        elif t1.startswith("variable"):
            if connection_name.start.io == "pre_update":
                k = "pretrigger"
                arg = "update"
            elif connection_name.start.io == "pre_output":
                k = "pretrigger"
            elif connection_name.start.io == "on_update":
                k = "trigger"
            elif connection_name.start.io == "on_output":
                k = "trigger"
                arg = "output"
        elif t1.startswith("push_buffer"):
            if connection_name.start.io == "pre_update":
                k = "pretrigger"
            elif connection_name.start.io == "on_update":
                k = "trigger"
        elif t1.startswith("pull_buffer"):
            if connection_name.start.io == "pre_output":
                k = "pretrigger"
            elif connection_name.start.io == "on_output":
                k = "trigger"
        else:
            raise Exception(t1)
        # Connections INTO a weaver are handled by the weaver generator.
        if t2 == "weaver": continue
        if arg is None:
            line = '%s(%s, %s)\n' % (k, source, target)
        else:
            line = '%s(%s, %s, "%s")\n' % (k, source, target, arg)
        codestate.classcode += line
    #Join code
    # Assemble: precode, then the class body indented by 2, then the
    # optional place() method indented by 4.
    if codestate.classcode == "" and codestate.placecode == "":
        codestate.classcode = "pass"
    code = codestate.precode + "\n"
    code += codestate.class_statement + "\n"
    indent = 2
    ind = "\n" + " " * indent
    code += ind[1:] + ind.join(codestate.classcode.split("\n"))
    if len(codestate.placecode):
        code += ind + codestate.place_statement
        indent = 4
        ind = "\n" + " " * indent
        # placecode always starts with "\n", which supplies the newline
        # after the "def place(self):" header.
        code += ind[1:] + ind.join(codestate.placecode.split("\n"))
    return code
| {
"repo_name": "agoose77/hivesystem",
"path": "hiveguilib/workergen.py",
"copies": "1",
"size": "18058",
"license": "bsd-2-clause",
"hash": 2504644721459431400,
"line_mean": 36.5426195426,
"line_max": 98,
"alpha_frac": 0.5883818806,
"autogenerated": false,
"ratio": 3.210311111111111,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4298692991711111,
"avg_score": null,
"num_lines": null
} |
from functools import partial
import tempfile
import cloudvolume
import numpy as np
import shutil
import posixpath
import pytest
import os
from scipy import sparse
import sys
import json
import re
# Scratch directory backing the file:// test volume; removed by the
# cv_supervoxels fixture at session teardown.
tempdir = tempfile.mkdtemp()
TEST_PATH = "file://{}".format(tempdir)
# Dataset names used by the mocked graphene info/manifest endpoints.
TEST_DATASET_NAME = "testvol"
PRECOMPUTED_MESH_TEST_DATASET_NAME = "meshvol_precompute"
DRACO_MESH_TEST_DATASET_NAME = "meshvol_draco"
GRAPHENE_SHARDED_MESH_TEST_DATASET_NAME = "meshvol_graphene_sharded"
PCG_LOCATION = "https://www.dynamicannotationframework.com/"
# Fixed segment ids and auth token used across the tests below.
TEST_SEG_ID = 144115188084020434
TEST_GRAPHENE_SHARDED_ID = 864691135213153080
TEST_TOKEN = "2371270ab23f129cc121dedbeef01294"
def test_graphene_auth_token(graphene_vol):
    """Valid secrets are accepted; missing/bad secrets must raise.

    Fix: the `secrets=None` attempt lacked an `assert False` after the
    constructor call (unlike the bad-token case below), so the test
    passed even when no AuthenticationError was raised.
    """
    cloudpath = "graphene://" + posixpath.join(PCG_LOCATION, 'segmentation', 'api/v1/', TEST_DATASET_NAME)
    # Both a plain token string and a dict-shaped secret are accepted.
    cloudvolume.CloudVolume(cloudpath, secrets=TEST_TOKEN)
    cloudvolume.CloudVolume(cloudpath, secrets={ "token": TEST_TOKEN })
    try:
        cloudvolume.CloudVolume(cloudpath, secrets=None)
        assert False
    except cloudvolume.exceptions.AuthenticationError:
        pass
    try:
        cloudvolume.CloudVolume(cloudpath, secrets={ "token": "Z@(ASINAFSOFAFOSNS" })
        assert False
    except cloudvolume.exceptions.AuthenticationError:
        pass
@pytest.fixture()
def cv_graphene_mesh_precomputed(requests_mock):
    """Graphene CloudVolume whose meshes are plain precomputed fragments.

    The graphene info and mesh-manifest HTTP endpoints are mocked;
    fragment files themselves come from the local test_cv directory.
    """
    test_dir = os.path.dirname(os.path.abspath(__file__))
    graphene_test_cv_dir = os.path.join(test_dir,'test_cv')
    graphene_test_cv_path = "file://{}".format(graphene_test_cv_dir)
    # Minimal graphene info document pointing at the local data dir.
    info_d = {
        "app": {
            "supported_api_versions": [
                0,
                1
            ]
        },
        "data_dir": graphene_test_cv_path,
        "data_type": "uint64",
        "graph": {
            "chunk_size": [
                512,
                512,
                128
            ],
            "n_layers": 9,
            "n_bits_for_layer_id": 8,
            "spatial_bit_masks": {
                x: 10 for x in range(255)
            },
        },
        "chunks_start_at_voxel_offset": False,
        "mesh": "mesh_mip_2_err_40_sv16",
        "num_channels": 1,
        "scales": [{
            "chunk_sizes": [[ 512, 512, 16 ]],
            "compressed_segmentation_block_size": [
                8,
                8,
                8
            ],
            "encoding": "compressed_segmentation",
            "key": "8_8_40",
            "resolution": [
                8,
                8,
                40
            ],
            "size": [
                43520,
                26112,
                2176
            ],
            "voxel_offset": [
                17920,
                14848,
                0
            ]
        }],
        "type": "segmentation"
    }
    requests_mock.get(posixpath.join(PCG_LOCATION,
                      'segmentation/table',
                      PRECOMPUTED_MESH_TEST_DATASET_NAME,
                      "info"), json=info_d)
    frag_files = os.listdir(os.path.join(graphene_test_cv_dir, info_d['mesh']))
    # the file are saved as .gz but we want to list the non gz version
    # as cloudvolume will take care of finding the compressed files
    frag_files = [f[:-3] for f in frag_files if f[0]=='9']
    frag_d = {'fragments':frag_files}
    mock_url = posixpath.join(PCG_LOCATION,
                              "meshing/api/v1/table",
                              PRECOMPUTED_MESH_TEST_DATASET_NAME,
                              "manifest/{}:0?verify=True".format(TEST_SEG_ID))
    requests_mock.get(mock_url, json=frag_d)
    cloudpath = posixpath.join(PCG_LOCATION,
                               'segmentation/table',
                               PRECOMPUTED_MESH_TEST_DATASET_NAME)
    yield cloudvolume.CloudVolume("graphene://" + cloudpath, secrets=TEST_TOKEN)
@pytest.fixture()
def cv_graphene_mesh_draco(requests_mock):
    """Graphene CloudVolume whose meshes are draco-encoded fragments.

    Same shape as cv_graphene_mesh_precomputed, but the info document
    carries a "mesh_metadata" section required for draco decoding.
    """
    test_dir = os.path.dirname(os.path.abspath(__file__))
    graphene_test_cv_dir = os.path.join(test_dir,'test_cv')
    graphene_test_cv_path = "file://{}".format(graphene_test_cv_dir)
    info_d = {
        "app": {
            "supported_api_versions": [
                0,
                1
            ]
        },
        "data_dir": graphene_test_cv_path,
        "data_type": "uint64",
        "graph": {
            "chunk_size": [
                512,
                512,
                128
            ],
            "n_layers": 9,
            "n_bits_for_layer_id": 8,
            "spatial_bit_masks": {
                x: 10 for x in range(255)
            },
        },
        "chunks_start_at_voxel_offset": False,
        "mesh": "mesh_mip_2_draco_sv16",
        "mesh_metadata": {
            "max_meshed_layer": 6,
            "uniform_draco_grid_size": 21
        },
        "num_channels": 1,
        "scales": [{
            "chunk_sizes": [[ 512, 512, 16 ]],
            "compressed_segmentation_block_size": [
                8,
                8,
                8
            ],
            "encoding": "compressed_segmentation",
            "key": "8_8_40",
            "resolution": [
                8,
                8,
                40
            ],
            "size": [
                43520,
                26112,
                2176
            ],
            "voxel_offset": [
                17920,
                14848,
                0
            ]
        }],
        "type": "segmentation"
    }
    infourl = posixpath.join(PCG_LOCATION,
                             'segmentation/table',
                             DRACO_MESH_TEST_DATASET_NAME,
                             "info")
    requests_mock.get(infourl, json=info_d)
    frag_files = os.listdir(os.path.join(graphene_test_cv_dir, info_d['mesh']))
    # we want to filter out the manifest file
    frag_files = [ f for f in frag_files if f[0] == '1' ]
    frag_d = { 'fragments': frag_files }
    mock_url = posixpath.join(
        PCG_LOCATION, 'meshing/api/v1/table',
        DRACO_MESH_TEST_DATASET_NAME,
        "manifest/{}:0?verify=True".format(TEST_SEG_ID)
    )
    requests_mock.get(mock_url, json=frag_d)
    cloudpath = posixpath.join(PCG_LOCATION,
                               'segmentation/table',
                               DRACO_MESH_TEST_DATASET_NAME)
    yield cloudvolume.CloudVolume('graphene://' + cloudpath, secrets=TEST_TOKEN)
@pytest.fixture()
def cv_graphene_sharded(requests_mock):
    """Graphene CloudVolume with sharded meshes hosted on a real GCS bucket.

    Manifest endpoints are mocked (both verify=True and the speculative
    verify=False form); fragment reads fall through to Google Storage
    via `real_http=True`, so this fixture needs network access.
    """
    test_dir = os.path.dirname(os.path.abspath(__file__))
    graphene_test_cv_dir = os.path.join(test_dir,'test_cv')
    graphene_test_cv_path = "gs://seunglab-test/graphene/meshes"
    with open(os.path.join(graphene_test_cv_dir, 'sharded_info.json'), 'r') as fp:
        info_d = json.load(fp)
    info_d['data_dir']=graphene_test_cv_path
    infourl = posixpath.join(PCG_LOCATION,
                             'segmentation/table',
                             GRAPHENE_SHARDED_MESH_TEST_DATASET_NAME,
                             "info")
    requests_mock.get(infourl, json=info_d)
    # Shard fragment spec: "~<shard file>:<byte offset>:<byte length>",
    # plus one unsharded initial-mesh entry.
    valid_manifest={
        "fragments": [
            "~6/29568-0.shard:765877565:4454",
            "~6/50112-0.shard:129695820:17794",
            "~7/3296-0.shard:727627771:13559",
            "~7/3264-2.shard:660015424:21225",
            "~7/6400-3.shard:478017968:31760",
            "~7/7424-2.shard:9298231:40730",
            "~7/4320-0.shard:13324264:53780",
            "~6/29568-0.shard:27890566:21061",
            "516154738544909386:0:40960-49152_57344-65536_0-16384"
        ],
        "seg_ids": [
            440473154180453586,
            446120245900606131,
            511651138917622208,
            511580770173215172,
            518476907102810232,
            520728706916532712,
            513902938730988744,
            440473154180397181,
            516154738544909386
        ]
    }
    speculative_manifest = {
        "fragments": [
            "~440473154180453586:6:440473154180087808:29568-0.shard:929",
            "~511651138917622208:7:511651138915794944:3296-0.shard:481",
            "~520728706916532712:7:520728706914713600:7424-2.shard:824",
            "~518476907102810232:7:518476907101028352:6400-3.shard:699",
            "~511580770173215172:7:511580770171617280:3264-2.shard:745",
            "~513902938730988744:7:513902938729480192:4320-0.shard:170",
            "~446120245900606131:6:446120245900345344:50112-0.shard:629",
            "~440473154180397181:6:440473154180087808:29568-0.shard:38",
            "516154738544909386:0:40960-49152_57344-65536_0-16384"
        ],
        "seg_ids": [
            440473154180453586,
            511651138917622208,
            520728706916532712,
            518476907102810232,
            511580770173215172,
            513902938730988744,
            446120245900606131,
            440473154180397181,
            516154738544909386
        ]
    }
    verify_manifest_url = posixpath.join(
        PCG_LOCATION, 'meshing/api/v1/table',
        GRAPHENE_SHARDED_MESH_TEST_DATASET_NAME,
        "manifest/{}:0?verify=True".format(TEST_GRAPHENE_SHARDED_ID)
    )
    speculative_manifest_url = posixpath.join(
        PCG_LOCATION, 'meshing/api/v1/table',
        GRAPHENE_SHARDED_MESH_TEST_DATASET_NAME,
        "manifest/{}:0?verify=False".format(TEST_GRAPHENE_SHARDED_ID)
    )
    requests_mock.get(verify_manifest_url, json=valid_manifest)
    requests_mock.get(speculative_manifest_url, json=speculative_manifest)
    # Let actual fragment fetches hit Google Storage unmocked.
    matcher = re.compile('https://storage.googleapis.com/')
    requests_mock.get(matcher,real_http=True)
    cloudpath = posixpath.join(PCG_LOCATION, 'segmentation/table/', GRAPHENE_SHARDED_MESH_TEST_DATASET_NAME)
    yield cloudvolume.CloudVolume('graphene://' + cloudpath, use_https=True, secrets=TEST_TOKEN)
@pytest.fixture(scope='session')
def cv_supervoxels(N=64, blockN=16):
    """Create an on-disk segmentation volume of cubic labelled blocks.

    The N^3 volume is partitioned into (blockN)^3 blocks, each filled
    with a distinct uint64 id. Yields the file:// cloudpath; the backing
    temp directory is removed at session teardown.
    """
    block_per_row = int(N / blockN)
    chunk_size = [32, 32, 32]
    info = cloudvolume.CloudVolume.create_new_info(
        num_channels=1,
        layer_type='segmentation',
        data_type='uint64',
        encoding='raw',
        resolution=[4, 4, 40],  # Voxel scaling, units are in nanometers
        voxel_offset=[0, 0, 0],  # x,y,z offset in voxels from the origin
        # Pick a convenient size for your underlying chunk representation
        # Powers of two are recommended, doesn't need to cover image exactly
        chunk_size=chunk_size,  # units are voxels
        volume_size=[N, N, N],
    )
    vol = cloudvolume.CloudVolume(TEST_PATH, info=info)
    vol.commit_info()
    # Label voxels by which block they fall in: block coords are raveled
    # into a single flat id per block.
    xx, yy, zz = np.meshgrid(*[np.arange(0, N) for cs in chunk_size])
    id_ind = (
        np.uint64(xx / blockN),
        np.uint64(yy / blockN),
        np.uint64(zz / blockN)
    )
    id_shape = (block_per_row, block_per_row, block_per_row)
    seg = np.ravel_multi_index(id_ind, id_shape)
    vol[:] = np.uint64(seg)
    yield TEST_PATH
    shutil.rmtree(tempdir)
@pytest.fixture()
def graphene_vol(cv_supervoxels, requests_mock, monkeypatch, N=64):
    """Graphene CloudVolume layered over the local supervoxel test volume.

    Fix: the "graph" dict listed "chunk_size" twice; the first entry
    ([64, 64, 64]) was silently discarded by the duplicate key, so only
    the effective [256, 256, 512] value is kept. Also removed the
    unused local `chunk_size`.
    """
    info_d = {
        "data_dir": cv_supervoxels,
        "data_type": "uint64",
        "graph": {
            "bounding_box": [2048, 2048, 512],
            "chunk_size": [256, 256, 512],
            "cv_mip": 0,
            "n_bits_for_layer_id": 8,
            "n_layers": 12,
            "spatial_bit_masks": {
                '1': 10, '2': 10, '3': 9,
                '4': 8, '5': 7, '6': 6,
                '7': 5, '8': 4, '9': 3,
                '10': 2, '11': 1, '12': 1
            }
        },
        "app": { "supported_api_versions": [0, 1] },
        "mesh": "mesh_mip_2_err_40_sv16",
        "num_channels": 1,
        "scales": [
            {
                "chunk_sizes": [
                    [32, 32, 32]
                ],
                "compressed_segmentation_block_size": [8, 8, 8],
                "encoding": "compressed_segmentation",
                "key": "4_4_40",
                "resolution": [4, 4, 40],
                "size": [N, N, N],
                "voxel_offset": [0, 0, 0]
            }
        ],
        "type": "segmentation"
    }
    infourl = posixpath.join(PCG_LOCATION, 'segmentation/table', TEST_DATASET_NAME, "info")
    requests_mock.get(infourl, json=info_d)

    def mock_get_leaves(self, root_id, bbox, mip):
        # Pretend every root resolves to the same four supervoxels.
        return np.array([0, 1, 2, 3], dtype=np.uint64)

    cloudpath = "graphene://" + posixpath.join(PCG_LOCATION, 'segmentation', 'api/v1/', TEST_DATASET_NAME)
    gcv = cloudvolume.CloudVolume(cloudpath, secrets=TEST_TOKEN)
    gcv.get_leaves = partial(mock_get_leaves, gcv)
    yield gcv
def test_gcv(graphene_vol):
    """Downloads through the graphene layer honor segids and shapes."""
    # With segids, the mocked leaves are painted with the root id 999.
    cutout = graphene_vol.download(np.s_[0:5,0:5,0:5], segids=[999])
    assert (np.all(cutout==999))
    # Plain indexing returns supervoxels with a trailing channel axis.
    cutout_sv = graphene_vol[0:5,0:5,0:5]
    assert cutout_sv.shape == (5,5,5,1)
    assert graphene_vol[0,0,0].shape == (1,1,1,1)
def test_get_roots(graphene_vol):
    """get_roots handles empty input and id lists with duplicates.

    NOTE(review): these assertions expect the ids to map to themselves —
    presumably the behavior against the mocked endpoints; confirm.
    """
    # Empty input must yield an empty result.
    roots = graphene_vol.get_roots([])
    assert np.all(roots == [])
    segids = [0, 0, 0, 0, 0]
    roots = graphene_vol.get_roots(segids)
    assert np.all(roots == segids)
    segids = [0, 864691135462849854, 864691135462849854, 0]
    roots = graphene_vol.get_roots(segids)
    assert np.all(roots == segids)
def faces_to_edges(faces, return_index=False):
    """
    Given a list of faces (n,3), return a list of edges (n*3,2)

    Parameters
    -----------
    faces : (n, 3) int
        Vertex indices representing faces
    return_index : bool
        If True, also return the index of the face each edge came from.

    Returns
    -----------
    edges : (n*3, 2) int
        Vertex indices representing edges
    """
    faces = np.asanyarray(faces)
    # Unroll each triangle (a, b, c) into the edges (a,b), (b,c), (c,a).
    edges = faces[:, [0, 1, 1, 2, 2, 0]].reshape((-1, 2))
    if not return_index:
        return edges
    # The reshape keeps face order, so edge i belongs to face i // 3.
    face_index = np.repeat(np.arange(len(faces)), 3)
    return edges, face_index
def create_csgraph(vertices, edges, euclidean_weight=True, directed=False):
    '''
    Builds a csr graph from vertices and edges, with optional control
    over weights as boolean or based on Euclidean distance.
    '''
    if euclidean_weight:
        starts = vertices[edges[:, 0]]
        ends = vertices[edges[:, 1]]
        weights = np.linalg.norm(starts - ends, axis=1)
        use_dtype = np.float32
    else:
        weights = np.ones((len(edges),)).astype(np.int8)
        use_dtype = np.int8
    if directed:
        edges = edges.T
    else:
        # Mirror every edge so the adjacency matrix is symmetric.
        edges = np.concatenate([edges.T, edges.T[[1, 0]]], axis=1)
        weights = np.concatenate([weights, weights]).astype(dtype=use_dtype)
    n_vertices = len(vertices)
    return sparse.csr_matrix(
        (weights, edges), shape=(n_vertices, n_vertices), dtype=use_dtype
    )
@pytest.mark.skipif(sys.version_info < (3, 0), reason="requires python3 or higher")
def test_decode_segid(cv_graphene_mesh_draco):
    """decode_label splits a graphene label into level/x/y/z/segid fields."""
    decoded = cv_graphene_mesh_draco.meta.decode_label(648518346349515986)
    assert decoded.level == 9
    assert decoded.x == 0
    assert decoded.y == 0
    assert decoded.z == 0
    assert decoded.segid == 8164562
    # Hand-built label: 8 level bits, then 10 bits per spatial axis
    # (matching the fixture's spatial_bit_masks), remainder is segid.
    level = '00000101' # 5
    x = '0000000001' # 1
    y = '0000000010' # 2
    z = '0000000011' # 3
    segid = '00000000000000000000001010' # 10
    label = int(level + x + y + z + segid, 2)
    decoded = cv_graphene_mesh_draco.meta.decode_label(label)
    assert decoded.level == 5
    assert decoded.x == 1
    assert decoded.y == 2
    assert decoded.z == 3
    assert decoded.segid == 10
    # encode_label must round-trip through decode_label.
    encoded = cv_graphene_mesh_draco.meta.encode_label(*decoded)
    assert decoded == cv_graphene_mesh_draco.meta.decode_label(encoded)
@pytest.mark.skipif(sys.version_info < (3, 0), reason="requires python3 or higher")
def test_graphene_mesh_get_precomputed(cv_graphene_mesh_precomputed):
    """Fetched precomputed mesh has the expected connected-component count."""
    mesh = cv_graphene_mesh_precomputed.mesh.get(TEST_SEG_ID)
    edges = faces_to_edges(mesh[TEST_SEG_ID].faces)
    graph = create_csgraph(mesh[TEST_SEG_ID].vertices,
                           edges,
                           directed=False)
    ccs, labels = sparse.csgraph.connected_components(graph,
                                                      directed=False)
    # Known fixture property: the test mesh splits into 3 components.
    assert(ccs==3)
@pytest.mark.skipif(sys.version_info < (3, 0), reason="requires python3 or higher")
def test_graphene_mesh_get_draco(cv_graphene_mesh_draco):
    """Fetched draco mesh has the expected connected-component count."""
    mesh = cv_graphene_mesh_draco.mesh.get(TEST_SEG_ID)
    edges = faces_to_edges(mesh[TEST_SEG_ID].faces)
    graph = create_csgraph(mesh[TEST_SEG_ID].vertices,
                           edges,
                           directed=False)
    ccs, labels = sparse.csgraph.connected_components(graph,
                                                      directed=False)
    # Known fixture property: the test mesh splits into 3 components.
    assert(ccs==3)
@pytest.mark.skipif(sys.version_info < (3, 0), reason="requires python3 or higher")
def test_graphene_mesh_get_graphene_sharded(cv_graphene_sharded):
    """Fetched sharded mesh has the expected connected-component count."""
    mesh = cv_graphene_sharded.mesh.get(TEST_GRAPHENE_SHARDED_ID)
    edges = faces_to_edges(mesh[TEST_GRAPHENE_SHARDED_ID].faces)
    graph = create_csgraph(mesh[TEST_GRAPHENE_SHARDED_ID].vertices,
                           edges,
                           directed=False)
    ccs, labels = sparse.csgraph.connected_components(graph,
                                                      directed=False)
    # Known fixture property: the sharded mesh has 21 components.
    assert(ccs==21)
| {
"repo_name": "seung-lab/cloud-volume",
"path": "test/test_graphene.py",
"copies": "1",
"size": "15435",
"license": "bsd-3-clause",
"hash": -1918644976700846300,
"line_mean": 29.0877192982,
"line_max": 106,
"alpha_frac": 0.6206673145,
"autogenerated": false,
"ratio": 2.9739884393063583,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8968080966410275,
"avg_score": 0.025314957479216613,
"num_lines": 513
} |
from functools import partial
import tensorflow as tf
from tensorflow.contrib.framework import add_arg_scope
from tfsnippet.utils import (validate_int_tuple_arg, is_integer,
add_name_and_scope_arg_doc, InputSpec,
get_static_shape)
from .conv2d_ import conv2d, deconv2d
# Public API of this module.
__all__ = [
    'resnet_general_block',
    'resnet_conv2d_block',
    'resnet_deconv2d_block',
]
@add_arg_scope
@add_name_and_scope_arg_doc
def resnet_general_block(conv_fn,
                         input,
                         in_channels,
                         out_channels,
                         kernel_size,
                         strides=1,
                         shortcut_conv_fn=None,
                         shortcut_kernel_size=1,
                         shortcut_force_conv=False,
                         resize_at_exit=False,
                         activation_fn=None,
                         normalizer_fn=None,
                         dropout_fn=None,
                         name=None,
                         scope=None):
    """
    A general implementation of ResNet block.
    The architecture of this ResNet implementation follows the work
    "Wide residual networks" (Zagoruyko & Komodakis, 2016). It basically does
    the following things (`normalizer_fn`, `activation_fn` and `dropout_fn`
    are simply skipped when not specified):
    .. code-block:: python
        shortcut = input
        if strides != 1 or in_channels != out_channels or shortcut_force_conv:
            shortcut = shortcut_conv_fn(  # `conv_fn`, if not specified
                input=shortcut,
                out_channels=out_channels,
                kernel_size=shortcut_kernel_size,
                strides=strides,
                scope='shortcut'
            )
        residual = input
        residual = conv_fn(
            input=activation_fn(normalizer_fn(residual)),
            out_channels=in_channels if resize_at_exit else out_channels,
            kernel_size=kernel_size,
            # the resizing (strided) conv is "conv_1" when `resize_at_exit`
            # is True, otherwise it is "conv"
            strides=1 if resize_at_exit else strides,
            scope='conv'
        )
        residual = dropout_fn(residual)
        residual = conv_fn(
            input=activation_fn(normalizer_fn(residual)),
            out_channels=out_channels,
            kernel_size=kernel_size,
            strides=strides if resize_at_exit else 1,
            scope='conv_1'
        )
        output = shortcut + residual
    Args:
        conv_fn: The convolution function for "conv" and "conv_1"
            convolutional layers. It must accept, and only expect, 5 named
            arguments ``(input, out_channels, kernel_size, strides, scope)``.
        input (Tensor): The input tensor.
        in_channels (int): The channel numbers of the tensor.
        out_channels (int): The channel numbers of the output.
        kernel_size (int or tuple[int]): Kernel size over spatial dimensions,
            for "conv" and "conv_1" convolutional layers.
        strides (int or tuple[int]): Strides over spatial dimensions,
            for the "shortcut" layer and the resizing one of the two
            residual convolutions (see `resize_at_exit`).
        shortcut_conv_fn: The convolution function for the "shortcut"
            convolutional layer. It must accept, and only expect, 5 named
            arguments ``(input, out_channels, kernel_size, strides, scope)``.
            If not specified, use `conv_fn`.
        shortcut_kernel_size (int or tuple[int]): Kernel size over spatial
            dimensions, for the "shortcut" convolutional layer.
        shortcut_force_conv (bool): If :obj:`True`, force to apply a linear
            convolution transformation on the shortcut path.
            Otherwise (by default) only apply the transformation if necessary.
        resize_at_exit (bool): If :obj:`True`, resize the spatial dimensions
            at the "conv_1" convolutional layer. If :obj:`False`, resize at
            the "conv" convolutional layer. (see above)
        activation_fn: The activation function.
        normalizer_fn: The normalizer function.
        dropout_fn: The dropout function.
    Returns:
        tf.Tensor: The output tensor.
    """
    def validate_size_tuple(n, s):
        if is_integer(s):
            # Do not change a single integer into a tuple!
            # This is because we do not know the dimensionality of the
            # convolution operation here, so we cannot build the size
            # tuple with correct number of elements from the integer notation.
            return int(s)
        return validate_int_tuple_arg(n, s)
    def has_non_unit_item(x):
        # True if `x` (an int or a tuple of ints) contains any stride != 1.
        if is_integer(x):
            return x != 1
        else:
            return any(i != 1 for i in x)
    # check the parameters
    input = tf.convert_to_tensor(input)
    in_channels = int(in_channels)
    out_channels = int(out_channels)
    kernel_size = validate_size_tuple('kernel_size', kernel_size)
    strides = validate_size_tuple('strides', strides)
    shortcut_kernel_size = validate_size_tuple(
        'shortcut_kernel_size', shortcut_kernel_size)
    if shortcut_conv_fn is None:
        shortcut_conv_fn = conv_fn
    # define two types of convolution operations: resizing conv (uses the
    # caller's `strides`), and size keeping conv (always strides=1)
    def resize_conv(input, kernel_size, scope, conv_fn=conv_fn):
        return conv_fn(input=input, out_channels=out_channels,
                       kernel_size=kernel_size, strides=strides,
                       scope=scope)
    def keep_conv(input, kernel_size, n_channels, scope):
        return conv_fn(input=input, out_channels=n_channels,
                       kernel_size=kernel_size, strides=1,
                       scope=scope)
    # define a helper to apply fn on input `x` (no-op when fn is None)
    def apply_fn(fn, x, scope):
        if fn is not None:
            with tf.variable_scope(scope):
                x = fn(x)
        return x
    with tf.variable_scope(scope, default_name=name or 'resnet_general_block'):
        # build the shortcut path: a linear projection is only needed when
        # the spatial size or the channel count changes (or when forced)
        if has_non_unit_item(strides) or in_channels != out_channels or \
                shortcut_force_conv:
            shortcut = resize_conv(
                input, shortcut_kernel_size, scope='shortcut',
                conv_fn=shortcut_conv_fn
            )
        else:
            shortcut = input
        # build the residual path: exactly one of the two convs resizes
        if resize_at_exit:
            conv0 = partial(
                keep_conv, kernel_size=kernel_size, scope='conv',
                n_channels=in_channels
            )
            conv1 = partial(
                resize_conv, kernel_size=kernel_size, scope='conv_1')
        else:
            conv0 = partial(
                resize_conv, kernel_size=kernel_size, scope='conv')
            conv1 = partial(
                keep_conv, kernel_size=kernel_size, scope='conv_1',
                n_channels=out_channels
            )
        with tf.variable_scope('residual'):
            # pre-activation ordering: norm -> activation -> conv
            residual = input
            residual = apply_fn(normalizer_fn, residual, 'norm')
            residual = apply_fn(activation_fn, residual, 'activation')
            residual = conv0(residual)
            residual = apply_fn(dropout_fn, residual, 'dropout')
            residual = apply_fn(normalizer_fn, residual, 'norm_1')
            residual = apply_fn(activation_fn, residual, 'activation_1')
            residual = conv1(residual)
        # merge the shortcut path and the residual path
        output = shortcut + residual
    return output
@add_arg_scope
@add_name_and_scope_arg_doc
def resnet_conv2d_block(input,
                        out_channels,
                        kernel_size,
                        strides=(1, 1),
                        shortcut_kernel_size=(1, 1),
                        shortcut_force_conv=False,
                        channels_last=True,
                        resize_at_exit=True,
                        activation_fn=None,
                        normalizer_fn=None,
                        weight_norm=False,
                        dropout_fn=None,
                        kernel_initializer=None,
                        kernel_regularizer=None,
                        kernel_constraint=None,
                        use_bias=None,
                        bias_initializer=tf.zeros_initializer(),
                        bias_regularizer=None,
                        bias_constraint=None,
                        trainable=True,
                        name=None,
                        scope=None):
    """
    2D convolutional ResNet block.
    This is a thin wrapper: it infers ``in_channels`` from `input`, binds
    every :func:`conv2d` keyword argument once, and delegates the block
    construction to :func:`resnet_general_block`.
    Args:
        input (Tensor): The input tensor, at least 4-d.
        out_channels (int): The channel numbers of the output.
        kernel_size (int or tuple[int]): Kernel size over spatial dimensions,
            for "conv" and "conv_1" convolutional layers.
        strides (int or tuple[int]): Strides over spatial dimensions,
            for all three convolutional layers.
        shortcut_kernel_size (int or tuple[int]): Kernel size over spatial
            dimensions, for the "shortcut" convolutional layer.
        shortcut_force_conv (bool): If :obj:`True`, force to apply a linear
            convolution transformation on the shortcut path.
            Otherwise (by default) only apply the transformation if necessary.
        channels_last (bool): If :obj:`True`, the channel axis is the last
            axis of `input` (data format "NHWC"); otherwise it is the
            third-to-last axis ("NCHW").
        resize_at_exit (bool): See :func:`resnet_general_block`.
        activation_fn: The activation function.
        normalizer_fn: The normalizer function.
        weight_norm: Passed to :func:`conv2d`.
        dropout_fn: The dropout function.
        kernel_initializer: Passed to :func:`conv2d`.
        kernel_regularizer: Passed to :func:`conv2d`.
        kernel_constraint: Passed to :func:`conv2d`.
        use_bias: Whether or not to use `bias` in :func:`conv2d`?
            :obj:`True` always uses a bias; :obj:`False` never does;
            :obj:`None` (default) uses a bias only if `normalizer_fn`
            is not given.
        bias_initializer: Passed to :func:`conv2d`.
        bias_regularizer: Passed to :func:`conv2d`.
        bias_constraint: Passed to :func:`conv2d`.
        trainable: Passed to :func:`conv2d`.
    Returns:
        tf.Tensor: The output tensor.
    See Also:
        :func:`resnet_general_block`
    """
    # Validate the input tensor and locate the channel axis, which depends
    # on the data format.
    if channels_last:
        c_axis = -1
        spec = InputSpec(shape=('...', '?', '?', '?', '*'))
    else:
        c_axis = -3
        spec = InputSpec(shape=('...', '?', '*', '?', '?'))
    input = spec.validate('input', input)
    in_channels = get_static_shape(input)[c_axis]
    # A bias is redundant right before a normalizer.
    use_bias = (normalizer_fn is None) if use_bias is None else use_bias
    # Bind all conv2d keyword arguments once, so that the generic block only
    # needs to supply (input, out_channels, kernel_size, strides, scope).
    conv_kwargs = dict(
        channels_last=channels_last,
        weight_norm=weight_norm,
        kernel_initializer=kernel_initializer,
        kernel_regularizer=kernel_regularizer,
        kernel_constraint=kernel_constraint,
        use_bias=use_bias,
        bias_initializer=bias_initializer,
        bias_regularizer=bias_regularizer,
        bias_constraint=bias_constraint,
        trainable=trainable,
    )
    # build the resnet block
    return resnet_general_block(
        partial(conv2d, **conv_kwargs),
        input=input,
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=kernel_size,
        strides=strides,
        shortcut_kernel_size=shortcut_kernel_size,
        shortcut_force_conv=shortcut_force_conv,
        resize_at_exit=resize_at_exit,
        activation_fn=activation_fn,
        normalizer_fn=normalizer_fn,
        dropout_fn=dropout_fn,
        name=name or 'resnet_conv2d_block',
        scope=scope
    )
@add_arg_scope
@add_name_and_scope_arg_doc
def resnet_deconv2d_block(input,
                          out_channels,
                          kernel_size,
                          strides=(1, 1),
                          shortcut_kernel_size=(1, 1),
                          shortcut_force_conv=False,
                          channels_last=True,
                          output_shape=None,
                          resize_at_exit=False,
                          activation_fn=None,
                          normalizer_fn=None,
                          weight_norm=False,
                          dropout_fn=None,
                          kernel_initializer=None,
                          kernel_regularizer=None,
                          kernel_constraint=None,
                          use_bias=None,
                          bias_initializer=tf.zeros_initializer(),
                          bias_regularizer=None,
                          bias_constraint=None,
                          trainable=True,
                          name=None,
                          scope=None):
    """
    2D deconvolutional ResNet block.
    Args:
        input (Tensor): The input tensor, at least 4-d.
        out_channels (int): The channel numbers of the output.
        kernel_size (int or tuple[int]): Kernel size over spatial dimensions,
            for "conv" and "conv_1" deconvolutional layers.
        strides (int or tuple[int]): Strides over spatial dimensions,
            for all three deconvolutional layers.
        shortcut_kernel_size (int or tuple[int]): Kernel size over spatial
            dimensions, for the "shortcut" deconvolutional layer.
        shortcut_force_conv (bool): If :obj:`True`, force to apply a linear
            convolution transformation on the shortcut path.
            Otherwise (by default) only apply the transformation if necessary.
        channels_last (bool): Whether or not the channel axis is the last
            axis in `input`? (i.e., the data format is "NHWC")
        output_shape: If specified, use this as the shape of the
            deconvolution output; otherwise compute the size of each dimension
            by::
                output_size = input_size * strides
                if padding == 'valid':
                    output_size += max(kernel_size - strides, 0)
        resize_at_exit (bool): See :func:`resnet_general_block`.
        activation_fn: The activation function.
        normalizer_fn: The normalizer function.
        weight_norm: Passed to :func:`deconv2d`.
        dropout_fn: The dropout function.
        kernel_initializer: Passed to :func:`deconv2d`.
        kernel_regularizer: Passed to :func:`deconv2d`.
        kernel_constraint: Passed to :func:`deconv2d`.
        use_bias: Whether or not to use `bias` in :func:`deconv2d`?
            If :obj:`True`, will always use bias.
            If :obj:`None`, will use bias only if `normalizer_fn` is not given.
            If :obj:`False`, will never use bias.
            Default is :obj:`None`.
        bias_initializer: Passed to :func:`deconv2d`.
        bias_regularizer: Passed to :func:`deconv2d`.
        bias_constraint: Passed to :func:`deconv2d`.
        trainable: Passed to :func:`deconv2d`.
    Returns:
        tf.Tensor: The output tensor.
    See Also:
        :func:`resnet_general_block`
    """
    # check the input and infer the input shape
    if channels_last:
        input_spec = InputSpec(shape=('...', '?', '?', '?', '*'))
        c_axis = -1
    else:
        input_spec = InputSpec(shape=('...', '?', '*', '?', '?'))
        c_axis = -3
    input = input_spec.validate('input', input)
    in_channels = get_static_shape(input)[c_axis]
    # check the functional arguments
    if use_bias is None:
        use_bias = normalizer_fn is None
    # derive the convolution function
    conv_fn = partial(
        deconv2d,
        channels_last=channels_last,
        weight_norm=weight_norm,
        kernel_initializer=kernel_initializer,
        kernel_regularizer=kernel_regularizer,
        kernel_constraint=kernel_constraint,
        use_bias=use_bias,
        bias_initializer=bias_initializer,
        bias_regularizer=bias_regularizer,
        bias_constraint=bias_constraint,
        trainable=trainable,
    )
    # Only the strided (resizing) deconvolutions receive `output_shape`;
    # a size-keeping deconv (strides == 1) has an unambiguous output shape.
    def conv_fn2(input, out_channels, kernel_size, strides, scope):
        if strides == 1:  # size-keeping deconv
            return conv_fn(
                input=input,
                out_channels=out_channels,
                kernel_size=kernel_size,
                strides=strides,
                scope=scope
            )
        else:  # resizing deconv, may need the explicit `output_shape`
            return conv_fn(
                input=input,
                out_channels=out_channels,
                output_shape=output_shape,
                kernel_size=kernel_size,
                strides=strides,
                scope=scope
            )
    # build the resnet block
    return resnet_general_block(
        conv_fn2,
        input=input,
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=kernel_size,
        strides=strides,
        shortcut_kernel_size=shortcut_kernel_size,
        shortcut_force_conv=shortcut_force_conv,
        resize_at_exit=resize_at_exit,
        activation_fn=activation_fn,
        normalizer_fn=normalizer_fn,
        dropout_fn=dropout_fn,
        name=name or 'resnet_deconv2d_block',
        scope=scope
    )
| {
"repo_name": "korepwx/tfsnippet",
"path": "tfsnippet/layers/convolutional/resnet.py",
"copies": "1",
"size": "17170",
"license": "mit",
"hash": 1737517365084588500,
"line_mean": 37.7584650113,
"line_max": 79,
"alpha_frac": 0.5623762376,
"autogenerated": false,
"ratio": 4.343536554515558,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5405912792115558,
"avg_score": null,
"num_lines": null
} |
from functools import partial
import tensorflow as tf
import numpy as np
import gym
from stable_baselines import logger
from stable_baselines.common import tf_util, OffPolicyRLModel, SetVerbosity, TensorboardWriter
from stable_baselines.common.vec_env import VecEnv
from stable_baselines.common.schedules import LinearSchedule
from stable_baselines.common.buffers import ReplayBuffer, PrioritizedReplayBuffer
from stable_baselines.deepq.build_graph import build_train
from stable_baselines.deepq.policies import DQNPolicy
class DQN(OffPolicyRLModel):
    """
    The DQN model class.
    DQN paper: https://arxiv.org/abs/1312.5602
    Dueling DQN: https://arxiv.org/abs/1511.06581
    Double-Q Learning: https://arxiv.org/abs/1509.06461
    Prioritized Experience Replay: https://arxiv.org/abs/1511.05952
    :param policy: (DQNPolicy or str) The policy model to use (MlpPolicy, CnnPolicy, LnMlpPolicy, ...)
    :param env: (Gym environment or str) The environment to learn from (if registered in Gym, can be str)
    :param gamma: (float) discount factor
    :param learning_rate: (float) learning rate for adam optimizer
    :param buffer_size: (int) size of the replay buffer
    :param exploration_fraction: (float) fraction of entire training period over which the exploration rate is
            annealed
    :param exploration_final_eps: (float) final value of random action probability
    :param exploration_initial_eps: (float) initial value of random action probability
    :param train_freq: (int) update the model every `train_freq` steps. set to None to disable printing
    :param batch_size: (int) size of a batched sampled from replay buffer for training
    :param double_q: (bool) Whether to enable Double-Q learning or not.
    :param learning_starts: (int) how many steps of the model to collect transitions for before learning starts
    :param target_network_update_freq: (int) update the target network every `target_network_update_freq` steps.
    :param prioritized_replay: (bool) if True prioritized replay buffer will be used.
    :param prioritized_replay_alpha: (float)alpha parameter for prioritized replay buffer.
        It determines how much prioritization is used, with alpha=0 corresponding to the uniform case.
    :param prioritized_replay_beta0: (float) initial value of beta for prioritized replay buffer
    :param prioritized_replay_beta_iters: (int) number of iterations over which beta will be annealed from initial
        value to 1.0. If set to None equals to max_timesteps.
    :param prioritized_replay_eps: (float) epsilon to add to the TD errors when updating priorities.
    :param param_noise: (bool) Whether or not to apply noise to the parameters of the policy.
    :param verbose: (int) the verbosity level: 0 none, 1 training information, 2 tensorflow debug
    :param tensorboard_log: (str) the log location for tensorboard (if None, no logging)
    :param _init_setup_model: (bool) Whether or not to build the network at the creation of the instance
    :param full_tensorboard_log: (bool) enable additional logging when using tensorboard
        WARNING: this logging can take a lot of space quickly
    :param seed: (int) Seed for the pseudo-random generators (python, numpy, tensorflow).
        If None (default), use random seed. Note that if you want completely deterministic
        results, you must set `n_cpu_tf_sess` to 1.
    :param n_cpu_tf_sess: (int) The number of threads for TensorFlow operations
        If None, the number of cpu of the current machine will be used.
    """
    def __init__(self, policy, env, gamma=0.99, learning_rate=5e-4, buffer_size=50000, exploration_fraction=0.1,
                 exploration_final_eps=0.02, exploration_initial_eps=1.0, train_freq=1, batch_size=32, double_q=True,
                 learning_starts=1000, target_network_update_freq=500, prioritized_replay=False,
                 prioritized_replay_alpha=0.6, prioritized_replay_beta0=0.4, prioritized_replay_beta_iters=None,
                 prioritized_replay_eps=1e-6, param_noise=False,
                 n_cpu_tf_sess=None, verbose=0, tensorboard_log=None,
                 _init_setup_model=True, policy_kwargs=None, full_tensorboard_log=False, seed=None):
        # TODO: replay_buffer refactoring
        super(DQN, self).__init__(policy=policy, env=env, replay_buffer=None, verbose=verbose, policy_base=DQNPolicy,
                                  requires_vec_env=False, policy_kwargs=policy_kwargs, seed=seed, n_cpu_tf_sess=n_cpu_tf_sess)
        self.param_noise = param_noise
        self.learning_starts = learning_starts
        self.train_freq = train_freq
        self.prioritized_replay = prioritized_replay
        self.prioritized_replay_eps = prioritized_replay_eps
        self.batch_size = batch_size
        self.target_network_update_freq = target_network_update_freq
        self.prioritized_replay_alpha = prioritized_replay_alpha
        self.prioritized_replay_beta0 = prioritized_replay_beta0
        self.prioritized_replay_beta_iters = prioritized_replay_beta_iters
        self.exploration_final_eps = exploration_final_eps
        self.exploration_initial_eps = exploration_initial_eps
        self.exploration_fraction = exploration_fraction
        self.buffer_size = buffer_size
        self.learning_rate = learning_rate
        self.gamma = gamma
        self.tensorboard_log = tensorboard_log
        self.full_tensorboard_log = full_tensorboard_log
        self.double_q = double_q
        # The following attributes are populated later by setup_model()
        # and learn(); they are declared here so the instance shape is fixed.
        self.graph = None
        self.sess = None
        self._train_step = None
        self.step_model = None
        self.update_target = None
        self.act = None
        self.proba_step = None
        self.replay_buffer = None
        self.beta_schedule = None
        self.exploration = None
        self.params = None
        self.summary = None
        if _init_setup_model:
            self.setup_model()
    def _get_pretrain_placeholders(self):
        """Return (obs placeholder, action placeholder, q-values) used for pretraining."""
        policy = self.step_model
        return policy.obs_ph, tf.placeholder(tf.int32, [None]), policy.q_values
    def setup_model(self):
        """Build the TensorFlow graph: act / train-step / target-update ops."""
        with SetVerbosity(self.verbose):
            assert not isinstance(self.action_space, gym.spaces.Box), \
                "Error: DQN cannot output a gym.spaces.Box action space."
            # If the policy is wrap in functool.partial (e.g. to disable dueling)
            # unwrap it to check the class type
            if isinstance(self.policy, partial):
                test_policy = self.policy.func
            else:
                test_policy = self.policy
            assert issubclass(test_policy, DQNPolicy), "Error: the input policy for the DQN model must be " \
                                                       "an instance of DQNPolicy."
            self.graph = tf.Graph()
            with self.graph.as_default():
                self.set_random_seed(self.seed)
                self.sess = tf_util.make_session(num_cpu=self.n_cpu_tf_sess, graph=self.graph)
                optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate)
                self.act, self._train_step, self.update_target, self.step_model = build_train(
                    q_func=partial(self.policy, **self.policy_kwargs),
                    ob_space=self.observation_space,
                    ac_space=self.action_space,
                    optimizer=optimizer,
                    gamma=self.gamma,
                    grad_norm_clipping=10,
                    param_noise=self.param_noise,
                    sess=self.sess,
                    full_tensorboard_log=self.full_tensorboard_log,
                    double_q=self.double_q
                )
                self.proba_step = self.step_model.proba_step
                self.params = tf_util.get_trainable_vars("deepq")
                # Initialize the parameters and copy them to the target network.
                tf_util.initialize(self.sess)
                self.update_target(sess=self.sess)
                self.summary = tf.summary.merge_all()
    def learn(self, total_timesteps, callback=None, log_interval=100, tb_log_name="DQN",
              reset_num_timesteps=True, replay_wrapper=None):
        """Run the DQN training loop for `total_timesteps` environment steps."""
        new_tb_log = self._init_num_timesteps(reset_num_timesteps)
        callback = self._init_callback(callback)
        with SetVerbosity(self.verbose), TensorboardWriter(self.graph, self.tensorboard_log, tb_log_name, new_tb_log) \
                as writer:
            self._setup_learn()
            # Create the replay buffer
            if self.prioritized_replay:
                self.replay_buffer = PrioritizedReplayBuffer(self.buffer_size, alpha=self.prioritized_replay_alpha)
                if self.prioritized_replay_beta_iters is None:
                    prioritized_replay_beta_iters = total_timesteps
                else:
                    prioritized_replay_beta_iters = self.prioritized_replay_beta_iters
                self.beta_schedule = LinearSchedule(prioritized_replay_beta_iters,
                                                    initial_p=self.prioritized_replay_beta0,
                                                    final_p=1.0)
            else:
                self.replay_buffer = ReplayBuffer(self.buffer_size)
                self.beta_schedule = None
            if replay_wrapper is not None:
                assert not self.prioritized_replay, "Prioritized replay buffer is not supported by HER"
                self.replay_buffer = replay_wrapper(self.replay_buffer)
            # Create the schedule for exploration starting from 1.
            self.exploration = LinearSchedule(schedule_timesteps=int(self.exploration_fraction * total_timesteps),
                                              initial_p=self.exploration_initial_eps,
                                              final_p=self.exploration_final_eps)
            episode_rewards = [0.0]
            episode_successes = []
            callback.on_training_start(locals(), globals())
            callback.on_rollout_start()
            reset = True
            obs = self.env.reset()
            # Retrieve unnormalized observation for saving into the buffer
            if self._vec_normalize_env is not None:
                obs_ = self._vec_normalize_env.get_original_obs().squeeze()
            for _ in range(total_timesteps):
                # Take action and update exploration to the newest value
                kwargs = {}
                if not self.param_noise:
                    update_eps = self.exploration.value(self.num_timesteps)
                    update_param_noise_threshold = 0.
                else:
                    update_eps = 0.
                    # Compute the threshold such that the KL divergence between perturbed and non-perturbed
                    # policy is comparable to eps-greedy exploration with eps = exploration.value(t).
                    # See Appendix C.1 in Parameter Space Noise for Exploration, Plappert et al., 2017
                    # for detailed explanation.
                    update_param_noise_threshold = \
                        -np.log(1. - self.exploration.value(self.num_timesteps) +
                                self.exploration.value(self.num_timesteps) / float(self.env.action_space.n))
                    kwargs['reset'] = reset
                    kwargs['update_param_noise_threshold'] = update_param_noise_threshold
                    kwargs['update_param_noise_scale'] = True
                with self.sess.as_default():
                    action = self.act(np.array(obs)[None], update_eps=update_eps, **kwargs)[0]
                env_action = action
                reset = False
                new_obs, rew, done, info = self.env.step(env_action)
                self.num_timesteps += 1
                # Stop training if return value is False
                callback.update_locals(locals())
                if callback.on_step() is False:
                    break
                # Store only the unnormalized version
                if self._vec_normalize_env is not None:
                    new_obs_ = self._vec_normalize_env.get_original_obs().squeeze()
                    reward_ = self._vec_normalize_env.get_original_reward().squeeze()
                else:
                    # Avoid changing the original ones
                    obs_, new_obs_, reward_ = obs, new_obs, rew
                # Store transition in the replay buffer.
                self.replay_buffer_add(obs_, action, reward_, new_obs_, done, info)
                obs = new_obs
                # Save the unnormalized observation
                if self._vec_normalize_env is not None:
                    obs_ = new_obs_
                if writer is not None:
                    ep_rew = np.array([reward_]).reshape((1, -1))
                    ep_done = np.array([done]).reshape((1, -1))
                    tf_util.total_episode_reward_logger(self.episode_reward, ep_rew, ep_done, writer,
                                                        self.num_timesteps)
                episode_rewards[-1] += reward_
                if done:
                    maybe_is_success = info.get('is_success')
                    if maybe_is_success is not None:
                        episode_successes.append(float(maybe_is_success))
                    if not isinstance(self.env, VecEnv):
                        obs = self.env.reset()
                    episode_rewards.append(0.0)
                    reset = True
                # Do not train if the warmup phase is not over
                # or if there are not enough samples in the replay buffer
                can_sample = self.replay_buffer.can_sample(self.batch_size)
                if can_sample and self.num_timesteps > self.learning_starts \
                        and self.num_timesteps % self.train_freq == 0:
                    callback.on_rollout_end()
                    # Minimize the error in Bellman's equation on a batch sampled from replay buffer.
                    # pytype:disable=bad-unpacking
                    if self.prioritized_replay:
                        assert self.beta_schedule is not None, \
                               "BUG: should be LinearSchedule when self.prioritized_replay True"
                        experience = self.replay_buffer.sample(self.batch_size,
                                                               beta=self.beta_schedule.value(self.num_timesteps),
                                                               env=self._vec_normalize_env)
                        (obses_t, actions, rewards, obses_tp1, dones, weights, batch_idxes) = experience
                    else:
                        obses_t, actions, rewards, obses_tp1, dones = self.replay_buffer.sample(self.batch_size,
                                                                                                env=self._vec_normalize_env)
                        weights, batch_idxes = np.ones_like(rewards), None
                    # pytype:enable=bad-unpacking
                    if writer is not None:
                        # run loss backprop with summary, but once every 100 steps save the metadata
                        # (memory, compute time, ...)
                        if (1 + self.num_timesteps) % 100 == 0:
                            run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
                            run_metadata = tf.RunMetadata()
                            summary, td_errors = self._train_step(obses_t, actions, rewards, obses_tp1, obses_tp1,
                                                                  dones, weights, sess=self.sess, options=run_options,
                                                                  run_metadata=run_metadata)
                            writer.add_run_metadata(run_metadata, 'step%d' % self.num_timesteps)
                        else:
                            summary, td_errors = self._train_step(obses_t, actions, rewards, obses_tp1, obses_tp1,
                                                                  dones, weights, sess=self.sess)
                        writer.add_summary(summary, self.num_timesteps)
                    else:
                        _, td_errors = self._train_step(obses_t, actions, rewards, obses_tp1, obses_tp1, dones, weights,
                                                        sess=self.sess)
                    if self.prioritized_replay:
                        new_priorities = np.abs(td_errors) + self.prioritized_replay_eps
                        assert isinstance(self.replay_buffer, PrioritizedReplayBuffer)
                        self.replay_buffer.update_priorities(batch_idxes, new_priorities)
                    callback.on_rollout_start()
                if can_sample and self.num_timesteps > self.learning_starts and \
                        self.num_timesteps % self.target_network_update_freq == 0:
                    # Update target network periodically.
                    self.update_target(sess=self.sess)
                if len(episode_rewards[-101:-1]) == 0:
                    mean_100ep_reward = -np.inf
                else:
                    mean_100ep_reward = round(float(np.mean(episode_rewards[-101:-1])), 1)
                num_episodes = len(episode_rewards)
                if self.verbose >= 1 and done and log_interval is not None and len(episode_rewards) % log_interval == 0:
                    logger.record_tabular("steps", self.num_timesteps)
                    logger.record_tabular("episodes", num_episodes)
                    if len(episode_successes) > 0:
                        logger.logkv("success rate", np.mean(episode_successes[-100:]))
                    logger.record_tabular("mean 100 episode reward", mean_100ep_reward)
                    logger.record_tabular("% time spent exploring",
                                          int(100 * self.exploration.value(self.num_timesteps)))
                    logger.dump_tabular()
        callback.on_training_end()
        return self
    def predict(self, observation, state=None, mask=None, deterministic=True):
        """Return the model's action for the given observation (and None for state)."""
        observation = np.array(observation)
        vectorized_env = self._is_vectorized_observation(observation, self.observation_space)
        observation = observation.reshape((-1,) + self.observation_space.shape)
        with self.sess.as_default():
            actions, _, _ = self.step_model.step(observation, deterministic=deterministic)
        if not vectorized_env:
            actions = actions[0]
        return actions, None
    def action_probability(self, observation, state=None, mask=None, actions=None, logp=False):
        """Return the action probability distribution for `observation`,
        or (when `actions` is given) the probability of those actions."""
        observation = np.array(observation)
        vectorized_env = self._is_vectorized_observation(observation, self.observation_space)
        observation = observation.reshape((-1,) + self.observation_space.shape)
        actions_proba = self.proba_step(observation, state, mask)
        if actions is not None:  # comparing the action distribution, to given actions
            actions = np.array([actions])
            assert isinstance(self.action_space, gym.spaces.Discrete)
            actions = actions.reshape((-1,))
            assert observation.shape[0] == actions.shape[0], "Error: batch sizes differ for actions and observations."
            actions_proba = actions_proba[np.arange(actions.shape[0]), actions]
            # normalize action proba shape
            actions_proba = actions_proba.reshape((-1, 1))
            if logp:
                actions_proba = np.log(actions_proba)
        if not vectorized_env:
            if state is not None:
                raise ValueError("Error: The environment must be vectorized when using recurrent policies.")
            actions_proba = actions_proba[0]
        return actions_proba
    def get_parameter_list(self):
        """Return the list of trainable TF variables of this model."""
        return self.params
    def save(self, save_path, cloudpickle=False):
        """Serialize hyper-parameters and network weights to `save_path`."""
        # params
        # NOTE(review): exploration_initial_eps is not included here —
        # confirm that loading falls back to the constructor default.
        data = {
            "double_q": self.double_q,
            "param_noise": self.param_noise,
            "learning_starts": self.learning_starts,
            "train_freq": self.train_freq,
            "prioritized_replay": self.prioritized_replay,
            "prioritized_replay_eps": self.prioritized_replay_eps,
            "batch_size": self.batch_size,
            "target_network_update_freq": self.target_network_update_freq,
            "prioritized_replay_alpha": self.prioritized_replay_alpha,
            "prioritized_replay_beta0": self.prioritized_replay_beta0,
            "prioritized_replay_beta_iters": self.prioritized_replay_beta_iters,
            "exploration_final_eps": self.exploration_final_eps,
            "exploration_fraction": self.exploration_fraction,
            "learning_rate": self.learning_rate,
            "gamma": self.gamma,
            "verbose": self.verbose,
            "observation_space": self.observation_space,
            "action_space": self.action_space,
            "policy": self.policy,
            "n_envs": self.n_envs,
            "n_cpu_tf_sess": self.n_cpu_tf_sess,
            "seed": self.seed,
            "_vectorize_action": self._vectorize_action,
            "policy_kwargs": self.policy_kwargs
        }
        params_to_save = self.get_parameters()
        self._save_to_file(save_path, data=data, params=params_to_save, cloudpickle=cloudpickle)
| {
"repo_name": "hill-a/stable-baselines",
"path": "stable_baselines/deepq/dqn.py",
"copies": "1",
"size": "21439",
"license": "mit",
"hash": -1675789354481695200,
"line_mean": 52.463840399,
"line_max": 126,
"alpha_frac": 0.5847754093,
"autogenerated": false,
"ratio": 4.223601260835303,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5308376670135303,
"avg_score": null,
"num_lines": null
} |
from functools import partial
import tensorflow as tf
def tensors_filter(tensors,
                   includes='',
                   includes_combine_type='or',
                   excludes=None,
                   excludes_combine_type='or'):
    """Select tensors whose names match `includes` and do not match `excludes`.

    Matching is by substring: a filter string matches a tensor when it occurs
    anywhere in ``tensor.name``.  With combine type ``'or'`` a tensor matches
    when any filter string matches; with ``'and'`` every filter string must
    match.

    NOTICE: `includes` = [] means nothing to be included, and `excludes` = []
    means nothing to be excluded.  The default ``includes=''`` matches every
    tensor (the empty string is a substring of every name).

    Args:
        tensors (list|tuple): Objects exposing a string ``.name`` attribute.
        includes (str|list|tuple): Filter(s) a tensor must match.
        includes_combine_type (str): ``'or'`` or ``'and'``.
        excludes (str|list|tuple|None): Filter(s) a tensor must not match.
        excludes_combine_type (str): ``'or'`` or ``'and'``.

    Returns:
        list: The selected tensors, in their original order.
    """
    if excludes is None:
        excludes = []
    assert isinstance(tensors, (list, tuple)), '`tensors` should be a list or tuple!'
    assert isinstance(includes, (str, list, tuple)), '`includes` should be a string or a list(tuple) of strings!'
    assert includes_combine_type in ['or', 'and'], "`includes_combine_type` should be 'or' or 'and'!"
    assert isinstance(excludes, (str, list, tuple)), '`excludes` should be a string or a list(tuple) of strings!'
    assert excludes_combine_type in ['or', 'and'], "`excludes_combine_type` should be 'or' or 'and'!"
    def _select(filters, combine_type):
        # BUG FIX: the original tested the *builtin* `filter` instead of the
        # `filters` argument, so an explicitly empty filter list fell through
        # and, with combine_type='and', selected EVERY tensor (the empty
        # inner loop always reached the for/else `else` branch).
        if filters in ([], ()):
            return []
        filters = filters if isinstance(filters, (list, tuple)) else [filters]
        selected = []
        for t in tensors:
            if combine_type == 'or':
                # keep the tensor as soon as one filter matches
                for filt in filters:
                    if filt in t.name:
                        selected.append(t)
                        break
            elif combine_type == 'and':
                # keep the tensor only if every filter matches (for/else)
                for filt in filters:
                    if filt not in t.name:
                        break
                else:
                    selected.append(t)
        return selected
    include_set = _select(includes, includes_combine_type)
    exclude_set = _select(excludes, excludes_combine_type)
    select_set = [t for t in include_set if t not in exclude_set]
    return select_set
def get_collection(key,
                   includes='',
                   includes_combine_type='or',
                   excludes=None,
                   excludes_combine_type='or'):
    """Fetch the graph collection `key`, filtered by tensor-name substrings.

    Thin wrapper around :func:`tensors_filter`; see it for the matching
    semantics of the include/exclude arguments.
    """
    collected = tf.get_collection(key)
    return tensors_filter(collected,
                          includes=includes,
                          includes_combine_type=includes_combine_type,
                          excludes=excludes,
                          excludes_combine_type=excludes_combine_type)
# Convenience partials: fetch a standard TF graph collection, filtered by
# tensor-name substrings (same keyword arguments as `get_collection`).
global_variables = partial(get_collection, key=tf.GraphKeys.GLOBAL_VARIABLES)
local_variables = partial(get_collection, key=tf.GraphKeys.LOCAL_VARIABLES)
trainable_variables = partial(get_collection, key=tf.GraphKeys.TRAINABLE_VARIABLES)
update_ops = partial(get_collection, key=tf.GraphKeys.UPDATE_OPS)
| {
"repo_name": "LynnHo/AttGAN-Tensorflow",
"path": "tflib/utils/collection.py",
"copies": "1",
"size": "2510",
"license": "mit",
"hash": 5805295523747124000,
"line_mean": 36.4626865672,
"line_max": 113,
"alpha_frac": 0.5685258964,
"autogenerated": false,
"ratio": 4.458259325044405,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5526785221444405,
"avg_score": null,
"num_lines": null
} |
from functools import partial
import theano.tensor as T
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
from neupy import algorithms, layers, environment
from plots import draw_countour, weight_quiver
environment.reproducible()
# Toy 2-D binary-classification dataset: six samples, two features each.
input_data = np.array([
    [0.9, 0.3],
    [0.5, 0.3],
    [0.2, 0.1],
    [0.7, 0.5],
    [0.1, 0.8],
    [0.1, 0.9],
])
# Class labels (0 or 1) for each row of `input_data`.
target_data = np.array([
    [1],
    [1],
    [1],
    [0],
    [0],
    [0],
])
# Every algorithm is started from this same initial weight so their
# optimization trajectories are comparable on the contour plot.
default_weight = np.array([[-4.], [-4.]])
# Module-level state shared with the epoch-end callback below:
weights = None      # 2 x n_epochs history of input-layer weights
current_epoch = 0   # epoch counter maintained by draw_quiver
class NoBiasSigmoid(layers.Sigmoid):
    """Sigmoid layer whose bias term is effectively disabled.

    The bias is multiplied by zero rather than removed entirely,
    because Theano still needs the parameter present for its update
    rules.
    """

    def output(self, input_value):
        zeroed_bias = 0 * self.bias
        summated = T.dot(input_value, self.weight) + zeroed_bias
        return self.activation_function(summated)
def copy_weight(weight):
    """Return a detached copy of a shared variable's current value."""
    current_value = weight.get_value()
    return current_value.copy()
def save_epoch_weight(net):
    """Signal processor that records the input-layer weights after
    every training epoch.

    Writes the current weights into column ``current_epoch + 1`` of the
    module-level ``weights`` history array (column 0 holds the initial
    weights).
    """
    global weights
    global current_epoch
    input_layer_weight = copy_weight(net.input_layer.weight)
    # Slice assignment keeps the (2, 1) column shape intact.
    weights[:, current_epoch + 1:current_epoch + 2] = input_layer_weight
def get_connection():
    """Build a fresh 2-input -> 1-output network connection.

    A brand-new connection is created on every call so that each
    algorithm starts from an identical, independent initial state.
    """
    hidden = NoBiasSigmoid(2, weight=default_weight.copy())
    out = layers.Output(1)
    return hidden > out
def draw_quiver(network_class, name, color='r'):
    """Train ``network_class`` on the toy dataset and draw a quiver of
    its weight trajectory, one arrow per training epoch.

    Mutates the module-level ``weights`` / ``current_epoch`` that the
    ``save_epoch_weight`` callback writes into.  Returns a matplotlib
    patch for the plot legend.
    """
    global weights
    global current_epoch
    bpn = network_class(
        get_connection(),
        step=0.3,
        epoch_end_signal=save_epoch_weight
    )
    # 1000 is an upper limit for all network epochs; the history is
    # trimmed to the actual epoch count after training.
    weights = np.zeros((2, 1000))
    weights[:, 0:1] = default_weight.copy()
    current_epoch = 0
    # Train one epoch at a time until the error drops under the target;
    # save_epoch_weight records the weights after each epoch.
    while bpn.prediction_error(input_data, target_data) > 0.125:
        bpn.train(input_data, target_data, epochs=1)
        current_epoch += 1
    weights = weights[:, :current_epoch + 1]
    weight_quiver(weights, color=color)
    label = "{name} ({n} steps)".format(name=name, n=current_epoch)
    return mpatches.Patch(color=color, label=label)
def target_function(network, x, y):
    """Set the network's input-layer weights to ``[[x], [y]]`` and
    return its prediction error on the toy dataset.

    Note: mutates the network's weights as a side effect.
    """
    candidate = np.array([[x], [y]])
    network.input_layer.weight.set_value(candidate)
    return network.prediction_error(input_data, target_data)
# Build a reference network used only to evaluate the error surface for
# the contour plot.
bp_network = algorithms.GradientDescent(
    get_connection(),
    step=0.3,
    epoch_end_signal=save_epoch_weight
)
network_target_function = partial(target_function, bp_network)

plt.figure()
plt.title("Approximation function contour plot")
plt.xlabel("First weight")
plt.ylabel("Second weight")
draw_countour(
    np.linspace(-5, 5, 50),
    np.linspace(-5, 5, 50),
    network_target_function
)

cgnet_class = partial(algorithms.ConjugateGradient,
                      addons=[algorithms.LinearSearch])
momentum_class = partial(algorithms.Momentum, batch_size='full')
# (algorithm class, legend label, plot color) per optimizer.  Named
# `algorithm_specs` instead of `algorithms` so the tuple no longer
# shadows the imported `neupy.algorithms` module.
algorithm_specs = (
    (algorithms.GradientDescent, 'Gradient Descent', 'k'),
    (momentum_class, 'Momentum', 'g'),
    (algorithms.RPROP, 'RPROP', 'm'),
    (algorithms.IRPROPPlus, 'iRPROP+', 'r'),
    (cgnet_class, 'Conjugate Gradient', 'y'),
)

patches = []
for algorithm, algorithm_name, color in algorithm_specs:
    print("Train '{}' network".format(algorithm_name))
    quiver_patch = draw_quiver(algorithm, algorithm_name, color)
    patches.append(quiver_patch)

plt.legend(handles=patches)
plt.show()
| {
"repo_name": "stczhc/neupy",
"path": "examples/gd/gd_algorithms_visualization.py",
"copies": "1",
"size": "3630",
"license": "mit",
"hash": 7173010037828774000,
"line_mean": 24.7446808511,
"line_max": 72,
"alpha_frac": 0.658953168,
"autogenerated": false,
"ratio": 3.348708487084871,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9507661655084871,
"avg_score": 0,
"num_lines": 141
} |
from functools import partial
import torch
from torch import Tensor
import math
import torch.nn.functional as F
from . import register_monotonic_attention
from .monotonic_multihead_attention import (
MonotonicMultiheadAttentionWaitK,
MonotonicMultiheadAttentionHardAligned,
MonotonicMultiheadAttentionInfiniteLookback,
)
from typing import Dict, Optional
from examples.simultaneous_translation.utils import p_choose_strategy
def fixed_pooling_monotonic_attention(monotonic_attention):
    """Class-decorator factory: wrap a monotonic attention class so its
    monotonic read/write decisions are made on a fixed-stride pooling of
    the encoder states (every ``pre_decision_ratio`` steps) instead of
    on every encoder step, then upsampled back to the full source length.
    """
    def create_model(monotonic_attention, klass):
        class FixedStrideMonotonicAttention(monotonic_attention):
            def __init__(self, args):
                # Defaults so attributes exist even if the superclass
                # __init__ does not set them for every strategy.
                self.waitk_lagging = 0
                self.num_heads = 0
                self.noise_mean = 0.0
                self.noise_var = 0.0
                super().__init__(args)
                self.pre_decision_type = args.fixed_pre_decision_type
                self.pre_decision_ratio = args.fixed_pre_decision_ratio
                self.pre_decision_pad_threshold = args.fixed_pre_decision_pad_threshold

                # Ratio 1 means no pooling at all; skip pooling setup.
                if self.pre_decision_ratio == 1:
                    return

                self.strategy = args.simul_type

                if args.fixed_pre_decision_type == "average":
                    self.pooling_layer = torch.nn.AvgPool1d(
                        kernel_size=self.pre_decision_ratio,
                        stride=self.pre_decision_ratio,
                        ceil_mode=True,
                    )
                elif args.fixed_pre_decision_type == "last":

                    def last(key):
                        # Take the last element of each stride window;
                        # if the length is not divisible by the ratio,
                        # append the final element as a partial window.
                        if key.size(2) < self.pre_decision_ratio:
                            return key
                        else:
                            k = key[
                                :,
                                :,
                                self.pre_decision_ratio - 1 :: self.pre_decision_ratio,
                            ].contiguous()
                            if key.size(-1) % self.pre_decision_ratio != 0:
                                k = torch.cat([k, key[:, :, -1:]], dim=-1).contiguous()
                            return k

                    self.pooling_layer = last
                else:
                    raise NotImplementedError

            @staticmethod
            def add_args(parser):
                super(
                    FixedStrideMonotonicAttention, FixedStrideMonotonicAttention
                ).add_args(parser)

                parser.add_argument(
                    "--fixed-pre-decision-ratio",
                    type=int,
                    required=True,
                    help=(
                        "Ratio for the fixed pre-decision,"
                        "indicating how many encoder steps will start"
                        "simultaneous decision making process."
                    ),
                )
                parser.add_argument(
                    "--fixed-pre-decision-type",
                    default="average",
                    choices=["average", "last"],
                    help="Pooling type",
                )
                parser.add_argument(
                    "--fixed-pre-decision-pad-threshold",
                    type=float,
                    default=0.3,
                    help="If a part of the sequence has pad"
                    ",the threshold the pooled part is a pad.",
                )

            def insert_zeros(self, x):
                # Upsample pooled decisions back to full source length by
                # interleaving zeros (transposed convolution with a
                # one-hot kernel of width `stride`).
                bsz_num_heads, tgt_len, src_len = x.size()
                stride = self.pre_decision_ratio
                weight = F.pad(torch.ones(1, 1, 1).to(x), (stride - 1, 0))
                x_upsample = F.conv_transpose1d(
                    x.view(-1, src_len).unsqueeze(1),
                    weight,
                    stride=stride,
                    padding=0,
                )
                return x_upsample.squeeze(1).view(bsz_num_heads, tgt_len, -1)

            def p_choose_waitk(
                self, query, key, key_padding_mask: Optional[Tensor] = None,
                incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None
            ):
                """
                query: bsz, tgt_len
                key: bsz, src_len
                key_padding_mask: bsz, src_len
                """
                if incremental_state is not None:
                    # Retrieve target length from incremental states
                    # For inference the length of query is always 1
                    tgt = incremental_state["steps"]["tgt"]
                    assert tgt is not None
                    tgt_len = int(tgt)
                else:
                    tgt_len, bsz, _ = query.size()

                src_len, bsz, _ = key.size()

                # Wait-k band: p_choose is 1 exactly on the diagonal
                # offset by (waitk_lagging - 1), 0 elsewhere.
                p_choose = torch.ones(bsz, tgt_len, src_len).to(query)
                p_choose = torch.tril(p_choose, diagonal=self.waitk_lagging - 1)
                p_choose = torch.triu(p_choose, diagonal=self.waitk_lagging - 1)

                if incremental_state is not None:
                    p_choose = p_choose[:, -1:]
                    tgt_len = 1

                # Extend to each head
                p_choose = (
                    p_choose.contiguous()
                    .unsqueeze(1)
                    .expand(-1, self.num_heads, -1, -1)
                    .contiguous()
                    .view(-1, tgt_len, src_len)
                )

                return p_choose

            def p_choose(
                self,
                query: Optional[Tensor],
                key: Optional[Tensor],
                key_padding_mask: Optional[Tensor] = None,
                incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
            ):
                assert key is not None
                assert query is not None
                src_len = key.size(0)
                tgt_len = query.size(0)
                batch_size = query.size(1)

                # No pooling: defer to the plain strategy implementation.
                if self.pre_decision_ratio == 1:
                    if self.strategy == "waitk":
                        return p_choose_strategy.waitk(
                            query,
                            key,
                            self.waitk_lagging,
                            self.num_heads,
                            key_padding_mask,
                            incremental_state=incremental_state,
                        )
                    else:  # hard_aligned or infinite_lookback
                        q_proj, k_proj, _ = self.input_projections(query, key, None, "monotonic")
                        attn_energy = self.attn_energy(q_proj, k_proj, key_padding_mask)
                        return p_choose_strategy.hard_aligned(
                            q_proj,
                            k_proj,
                            attn_energy,
                            self.noise_mean,
                            self.noise_var,
                            self.training
                        )

                # Pool the key sequence along time (dim 0 after transpose).
                key_pool = self.pooling_layer(key.transpose(0, 2)).transpose(0, 2)

                if key_padding_mask is not None:
                    # A pooled position counts as pad when the pooled pad
                    # fraction exceeds the configured threshold.
                    key_padding_mask_pool = (
                        self.pooling_layer(key_padding_mask.unsqueeze(0).float())
                        .squeeze(0)
                        .gt(self.pre_decision_pad_threshold)
                    )
                    # Make sure at least one element is not pad
                    key_padding_mask_pool[:, 0] = 0
                else:
                    key_padding_mask_pool = None

                if incremental_state is not None:
                    # The floor instead of ceil is used for inference
                    # But make sure the length key_pool at least 1
                    if (
                        max(1, math.floor(key.size(0) / self.pre_decision_ratio))
                    ) < key_pool.size(0):
                        key_pool = key_pool[:-1]
                        if key_padding_mask_pool is not None:
                            key_padding_mask_pool = key_padding_mask_pool[:-1]

                p_choose_pooled = self.p_choose_waitk(
                    query,
                    key_pool,
                    key_padding_mask_pool,
                    incremental_state=incremental_state,
                )

                # Upsample, interpolate zeros
                p_choose = self.insert_zeros(p_choose_pooled)

                if p_choose.size(-1) < src_len:
                    # Append zeros if the upsampled p_choose is shorter than src_len
                    p_choose = torch.cat(
                        [
                            p_choose,
                            torch.zeros(
                                p_choose.size(0),
                                tgt_len,
                                src_len - p_choose.size(-1)
                            ).to(p_choose)
                        ],
                        dim=2
                    )
                else:
                    # can be larger than src_len because we used ceil before
                    p_choose = p_choose[:, :, :src_len]
                    p_choose[:, :, -1] = p_choose_pooled[:, :, -1]

                assert list(p_choose.size()) == [
                    batch_size * self.num_heads,
                    tgt_len,
                    src_len,
                ]

                return p_choose

        FixedStrideMonotonicAttention.__name__ = klass.__name__
        return FixedStrideMonotonicAttention

    return partial(create_model, monotonic_attention)
@register_monotonic_attention("waitk_fixed_pre_decision")
@fixed_pooling_monotonic_attention(MonotonicMultiheadAttentionWaitK)
class MonotonicMultiheadAttentionWaitkFixedStride:
    # Placeholder body: the decorator replaces this class with the
    # generated fixed-stride subclass of the wait-k attention.
    pass
@register_monotonic_attention("hard_aligned_fixed_pre_decision")
@fixed_pooling_monotonic_attention(MonotonicMultiheadAttentionHardAligned)
class MonotonicMultiheadAttentionHardFixedStride:
    # Placeholder body: the decorator replaces this class with the
    # generated fixed-stride subclass of the hard-aligned attention.
    pass
@register_monotonic_attention("infinite_lookback_fixed_pre_decision")
@fixed_pooling_monotonic_attention(MonotonicMultiheadAttentionInfiniteLookback)
class MonotonicMultiheadAttentionInfiniteLookbackFixedStride:
    # Placeholder body: the decorator replaces this class with the
    # generated fixed-stride subclass of the infinite-lookback attention.
    pass
| {
"repo_name": "pytorch/fairseq",
"path": "examples/simultaneous_translation/modules/fixed_pre_decision.py",
"copies": "1",
"size": "10201",
"license": "mit",
"hash": -1606563717617302800,
"line_mean": 39.1614173228,
"line_max": 97,
"alpha_frac": 0.4633859426,
"autogenerated": false,
"ratio": 4.6922723091076355,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5655658251707636,
"avg_score": null,
"num_lines": null
} |
from functools import partial
import torch
import torch.nn as nn
from torch.nn import functional as F
def dice_coef(input, target, threshold=None):
    """Compute the (smoothed) Dice coefficient between two tensors.

    Both tensors are flattened before comparison.  When ``threshold``
    is given, ``input`` is first binarized with ``input > threshold``.
    The +1 smoothing term keeps the ratio defined for empty masks.
    """
    smooth = 1.0
    pred = input.view(-1)
    if threshold is not None:
        pred = (pred > threshold).float()
    truth = target.view(-1)
    overlap = (pred * truth).sum()
    numerator = 2. * overlap + smooth
    denominator = pred.sum() + truth.sum() + smooth
    return numerator / denominator
class DiceLoss(nn.Module):
    """Dice loss on sigmoid-activated logits.

    With ``log=True`` returns ``-log(dice)``; otherwise ``1 - dice``.
    """

    def __init__(self, log=False):
        super().__init__()
        self.log = log

    def forward(self, input, target):
        # torch.sigmoid replaces F.sigmoid, which is deprecated and
        # removed in newer PyTorch releases; numerically identical.
        dice_coef_value = dice_coef(torch.sigmoid(input), target)
        if self.log:
            return -torch.log(dice_coef_value)
        else:
            return 1 - dice_coef_value
class BCEDiceLoss(nn.Module):
    """Sum of BCE-with-logits and (optionally log-scaled) Dice loss."""

    def __init__(self, log_dice=False):
        super().__init__()
        self.bce_loss = nn.BCEWithLogitsLoss()
        self.dice_loss = DiceLoss(log=log_dice)

    def forward(self, input, target):
        bce = self.bce_loss(input, target)
        dice = self.dice_loss(input, target)
        return bce + dice
# Loss-name -> loss-factory registry; each value is a zero-argument
# callable producing a configured nn.Module.
losses = {
    'bce': nn.BCEWithLogitsLoss,
    'bce_dice': partial(BCEDiceLoss, log_dice=False),
    'bce_log_dice': partial(BCEDiceLoss, log_dice=True),
}
| {
"repo_name": "creafz/kaggle-carvana",
"path": "loss.py",
"copies": "1",
"size": "1312",
"license": "mit",
"hash": 2497145787039594000,
"line_mean": 26.3333333333,
"line_max": 75,
"alpha_frac": 0.6196646341,
"autogenerated": false,
"ratio": 3.2555831265508686,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43752477606508683,
"avg_score": null,
"num_lines": null
} |
from functools import partial
import torch.nn as nn
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from .efficientnet_blocks import SqueezeExcite
from .efficientnet_builder import decode_arch_def, resolve_act_layer, resolve_bn_args, round_channels
from .helpers import build_model_with_cfg, default_cfg_for_features
from .layers import get_act_fn
from .mobilenetv3 import MobileNetV3, MobileNetV3Features
from .registry import register_model
def _cfg(url='', **kwargs):
    """Build a default pretrained-weights config dict.

    Any keyword arguments override the defaults below.
    """
    cfg = {
        'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (1, 1),
        'crop_pct': 0.875, 'interpolation': 'bilinear',
        'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
        'first_conv': 'conv_stem', 'classifier': 'classifier',
    }
    cfg.update(kwargs)
    return cfg
# Variant name -> pretrained-config (checkpoint URLs from the official
# Alibaba-MIIL HardCoRe-NAS release).
default_cfgs = {
    'hardcorenas_a': _cfg(url='https://miil-public-eu.oss-eu-central-1.aliyuncs.com/public/HardCoReNAS/HardCoreNAS_A_Green_38ms_75.9_23474aeb.pth'),
    'hardcorenas_b': _cfg(url='https://miil-public-eu.oss-eu-central-1.aliyuncs.com/public/HardCoReNAS/HardCoreNAS_B_Green_40ms_76.5_1f882d1e.pth'),
    'hardcorenas_c': _cfg(url='https://miil-public-eu.oss-eu-central-1.aliyuncs.com/public/HardCoReNAS/HardCoreNAS_C_Green_44ms_77.1_d4148c9e.pth'),
    'hardcorenas_d': _cfg(url='https://miil-public-eu.oss-eu-central-1.aliyuncs.com/public/HardCoReNAS/HardCoreNAS_D_Green_50ms_77.4_23e3cdde.pth'),
    'hardcorenas_e': _cfg(url='https://miil-public-eu.oss-eu-central-1.aliyuncs.com/public/HardCoReNAS/HardCoreNAS_E_Green_55ms_77.9_90f20e8a.pth'),
    'hardcorenas_f': _cfg(url='https://miil-public-eu.oss-eu-central-1.aliyuncs.com/public/HardCoReNAS/HardCoreNAS_F_Green_60ms_78.1_2855edf1.pth'),
}
def _gen_hardcorenas(pretrained, variant, arch_def, **kwargs):
    """Creates a hardcorenas model

    Ref impl: https://github.com/Alibaba-MIIL/HardCoReNAS
    Paper: https://arxiv.org/abs/2102.11646

    Args:
        pretrained: load pretrained weights for `variant` if True.
        variant: key into `default_cfgs`.
        arch_def: block-definition strings decoded by `decode_arch_def`.
        **kwargs: forwarded model kwargs (may include `features_only`).
    """
    num_features = 1280
    # Squeeze-excite with hard-sigmoid gating and forced ReLU; reduced
    # channels are rounded with the same helper used for backbone widths.
    se_layer = partial(SqueezeExcite, gate_layer='hard_sigmoid', force_act_layer=nn.ReLU, rd_round_fn=round_channels)
    model_kwargs = dict(
        block_args=decode_arch_def(arch_def),
        num_features=num_features,
        stem_size=32,
        norm_layer=partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)),
        act_layer=resolve_act_layer(kwargs, 'hard_swish'),
        se_layer=se_layer,
        **kwargs,
    )

    features_only = False
    model_cls = MobileNetV3
    kwargs_filter = None
    # Feature-extraction mode: drop classifier-related kwargs and switch
    # to the features-only model class.
    if model_kwargs.pop('features_only', False):
        features_only = True
        kwargs_filter = ('num_classes', 'num_features', 'global_pool', 'head_conv', 'head_bias', 'global_pool')
        model_cls = MobileNetV3Features
    model = build_model_with_cfg(
        model_cls, variant, pretrained,
        default_cfg=default_cfgs[variant],
        pretrained_strict=not features_only,
        kwargs_filter=kwargs_filter,
        **model_kwargs)
    if features_only:
        model.default_cfg = default_cfg_for_features(model.default_cfg)
    return model
@register_model
def hardcorenas_a(pretrained=False, **kwargs):
    """HardCoRe-NAS model, searched architecture variant A."""
    # Per-stage block definition strings consumed by decode_arch_def.
    arch_def = [['ds_r1_k3_s1_e1_c16_nre'], ['ir_r1_k5_s2_e3_c24_nre', 'ir_r1_k5_s1_e3_c24_nre_se0.25'],
                ['ir_r1_k5_s2_e3_c40_nre', 'ir_r1_k5_s1_e6_c40_nre_se0.25'],
                ['ir_r1_k5_s2_e6_c80_se0.25', 'ir_r1_k5_s1_e6_c80_se0.25'],
                ['ir_r1_k5_s1_e6_c112_se0.25', 'ir_r1_k5_s1_e6_c112_se0.25'],
                ['ir_r1_k5_s2_e6_c192_se0.25', 'ir_r1_k5_s1_e6_c192_se0.25'], ['cn_r1_k1_s1_c960']]
    model = _gen_hardcorenas(pretrained=pretrained, variant='hardcorenas_a', arch_def=arch_def, **kwargs)
    return model
@register_model
def hardcorenas_b(pretrained=False, **kwargs):
    """HardCoRe-NAS model, searched architecture variant B."""
    arch_def = [['ds_r1_k3_s1_e1_c16_nre'],
                ['ir_r1_k5_s2_e3_c24_nre', 'ir_r1_k5_s1_e3_c24_nre_se0.25', 'ir_r1_k3_s1_e3_c24_nre'],
                ['ir_r1_k5_s2_e3_c40_nre', 'ir_r1_k5_s1_e3_c40_nre', 'ir_r1_k5_s1_e3_c40_nre'],
                ['ir_r1_k5_s2_e3_c80', 'ir_r1_k5_s1_e3_c80', 'ir_r1_k3_s1_e3_c80', 'ir_r1_k3_s1_e3_c80'],
                ['ir_r1_k5_s1_e3_c112', 'ir_r1_k3_s1_e3_c112', 'ir_r1_k3_s1_e3_c112', 'ir_r1_k3_s1_e3_c112'],
                ['ir_r1_k5_s2_e6_c192_se0.25', 'ir_r1_k5_s1_e6_c192_se0.25', 'ir_r1_k3_s1_e3_c192_se0.25'],
                ['cn_r1_k1_s1_c960']]
    model = _gen_hardcorenas(pretrained=pretrained, variant='hardcorenas_b', arch_def=arch_def, **kwargs)
    return model
@register_model
def hardcorenas_c(pretrained=False, **kwargs):
    """HardCoRe-NAS model, searched architecture variant C."""
    arch_def = [['ds_r1_k3_s1_e1_c16_nre'], ['ir_r1_k5_s2_e3_c24_nre', 'ir_r1_k5_s1_e3_c24_nre_se0.25'],
                ['ir_r1_k5_s2_e3_c40_nre', 'ir_r1_k5_s1_e3_c40_nre', 'ir_r1_k5_s1_e3_c40_nre',
                 'ir_r1_k5_s1_e3_c40_nre'],
                ['ir_r1_k5_s2_e4_c80', 'ir_r1_k5_s1_e6_c80_se0.25', 'ir_r1_k3_s1_e3_c80', 'ir_r1_k3_s1_e3_c80'],
                ['ir_r1_k5_s1_e6_c112_se0.25', 'ir_r1_k3_s1_e3_c112', 'ir_r1_k3_s1_e3_c112', 'ir_r1_k3_s1_e3_c112'],
                ['ir_r1_k5_s2_e6_c192_se0.25', 'ir_r1_k5_s1_e6_c192_se0.25', 'ir_r1_k3_s1_e3_c192_se0.25'],
                ['cn_r1_k1_s1_c960']]
    model = _gen_hardcorenas(pretrained=pretrained, variant='hardcorenas_c', arch_def=arch_def, **kwargs)
    return model
@register_model
def hardcorenas_d(pretrained=False, **kwargs):
    """HardCoRe-NAS model, searched architecture variant D."""
    arch_def = [['ds_r1_k3_s1_e1_c16_nre'], ['ir_r1_k5_s2_e3_c24_nre_se0.25', 'ir_r1_k5_s1_e3_c24_nre_se0.25'],
                ['ir_r1_k5_s2_e3_c40_nre_se0.25', 'ir_r1_k5_s1_e4_c40_nre_se0.25', 'ir_r1_k3_s1_e3_c40_nre_se0.25'],
                ['ir_r1_k5_s2_e4_c80_se0.25', 'ir_r1_k3_s1_e3_c80_se0.25', 'ir_r1_k3_s1_e3_c80_se0.25',
                 'ir_r1_k3_s1_e3_c80_se0.25'],
                ['ir_r1_k3_s1_e4_c112_se0.25', 'ir_r1_k5_s1_e4_c112_se0.25', 'ir_r1_k3_s1_e3_c112_se0.25',
                 'ir_r1_k5_s1_e3_c112_se0.25'],
                ['ir_r1_k5_s2_e6_c192_se0.25', 'ir_r1_k5_s1_e6_c192_se0.25', 'ir_r1_k5_s1_e6_c192_se0.25',
                 'ir_r1_k3_s1_e6_c192_se0.25'], ['cn_r1_k1_s1_c960']]
    model = _gen_hardcorenas(pretrained=pretrained, variant='hardcorenas_d', arch_def=arch_def, **kwargs)
    return model
@register_model
def hardcorenas_e(pretrained=False, **kwargs):
    """HardCoRe-NAS model, searched architecture variant E."""
    arch_def = [['ds_r1_k3_s1_e1_c16_nre'], ['ir_r1_k5_s2_e3_c24_nre_se0.25', 'ir_r1_k5_s1_e3_c24_nre_se0.25'],
                ['ir_r1_k5_s2_e6_c40_nre_se0.25', 'ir_r1_k5_s1_e4_c40_nre_se0.25', 'ir_r1_k5_s1_e4_c40_nre_se0.25',
                 'ir_r1_k3_s1_e3_c40_nre_se0.25'], ['ir_r1_k5_s2_e4_c80_se0.25', 'ir_r1_k3_s1_e6_c80_se0.25'],
                ['ir_r1_k5_s1_e6_c112_se0.25', 'ir_r1_k5_s1_e6_c112_se0.25', 'ir_r1_k5_s1_e6_c112_se0.25',
                 'ir_r1_k5_s1_e3_c112_se0.25'],
                ['ir_r1_k5_s2_e6_c192_se0.25', 'ir_r1_k5_s1_e6_c192_se0.25', 'ir_r1_k5_s1_e6_c192_se0.25',
                 'ir_r1_k3_s1_e6_c192_se0.25'], ['cn_r1_k1_s1_c960']]
    model = _gen_hardcorenas(pretrained=pretrained, variant='hardcorenas_e', arch_def=arch_def, **kwargs)
    return model
@register_model
def hardcorenas_f(pretrained=False, **kwargs):
    """HardCoRe-NAS model, searched architecture variant F."""
    arch_def = [['ds_r1_k3_s1_e1_c16_nre'], ['ir_r1_k5_s2_e3_c24_nre_se0.25', 'ir_r1_k5_s1_e3_c24_nre_se0.25'],
                ['ir_r1_k5_s2_e6_c40_nre_se0.25', 'ir_r1_k5_s1_e6_c40_nre_se0.25'],
                ['ir_r1_k5_s2_e6_c80_se0.25', 'ir_r1_k5_s1_e6_c80_se0.25', 'ir_r1_k3_s1_e3_c80_se0.25',
                 'ir_r1_k3_s1_e3_c80_se0.25'],
                ['ir_r1_k3_s1_e6_c112_se0.25', 'ir_r1_k5_s1_e6_c112_se0.25', 'ir_r1_k5_s1_e6_c112_se0.25',
                 'ir_r1_k3_s1_e3_c112_se0.25'],
                ['ir_r1_k5_s2_e6_c192_se0.25', 'ir_r1_k5_s1_e6_c192_se0.25', 'ir_r1_k3_s1_e6_c192_se0.25',
                 'ir_r1_k3_s1_e6_c192_se0.25'], ['cn_r1_k1_s1_c960']]
    model = _gen_hardcorenas(pretrained=pretrained, variant='hardcorenas_f', arch_def=arch_def, **kwargs)
    return model
| {
"repo_name": "rwightman/pytorch-image-models",
"path": "timm/models/hardcorenas.py",
"copies": "1",
"size": "8036",
"license": "apache-2.0",
"hash": 2368087243129276400,
"line_mean": 51.8684210526,
"line_max": 148,
"alpha_frac": 0.6070184171,
"autogenerated": false,
"ratio": 2.0642178268687386,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8158427680662284,
"avg_score": 0.0025617126612912143,
"num_lines": 152
} |
from functools import partial
import wx
from GuiComponents.InputValidator import *
import wx.grid
class Orientation:
    """Layout-direction constants for widget helpers (e.g. GetRadioBox)."""
    VERTICAL = 1
    HORIZONTAL = 0
class GRID_SELECTION_MODES:
    """Selection-mode constants passed to wx.grid.Grid.SetSelectionMode."""
    CELLS = 0
    ROWS = 1
    COLUMNS = 2
    ROWS_OR_COLUMNS = 3
class WxHelper:
    """Namespace of wxPython widget-factory helpers, plus a reusable
    SeriesGrid widget for listing data series.

    NOTE(review): this class uses Python 2 constructs (``unicode``,
    ``dict.iterkeys``) and will not run unmodified on Python 3.
    """

    def __init__(self):
        pass

    class SeriesGrid(wx.grid.Grid):
        """Read-only grid listing data series, with click-to-sort
        columns and a right-click menu for bulk (de)selection by
        category (site, variable, QC code, source, method)."""
        # (column label, initial column width in px) for each column.
        LABELS = [('Id', 30),
                  ('Site Code', 100),
                  ('Site Name', 150),
                  ('Variable Name', 100),
                  ('Variable Code', 100),
                  ('QC Code', 50),
                  ('Source Description', 150),
                  ('Method Description', 150)]
        # Category label -> column index inspected by _category_selection.
        SERIES_COL = {
            u'Site': 1,
            u'Variable': 3,
            u'QC Code': 4,
            u'Source': 5,
            u'Method': 6
        }

        def __init__(self, app, parent, font=wx.SMALL_FONT, size=None):
            wx.grid.Grid.__init__(self, parent, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.SIMPLE_BORDER)
            if size is not None:
                self.CacheBestSize(size)
                self.SetSizeHints(size)
            self.CreateGrid(0, len(WxHelper.SeriesGrid.LABELS))
            self.EnableEditing(False)
            self.EnableCellEditControl(False)
            self.EnableScrolling(True, True)
            self.EnableGridLines(True)
            self.EnableDragGridSize(False)
            self.SetMargins(4, 4)
            self.LabelFont = font
            self.DefaultCellFont = font
            self.SetSelectionMode(GRID_SELECTION_MODES.ROWS)
            self.DisableCellEditControl()
            # Sort state, used by ApplyLastSort after row inserts.
            self.LastRowSorted = 0
            self.LastSortInverted = False

            for i in range(0, len(WxHelper.SeriesGrid.LABELS)):
                self.SetColLabelValue(i, WxHelper.SeriesGrid.LABELS[i][0])
                self.SetColSize(i, WxHelper.SeriesGrid.LABELS[i][1])

            self.EnableDragColMove(True)
            self.EnableDragColSize(True)
            self.SetColLabelSize(20)
            self.SetColLabelAlignment(wx.ALIGN_CENTRE, wx.ALIGN_CENTRE)
            self.EnableDragRowSize(True)
            self.SetRowLabelSize(1)
            self.SetRowLabelAlignment(wx.ALIGN_CENTRE, wx.ALIGN_CENTRE)
            self.SetDefaultCellAlignment(wx.ALIGN_LEFT, wx.ALIGN_TOP)
            # self.SetDefaultCellAlignment(wx.ALIGN_CENTRE, wx.ALIGN_TOP)

            app.Bind(wx.PyEventBinder(wx.grid.wxEVT_GRID_CELL_RIGHT_CLICK, 1), self.OnCellRightClick, self)
            app.Bind(wx.PyEventBinder(wx.grid.wxEVT_GRID_COL_SORT, 1), self.OnSortClicked, self)

        def OnSortClicked(self, event):
            """Sort by the clicked column; a repeat click on the same
            column inverts the sort order.

            :type event: wx.grid.GridEvent
            """
            sort_inverted = not self.LastSortInverted if self.LastRowSorted == event.Col else False
            self.SortRowsByColumn(event.Col, sort_inverted)

        def ApplyLastSort(self):
            """Re-apply the most recent sort (used after inserting rows)."""
            self.SortRowsByColumn(self.LastRowSorted, self.LastSortInverted)

        def SortRowsByColumn(self, column_number, sort_inverted):
            """Rebuild the grid with rows sorted by `column_number`.

            Cell values that parse as floats are sorted numerically,
            everything else as strings.
            """
            sorted_list = []
            for i in range(0, self.NumberRows):
                sort_value = self.GetCellValue(i, column_number)
                try:
                    sort_value = float(sort_value)
                except ValueError:
                    pass
                sorted_list.append((sort_value, self.GetValuesForRow(i)))
            sorted_list.sort(key=lambda x: x[0], reverse=sort_inverted)
            self.Clear()
            for row_values in [item[1] for item in sorted_list]:
                self.AddGridRow(list(row_values))
            self.LastRowSorted = column_number
            self.LastSortInverted = sort_inverted

        def GetValuesForRow(self, row_number):
            """Return every cell value of a row, left to right."""
            return [self.GetCellValue(row_number, column_number) for column_number in range(0, self.NumberCols)]

        def AddGridRow(self, values):
            """Append one row, truncating `values` to the column count.

            :type values: list[object]
            :type grid: wx.grid.Grid
            """
            num_cols = len(values) if len(values) <= self.NumberCols else self.NumberCols
            self.AppendRows(1)
            for i in range(0, num_cols):
                # Python 2 `unicode` conversion; str() on Python 3.
                self.SetCellValue(self.GetNumberRows() - 1, i, unicode(values[i]))

        def AppendSeries(self, series):
            """Append one series object as a grid row (no re-sort)."""
            values = [series.id, series.site_code, series.site_name, series.variable_name,
                      series.variable_code, series.quality_control_level_code,
                      series.source_description, series.method_description]
            self.AddGridRow(values)

        def InsertSeriesList(self, series_list, do_sort=True):
            """Append many series, optionally re-applying the last sort."""
            for series in series_list:
                self.AppendSeries(series)
            if do_sort:
                self.ApplyLastSort()

        def InsertSeries(self, series, do_sort=True):
            """Append one series, optionally re-applying the last sort."""
            self.AppendSeries(series)
            if do_sort:
                self.ApplyLastSort()

        def RemoveSelectedRows(self):
            """Delete every currently selected row.

            The inner while re-checks index i because deleting shifts
            subsequent rows up into the same index.
            """
            for i in range(0, self.NumberRows):
                while i in self.GetSelectedRows():
                    self.DeleteRows(i)

        def GetSelectedSeries(self):
            """Return the series ids (column 0) of the selected rows."""
            return [int(self.GetCellValue(row, 0)) for row in self.GetSelectedRows()]

        def GetSeries(self):
            """Return the series ids (column 0) of all rows."""
            series = []
            for row in range(0, self.NumberRows):
                series.append(int(self.GetCellValue(row, 0)))
            return series

        def Clear(self):
            """Remove all rows (no-op on an already empty grid)."""
            if self.NumberRows > 0:
                self.DeleteRows(0, self.NumberRows)

        def OnCellRightClick(self, event):
            """Show the bulk select/deselect context menu.

            :type event: wx.grid.GridEvent
            """
            menu = wx.Menu()
            WxHelper.AddNewMenuItem(self, menu, 'Select All', on_click=partial(self._category_selection,
                                                                               command='All', row=event.GetRow()))
            WxHelper.AddNewMenuItem(self, menu, 'Deselect All', on_click=partial(self._category_selection,
                                                                                 command='None', row=event.GetRow()))
            # One select/deselect pair per category column.
            # NOTE(review): Python 2 `iterkeys`; use keys() on Python 3.
            for text in WxHelper.SeriesGrid.SERIES_COL.iterkeys():
                select = text + ': Select All'
                deselect = text + ': Deselect All'
                WxHelper.AddNewMenuItem(self, menu, select, on_click=partial(self._category_selection,
                                                                             command=select, row=event.GetRow()))
                WxHelper.AddNewMenuItem(self, menu, deselect, on_click=partial(self._category_selection,
                                                                               command=deselect, row=event.GetRow()))
            self.PopupMenu(menu)

        def _category_selection(self, event, command, row):
            """Select/deselect all rows sharing the clicked row's value
            in the category's column (or the whole grid for All/None)."""
            if command == 'All':
                self.SelectAll()
            elif command == 'None':
                self.ClearSelection()
            else:
                category, action = command.split(u': ')
                check_column = WxHelper.SeriesGrid.SERIES_COL[category]
                check_value = self.GetCellValue(row, check_column)
                if check_value is None or len(check_value) == 0:
                    print('Unable to parse information for row {} and column {}'.format(row, check_column))
                    return
                for i in range(0, self.NumberRows):
                    cell_value = self.GetCellValue(i, check_column)
                    if cell_value == check_value:
                        if action == 'Select All':
                            self.SelectRow(i, addToSelected=True)
                        elif action == 'Deselect All':
                            self.DeselectRow(i)

    @staticmethod
    def GetFlags(flags=0, expand=True, top=True, bottom=True, left=True, right=True):
        """Compose wx sizer flags from boolean switches."""
        flags |= wx.EXPAND if expand else 0
        flags |= wx.TOP if top else 0
        flags |= wx.LEFT if left else 0
        flags |= wx.RIGHT if right else 0
        flags |= wx.BOTTOM if bottom else 0
        return flags

    @staticmethod
    def GetBitmap(path, size_x=None, size_y=None):
        """Load a bitmap from disk, optionally rescaled to the given size."""
        image = wx.Bitmap.ConvertToImage(wx.Bitmap(path, wx.BITMAP_TYPE_ANY))
        if size_x is not None and size_y is not None:
            image = image.Scale(size_x, size_y, wx.IMAGE_QUALITY_HIGH)
        return wx.Bitmap(image)

    @staticmethod
    def GetGridBagSizer(padding_x=8, padding_y=8):
        """Create a fully flexible GridBagSizer with the given gaps."""
        sizer = wx.GridBagSizer(vgap=padding_y, hgap=padding_x)
        sizer.SetFlexibleDirection(direction=wx.BOTH)
        sizer.SetNonFlexibleGrowMode(mode=wx.FLEX_GROWMODE_ALL)
        return sizer

    @staticmethod
    def GetRadioBox(parent, label, options, orientation=Orientation.VERTICAL):
        """Create a radio box with the first option preselected."""
        radiobox = wx.RadioBox(parent, wx.ID_ANY, label, wx.DefaultPosition, wx.DefaultSize, options, orientation,
                               wx.RA_SPECIFY_ROWS)
        radiobox.SetSelection(0)
        return radiobox

    @staticmethod
    def GetWxSize(size_x, size_y):
        """Build a wx.Size, mapping None to -1 (wx 'default' dimension)."""
        size_x = -1 if size_x is None else size_x
        size_y = -1 if size_y is None else size_y
        return wx.Size(size_x, size_y)

    @staticmethod
    def GetTextInput(parent, text=u'', size_x=None, size_y=None, valid_input=PATTERNS.ANY,
                     max_length=None, wrap_text=False, style=7, **kwargs):
        """Create a TextCtrl (or a StaticText when 'static_text' is
        passed in kwargs) with size, validation and wrapping options."""
        control = wx.TextCtrl
        if 'static_text' in kwargs:
            kwargs.pop('static_text')
            kwargs.update({'label': text})
            control = wx.StaticText
        else:
            kwargs.update({'value': text,
                           'validator': CharValidator(valid_input)})
        if wrap_text:
            style = style | wx.TE_BESTWRAP | wx.TE_MULTILINE
        kwargs.update({'pos': wx.DefaultPosition,
                       'size': wx.DefaultSize,
                       'style': style})
        text_ctrl = control(parent, wx.ID_ANY, **kwargs)
        text_ctrl.SetMinSize(WxHelper.GetWxSize(size_x, size_y))
        text_ctrl.SetMaxSize(WxHelper.GetWxSize(size_x, size_y))
        if max_length is not None:
            text_ctrl.SetMaxLength(max_length)
        return text_ctrl

    @staticmethod
    def GetStaticText(parent, label, **kwargs):
        """Create a StaticText via GetTextInput's static_text path."""
        return WxHelper.GetTextInput(parent, label, static_text=True, **kwargs)

    @staticmethod
    def GetListBox(app, parent, items, on_right_click=None, size_x=None, size_y=None, font=None, style=wx.LB_EXTENDED|wx.HSCROLL):
        """Create a ListBox, optionally sized, fonted, and bound to a
        right-click (context-menu) handler."""
        # __init__(self, parent=None, id=None, pos=None, size=None, choices=[], style=0, validator=None, name=None)
        listbox = wx.ListBox(parent, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, items, style)
        if size_x is not None and size_y is not None:
            listbox.SetMinSize(wx.Size(size_x, size_y))
            listbox.SetMaxSize(wx.Size(size_x, size_y))
        if font is not None:
            listbox.SetFont(font)
        if on_right_click is not None:
            app.Bind(wx.EVT_CONTEXT_MENU, on_right_click, listbox)
        return listbox

    @staticmethod
    def GetButton(app, parent, label, on_click=None, size_x=None, size_y=None, **kwargs):
        """Create a Button, optionally sized and bound to a click handler."""
        button = wx.Button(parent, id=wx.ID_ANY, label=label, pos=wx.DefaultPosition, size=wx.DefaultSize, **kwargs)
        if size_x is not None and size_y is not None:
            button.SetMinSize(wx.Size(size_x, size_y))
            button.SetMaxSize(wx.Size(size_x, size_y))
        if on_click is not None:
            app.Bind(wx.EVT_BUTTON, on_click, button)
        return button

    @staticmethod
    def GetChoice(app, parent, choices, on_change=None, size_x=None, size_y=None, font=None):
        """Create a Choice control with the first item preselected."""
        choice = wx.Choice(parent, wx.ID_ANY, choices=choices)
        if size_x is not None and size_y is not None:
            choice.SetMinSize(wx.Size(size_x, size_y))
            choice.SetMaxSize(wx.Size(size_x, size_y))
        if on_change is not None:
            app.Bind(wx.EVT_CHOICE, on_change, choice)
        if font is not None:
            choice.SetFont(font)
        choice.SetSelection(0)
        return choice

    @staticmethod
    def GetCheckBox(app, parent, label, on_change=None, checked=False):
        """Create a CheckBox with an optional initial state and handler."""
        checkbox = wx.CheckBox(parent, wx.ID_ANY, label, wx.Point(-1, -1), wx.DefaultSize, 0)
        if checked:
            checkbox.SetValue(wx.CHK_CHECKED)
        if on_change is not None:
            app.Bind(wx.EVT_CHECKBOX, on_change, checkbox)
        return checkbox

    @staticmethod
    def GetLabel(parent, text, font=None, style=7):
        """Create a StaticText label with an optional font."""
        label = wx.StaticText(parent, wx.ID_ANY, text, style=style)
        if font is not None:
            label.SetFont(font)
        return label

    @staticmethod
    def GetHelpLabel(parent, text, **kwargs):
        """Create a help-styled button label (wx.ID_HELP)."""
        font = None
        if 'font' in kwargs:
            font = kwargs.pop('font')
        label = wx.Button(parent, wx.ID_HELP, text, **kwargs)
        if font:
            label.SetFont(font)
        return label

    @staticmethod
    def AddNewMenuItem(app, menu, label, on_click=None, return_item=False):
        """Append a menu item, optionally bound to a handler; returns
        the item only when return_item is True."""
        menu_item = wx.MenuItem(menu, wx.ID_ANY, label)
        if on_click is not None:
            app.Bind(wx.EVT_MENU, on_click, menu_item)
        menu.Append(menu_item)
        if return_item:
            return menu_item

    @staticmethod
    def UpdateChoiceControl(control, choices):
        """Replace a Choice control's items, preserving the current
        selection by string if possible, else by index, else item 0."""
        if control is not None and choices is not None:
            db_index = control.GetCurrentSelection()
            db_name = control.GetStringSelection()
            control.Clear()
            control.SetItems(choices if isinstance(choices, list) else list(choices))
            string_index = control.FindString(db_name)
            if string_index >= 0:
                control.SetSelection(string_index)
            elif db_index < len(control.Items):
                control.SetSelection(db_index)
            else:
                control.SetSelection(0)

    @staticmethod
    def GetMouseClickIndex(event, control):
        """Hit-test a mouse event against a control's client area."""
        evt_pos = event.GetPosition()
        list_pos = control.ScreenToClient(evt_pos)
        return control.HitTest(list_pos)

    @staticmethod
    def ModalConfirm(app, message, caption='Confirm Action'):
        """Build (but do not show) a yes/no confirmation dialog."""
        dialog = wx.MessageDialog(app, message, caption, wx.YES_NO | wx.ICON_QUESTION)
        return dialog
class PADDING:
    """Pre-built wx sizer border-flag combinations (see WxHelper.GetFlags)."""
    VERTICAL = WxHelper.GetFlags(left=False, right=False)
    HORIZONTAL = WxHelper.GetFlags(top=False, bottom=False)
    ALL = WxHelper.GetFlags()
class ALIGN:
    """Pre-built wx sizer alignment-flag combinations."""
    CENTER = wx.ALIGN_CENTER | wx.EXPAND | wx.ALIGN_CENTER_VERTICAL
    LEFT = wx.ALIGN_LEFT | wx.EXPAND | wx.ALIGN_CENTER_VERTICAL
    RIGHT = wx.ALIGN_RIGHT | wx.EXPAND | wx.ALIGN_CENTER_VERTICAL
| {
"repo_name": "UCHIC/h2outility",
"path": "src/GuiComponents/WxUtilities.py",
"copies": "1",
"size": "14741",
"license": "bsd-3-clause",
"hash": 2419786922174822000,
"line_mean": 37.189119171,
"line_max": 130,
"alpha_frac": 0.5746557221,
"autogenerated": false,
"ratio": 3.866998950682057,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.993612998840276,
"avg_score": 0.001104936875859178,
"num_lines": 386
} |
from functools import partial
import wx
from PIL import Image
import shared
from util import load_data, save_data, AddLinearSpacer, SetupChoice
from widgets import LabeledWidget
class ImagePanel(wx.Panel):
    """Base panel for per-image-format save options.

    Subclasses set ``format`` (a PIL format name) and build their own
    controls; the ``On*`` handlers persist each changed option into the
    shared format dictionary and write it to ``savefile``.
    """
    format = None
    savefile = 'imgdata.json'
    data = None  # class-level cache of the persisted settings
    # UI label -> PIL constant/mode translation tables.
    colormode_dict = {'RGB': 'RGB', 'Palette': 'P',
                      'Grayscale': 'L', 'Bilevel': '1'}
    palette_dict = {'Web': Image.WEB, 'Adaptive': Image.ADAPTIVE}
    dither_dict = {'Floydsteinberg': Image.FLOYDSTEINBERG, 'None': Image.NONE}

    def __init__(self, *args, **kwargs):
        wx.Panel.__init__(self, *args, **kwargs)
        # Load persisted settings once and share them across all panels.
        if ImagePanel.data is None:
            ImagePanel.data = load_data(self.savefile)
            shared.format_dict.update(ImagePanel.data)
        self.data = ImagePanel.data.get(self.format, {})

    def OnEvent(self, event, attr, func):
        """Store ``func()`` under this panel's format/attr and persist."""
        # Idiom fix: `x not in y` instead of `not x in y`.
        if self.format not in shared.format_dict:
            shared.format_dict[self.format] = {}
        shared.format_dict[self.format][attr] = func()
        save_data(shared.format_dict, file=self.savefile)

    def OnSlider(self, event, attr, func=None):
        """Persist a slider value (default: the event's int value)."""
        self.OnEvent(event, attr, func or event.GetInt)

    def OnCheckBox(self, event, attr, func=None):
        """Persist a checkbox state (default: the event's checked flag)."""
        self.OnEvent(event, attr, func or event.IsChecked)

    def OnChoice(self, event, attr, edict=None, func=None):
        """Persist a choice selection, optionally translated via `edict`."""
        if edict:
            func = lambda: edict[event.GetString()]
        else:
            func = func or event.GetString
        self.OnEvent(event, attr, func)

    def onColour(self, event, attr, func=None):
        """Persist a colour as an (r, g, b) tuple (alpha excluded)."""
        self.OnEvent(event, attr,
                     func or (lambda: event.GetColour().Get(False)))
class JpegPanel(ImagePanel):
    """Options panel for saving as JPEG: quality, optimize, progressive,
    background colour and colour mode.
    """
    format = 'JPEG'

    def __init__(self, *args, **kwargs):
        ImagePanel.__init__(self, *args, **kwargs)
        # Set up controls
        labeledQuality = LabeledWidget(parent=self, cls=wx.Slider,
                                       label='Quality',
                                       minValue=1, maxValue=95,
                                       style=wx.SL_HORIZONTAL)
        qualitySlider = labeledQuality.widget
        qualitySlider.SetValue(self.data.get('quality', 75))
        qualityText = wx.StaticText(parent=self)

        # Keeps the numeric label in sync with the slider; returns the value
        # so it doubles as the persistence callback for OnSlider.
        def setQualityText():
            quality = qualitySlider.GetValue()
            qualityText.SetLabel(str(quality))
            return quality
        setQualityText()
        qualitySlider.Bind(wx.EVT_SLIDER,
                           partial(self.OnSlider, attr='quality',
                                   func=setQualityText))
        optimizeCheckBox = wx.CheckBox(parent=self, label='Optimize')
        optimizeCheckBox.SetValue(self.data.get('optimize', False))
        optimizeCheckBox.Bind(wx.EVT_CHECKBOX,
                              partial(self.OnCheckBox, attr='optimize'))
        progCheckBox = wx.CheckBox(parent=self, label='Progressive')
        progCheckBox.SetValue(self.data.get('progressive', False))
        progCheckBox.Bind(wx.EVT_CHECKBOX,
                          partial(self.OnCheckBox, attr='progressive'))
        labeledBackgroundColor = LabeledWidget(parent=self, label='Background',
                                               cls=wx.ColourPickerCtrl)
        bgColorPicker = labeledBackgroundColor.widget
        bgColorPicker.SetColour(self.data.get('background',
                                              (255, 255, 255)))
        bgColorPicker.Bind(wx.EVT_COLOURPICKER_CHANGED,
                           partial(self.onColour, attr='background'))
        labeledColormode = LabeledWidget(parent=self, cls=wx.Choice,
                                         label='Color Mode')
        colormodeChoice = labeledColormode.widget
        SetupChoice(colormodeChoice, ['RGB', 'Grayscale', 'Bilevel'],
                    self.data.get('colormode'), self.colormode_dict)
        colormodeChoice.Bind(wx.EVT_CHOICE, partial(self.OnChoice,
                                                    attr='colormode',
                                                    edict=self.colormode_dict))
        # Layout: centred horizontal rows inside a vertical sizer; the
        # stretch spacers centre the content block vertically.
        self.sizer = wx.BoxSizer(wx.VERTICAL)
        self.sizer.AddStretchSpacer(prop=1)
        hSizer = wx.BoxSizer(wx.HORIZONTAL)
        hSizer.AddStretchSpacer(prop=1)
        hSizer.Add(labeledQuality, flag=wx.ALIGN_CENTER)
        hSizer.Add(qualityText, flag=wx.ALIGN_CENTER)
        hSizer.AddStretchSpacer(prop=1)
        self.sizer.Add(hSizer, flag=wx.ALIGN_CENTER)
        AddLinearSpacer(self.sizer, 5)
        hSizer = wx.BoxSizer(wx.HORIZONTAL)
        hSizer.AddStretchSpacer(prop=1)
        hSizer.Add(optimizeCheckBox, flag=wx.ALIGN_CENTER)
        AddLinearSpacer(hSizer, 5)
        hSizer.Add(progCheckBox, flag=wx.ALIGN_CENTER)
        AddLinearSpacer(hSizer, 5)
        hSizer.Add(labeledBackgroundColor, flag=wx.ALIGN_CENTER)
        hSizer.AddStretchSpacer(prop=1)
        self.sizer.Add(hSizer, flag=wx.ALIGN_CENTER)
        AddLinearSpacer(self.sizer, 10)
        self.sizer.Add(labeledColormode, flag=wx.ALIGN_CENTER)
        self.sizer.AddStretchSpacer(prop=1)
        #Layout sizers
        self.SetSizer(self.sizer)
        self.SetAutoLayout(1)
class PngPanel(ImagePanel):
    """Options panel for saving as PNG: optimize/interlace, transparency or
    background colour, colour mode, and palette settings (palette, dither,
    colors, bits) that only apply in Palette ('P') mode.
    """
    format = 'PNG'

    def __init__(self, *args, **kwargs):
        ImagePanel.__init__(self, *args, **kwargs)
        # Set up controls
        optimizeCheckBox = wx.CheckBox(parent=self, label='Optimize')
        optimizeCheckBox.SetValue(self.data.get('optimize', False))
        optimizeCheckBox.Bind(wx.EVT_CHECKBOX,
                              partial(self.OnCheckBox, attr='optimize'))
        labeledBgColor = LabeledWidget(parent=self, label='Background',
                                       cls=wx.ColourPickerCtrl)
        bgColorText, bgColorPicker = labeledBgColor.GetControls()
        bgColorPicker.SetColour(self.data.get('background',
                                              (255, 255, 255)))
        bgColorPicker.Bind(wx.EVT_COLOURPICKER_CHANGED,
                           partial(self.onColour, attr='background'))
        transCheckBox = wx.CheckBox(parent=self, label='Transparency')
        transCheckBox.SetValue(self.data.get('transparent', False))

        # Transparency and background colour are mutually exclusive: the
        # colour picker is disabled while transparency is checked.
        def onTrans():
            value = transCheckBox.IsChecked()
            labeledBgColor.Enable(not value)
            return value
        transCheckBox.Bind(wx.EVT_CHECKBOX, partial(self.OnCheckBox,
                                                    attr='transparent',
                                                    func=onTrans))
        onTrans()
        interlaceCheckBox = wx.CheckBox(parent=self, label='Interlace')
        interlaceCheckBox.SetValue(self.data.get('interlace', False))
        interlaceCheckBox.Bind(wx.EVT_CHECKBOX, partial(self.OnCheckBox,
                                                        attr='interlace'))
        self.labeledColormode = LabeledWidget(parent=self, cls=wx.Choice,
                                              label='Color Mode')
        colormodeChoice = self.labeledColormode.widget
        SetupChoice(colormodeChoice, ['RGB', 'Palette',
                                      'Grayscale', 'Bilevel'],
                    self.data.get('colormode'), self.colormode_dict)
        colormodeChoice.Bind(wx.EVT_CHOICE, partial(self.OnChoice,
                                                    attr='colormode',
                                                    func=self.onColormodeChoice))
        self.paletteChoice = wx.Choice(parent=self)
        SetupChoice(self.paletteChoice,
                    ['Web', 'Adaptive'], self.data.get('palette'),
                    self.palette_dict)
        self.paletteChoice.Bind(wx.EVT_CHOICE,
                                partial(self.OnChoice, attr='palette',
                                        edict=self.palette_dict))
        self.paletteText = wx.StaticText(parent=self, label='Palette')
        self.ditherChoice = wx.Choice(parent=self)
        SetupChoice(self.ditherChoice,
                    ['Floydsteinberg', 'None'],
                    self.data.get('dither'), self.dither_dict)
        self.ditherChoice.Bind(wx.EVT_CHOICE,
                               partial(self.OnChoice, attr='dither',
                                       edict=self.dither_dict))
        self.ditherText = wx.StaticText(parent=self, label='Dither')
        self.colorsChoice = wx.ComboBox(parent=self, style=wx.TE_PROCESS_ENTER)
        self.colorsChoice.AppendItems(strings=map(str, [2, 4, 8, 16, 32,
                                                        64, 128, 256]))
        self.colorsChoice.SetValue(str(self.data.get('colors', 256)))

        # Selection from the dropdown list is always a valid power of two.
        def onColorChoice():
            value = int(self.colorsChoice.GetStringSelection())
            self.GetParent().GetChildren()[0].SetFocus()
            return value
        self.colorsChoice.Bind(wx.EVT_COMBOBOX,
                               partial(self.OnChoice, attr='colors',
                                       func=onColorChoice))

        # Free-text entry: values outside the accepted range are undone.
        # NOTE(review): `value >= 256` also rejects 256 even though 256 is a
        # dropdown option and the stored default — likely should be > 256.
        def onColorEnter():
            try:
                value = int(self.colorsChoice.GetValue())
            except ValueError:
                value = -1
            if value <= 0 or value >= 256:
                try:
                    self.colorsChoice.Undo()
                    value = int(self.colorsChoice.GetValue())
                except:
                    value = 256
            self.GetParent().GetChildren()[0].SetFocus()
            return value
        self.colorsChoice.Bind(wx.EVT_TEXT_ENTER,
                               partial(self.OnChoice, attr='colors',
                                       func=onColorEnter))
        self.colorsText = wx.StaticText(parent=self, label='Colors')
        bitsChoice = wx.Choice(parent=self)
        SetupChoice(bitsChoice,
                    map(str, [1, 2, 4, 8]),
                    str(self.data.get('bits', 8)))
        bitsChoice.Bind(wx.EVT_CHOICE,
                        partial(self.OnChoice, attr='bits',
                                func=(lambda: int(bitsChoice.GetStringSelection()))))
        bitsText = wx.StaticText(parent=self, label='Bits')
        # Grey out the palette-related controls unless the mode is 'P'.
        self.onColormodeChoice()
        # Layout: centred horizontal rows inside a vertical sizer.
        self.sizer = wx.BoxSizer(wx.VERTICAL)
        self.sizer.AddStretchSpacer(prop=1)
        hSizer = wx.BoxSizer(wx.HORIZONTAL)
        hSizer.AddStretchSpacer(prop=1)
        hSizer.Add(optimizeCheckBox, flag=wx.ALIGN_CENTER)
        AddLinearSpacer(hSizer, 10)
        hSizer.Add(interlaceCheckBox, flag=wx.ALIGN_CENTER)
        AddLinearSpacer(hSizer, 10)
        hSizer.AddStretchSpacer(prop=1)
        self.sizer.Add(hSizer, flag=wx.ALIGN_CENTER)
        AddLinearSpacer(self.sizer, 5)
        hSizer = wx.BoxSizer(wx.HORIZONTAL)
        hSizer.AddStretchSpacer(prop=1)
        hSizer.Add(transCheckBox, flag=wx.ALIGN_CENTER)
        AddLinearSpacer(hSizer, 10)
        hSizer.Add(labeledBgColor, flag=wx.ALIGN_CENTER)
        hSizer.AddStretchSpacer(prop=1)
        self.sizer.Add(hSizer, flag=wx.ALIGN_CENTER)
        AddLinearSpacer(self.sizer, 5)
        hSizer = wx.BoxSizer(wx.HORIZONTAL)
        hSizer.AddStretchSpacer(prop=1)
        hSizer.Add(self.labeledColormode, flag=wx.ALIGN_CENTER)
        AddLinearSpacer(hSizer, 15)
        hSizer.Add(self.paletteText, flag=wx.ALIGN_CENTER)
        AddLinearSpacer(hSizer, 5)
        hSizer.Add(self.paletteChoice, flag=wx.ALIGN_CENTER)
        hSizer.AddStretchSpacer(prop=1)
        self.sizer.Add(hSizer, flag=wx.ALIGN_CENTER)
        AddLinearSpacer(self.sizer, 10)
        hSizer = wx.BoxSizer(wx.HORIZONTAL)
        hSizer.AddStretchSpacer(prop=1)
        hSizer.Add(self.ditherText, flag=wx.ALIGN_CENTER)
        AddLinearSpacer(hSizer, 5)
        hSizer.Add(self.ditherChoice, flag=wx.ALIGN_CENTER)
        AddLinearSpacer(hSizer, 15)
        hSizer.Add(self.colorsText, flag=wx.ALIGN_CENTER)
        AddLinearSpacer(hSizer, 5)
        hSizer.Add(self.colorsChoice, flag=wx.ALIGN_CENTER)
        hSizer.AddStretchSpacer(prop=1)
        self.sizer.Add(hSizer, flag=wx.ALIGN_CENTER)
        AddLinearSpacer(self.sizer, 10)
        hSizer = wx.BoxSizer(wx.HORIZONTAL)
        hSizer.AddStretchSpacer(prop=1)
        hSizer.Add(bitsText, flag=wx.ALIGN_CENTER)
        AddLinearSpacer(hSizer, 5)
        hSizer.Add(bitsChoice, flag=wx.ALIGN_CENTER)
        hSizer.AddStretchSpacer(prop=1)
        self.sizer.Add(hSizer, flag=wx.ALIGN_CENTER)
        self.sizer.AddStretchSpacer(prop=1)
        #Layout sizers
        self.SetSizer(self.sizer)
        self.SetAutoLayout(1)

    def onColormodeChoice(self):
        """Enable palette/dither/colors controls only for Palette ('P') mode;
        returns the selected PIL mode string.
        """
        mode = self.colormode_dict[self.labeledColormode.widget.
                                   GetStringSelection()]
        enabled = (mode == 'P')
        # NOTE(review): text_color is computed but never applied to anything.
        text_color = (0, 0, 0) if enabled else (130, 130, 130)
        controls = (self.paletteChoice, self.ditherChoice, self.colorsChoice)
        for control in controls:
            control.Enable(enabled)
        texts = (self.paletteText, self.ditherText, self.colorsText)
        for text in texts:
            text.Enable(enabled)
        return mode
class GifPanel(ImagePanel):
    """Options panel for saving as GIF: optimize/interlace, transparency or
    background colour, colour mode, and palette settings (palette, dither,
    colors) that only apply in Palette ('P') mode.
    """
    format = 'GIF'

    def __init__(self, *args, **kwargs):
        ImagePanel.__init__(self, *args, **kwargs)
        # Set up controls
        optimizeCheckBox = wx.CheckBox(parent=self, label='Optimize')
        optimizeCheckBox.SetValue(self.data.get('optimize', False))
        optimizeCheckBox.Bind(wx.EVT_CHECKBOX,
                              partial(self.OnCheckBox, attr='optimize'))
        bgColorPicker = wx.ColourPickerCtrl(parent=self)
        bgColorPicker.SetColour(self.data.get('background',
                                              (255, 255, 255)))
        bgColorPicker.Bind(wx.EVT_COLOURPICKER_CHANGED,
                           partial(self.onColour, attr='background'))
        bgColorText = wx.StaticText(parent=self, label='Background')
        transCheckBox = wx.CheckBox(parent=self, label='Transparency')
        transCheckBox.SetValue(self.data.get('transparent', False))

        # Transparency and background colour are mutually exclusive: the
        # colour picker (and its label) are disabled while checked.
        def onTrans():
            value = transCheckBox.IsChecked()
            bgColorText.Enable(not value)
            bgColorPicker.Enable(not value)
            return value
        transCheckBox.Bind(wx.EVT_CHECKBOX,
                           partial(self.OnCheckBox, attr='transparent',
                                   func=onTrans))
        onTrans()
        interlaceCheckBox = wx.CheckBox(parent=self, label='Interlace')
        interlaceCheckBox.SetValue(self.data.get('interlace', False))
        interlaceCheckBox.Bind(wx.EVT_CHECKBOX,
                               partial(self.OnCheckBox, attr='interlace'))
        self.colormodeChoice = wx.Choice(parent=self)
        SetupChoice(self.colormodeChoice,
                    ['RGB', 'Palette', 'Grayscale', 'Bilevel'],
                    self.data.get('colormode'), self.colormode_dict)
        self.colormodeChoice.Bind(wx.EVT_CHOICE,
                                  partial(self.OnChoice, attr='colormode',
                                          func=self.onColormodeChoice))
        colormodeText = wx.StaticText(parent=self, label='Color Mode')
        self.paletteChoice = wx.Choice(parent=self)
        SetupChoice(self.paletteChoice,
                    ['Web', 'Adaptive'], self.data.get('palette'),
                    self.palette_dict)
        self.paletteChoice.Bind(wx.EVT_CHOICE,
                                partial(self.OnChoice, attr='palette',
                                        edict=self.palette_dict))
        self.paletteText = wx.StaticText(parent=self, label='Palette')
        self.ditherChoice = wx.Choice(parent=self)
        SetupChoice(self.ditherChoice,
                    ['Floydsteinberg', 'None'],
                    self.data.get('dither'), self.dither_dict)
        self.ditherChoice.Bind(wx.EVT_CHOICE,
                               partial(self.OnChoice, attr='dither',
                                       edict=self.dither_dict))
        self.ditherText = wx.StaticText(parent=self, label='Dither')
        self.colorsChoice = wx.ComboBox(parent=self, style=wx.TE_PROCESS_ENTER)
        self.colorsChoice.AppendItems(strings=map(str, [2, 4, 8, 16, 32,
                                                        64, 128, 256]))
        self.colorsChoice.SetValue(str(self.data.get('colors', 256)))

        # Selection from the dropdown list is always a valid power of two.
        def onColorChoice():
            value = int(self.colorsChoice.GetStringSelection())
            self.GetParent().GetChildren()[0].SetFocus()
            return value
        self.colorsChoice.Bind(wx.EVT_COMBOBOX,
                               partial(self.OnChoice, attr='colors',
                                       func=onColorChoice))

        # Free-text entry: values outside the accepted range are undone.
        # NOTE(review): `value >= 256` also rejects 256 even though 256 is a
        # dropdown option and the stored default — likely should be > 256.
        def onColorEnter():
            try:
                value = int(self.colorsChoice.GetValue())
            except ValueError:
                value = -1
            if value <= 0 or value >= 256:
                try:
                    self.colorsChoice.Undo()
                    value = int(self.colorsChoice.GetValue())
                except:
                    value = 256
            self.GetParent().GetChildren()[0].SetFocus()
            return value
        self.colorsChoice.Bind(wx.EVT_TEXT_ENTER,
                               partial(self.OnChoice, attr='colors',
                                       func=onColorEnter))
        self.colorsText = wx.StaticText(parent=self, label='Colors')
        # Grey out the palette-related controls unless the mode is 'P'.
        self.onColormodeChoice()
        # Layout: centred horizontal rows inside a vertical sizer.
        self.sizer = wx.BoxSizer(wx.VERTICAL)
        self.sizer.AddStretchSpacer(prop=1)
        hSizer = wx.BoxSizer(wx.HORIZONTAL)
        hSizer.AddStretchSpacer(prop=1)
        hSizer.Add(optimizeCheckBox, flag=wx.ALIGN_CENTER)
        AddLinearSpacer(hSizer, 10)
        hSizer.Add(interlaceCheckBox, flag=wx.ALIGN_CENTER)
        AddLinearSpacer(hSizer, 10)
        hSizer.AddStretchSpacer(prop=1)
        self.sizer.Add(hSizer, flag=wx.ALIGN_CENTER)
        AddLinearSpacer(self.sizer, 5)
        hSizer = wx.BoxSizer(wx.HORIZONTAL)
        hSizer.AddStretchSpacer(prop=1)
        hSizer.Add(transCheckBox, flag=wx.ALIGN_CENTER)
        AddLinearSpacer(hSizer, 10)
        hSizer.Add(bgColorText, flag=wx.ALIGN_CENTER)
        AddLinearSpacer(hSizer, 5)
        hSizer.Add(bgColorPicker, flag=wx.ALIGN_CENTER)
        hSizer.AddStretchSpacer(prop=1)
        self.sizer.Add(hSizer, flag=wx.ALIGN_CENTER)
        AddLinearSpacer(self.sizer, 5)
        hSizer = wx.BoxSizer(wx.HORIZONTAL)
        hSizer.AddStretchSpacer(prop=1)
        hSizer.Add(colormodeText, flag=wx.ALIGN_CENTER)
        AddLinearSpacer(hSizer, 5)
        hSizer.Add(self.colormodeChoice, flag=wx.ALIGN_CENTER)
        AddLinearSpacer(hSizer, 15)
        hSizer.Add(self.paletteText, flag=wx.ALIGN_CENTER)
        AddLinearSpacer(hSizer, 5)
        hSizer.Add(self.paletteChoice, flag=wx.ALIGN_CENTER)
        hSizer.AddStretchSpacer(prop=1)
        self.sizer.Add(hSizer, flag=wx.ALIGN_CENTER)
        AddLinearSpacer(self.sizer, 10)
        hSizer = wx.BoxSizer(wx.HORIZONTAL)
        hSizer.AddStretchSpacer(prop=1)
        hSizer.Add(self.ditherText, flag=wx.ALIGN_CENTER)
        AddLinearSpacer(hSizer, 5)
        hSizer.Add(self.ditherChoice, flag=wx.ALIGN_CENTER)
        AddLinearSpacer(hSizer, 15)
        hSizer.Add(self.colorsText, flag=wx.ALIGN_CENTER)
        AddLinearSpacer(hSizer, 5)
        hSizer.Add(self.colorsChoice, flag=wx.ALIGN_CENTER)
        hSizer.AddStretchSpacer(prop=1)
        self.sizer.Add(hSizer, flag=wx.ALIGN_CENTER)
        self.sizer.AddStretchSpacer(prop=1)
        #Layout sizers
        self.SetSizer(self.sizer)
        self.SetAutoLayout(1)

    def onColormodeChoice(self):
        """Enable palette/dither/colors controls only for Palette ('P') mode;
        returns the selected PIL mode string.
        """
        mode = self.colormode_dict[self.colormodeChoice.GetStringSelection()]
        enabled = (mode == 'P')
        # NOTE(review): text_color is computed but never applied to anything.
        text_color = (0, 0, 0) if enabled else (130, 130, 130)
        controls = (self.paletteChoice, self.ditherChoice, self.colorsChoice)
        for control in controls:
            control.Enable(enabled)
        texts = (self.paletteText, self.ditherText, self.colorsText)
        for text in texts:
            text.Enable(enabled)
        return mode
class SmartPanel(ImagePanel):
    """Settings panel for the 'Smart' pseudo-format; shows an explanatory
    blurb instead of options.
    """
    about = 'Converts grayscale images to PNG and color images to JPEG.'

    def __init__(self, *args, **kwargs):
        ImagePanel.__init__(self, *args, **kwargs)
        blurb = wx.StaticText(parent=self, label=self.about)
        sizer = wx.BoxSizer(wx.VERTICAL)
        sizer.Add(blurb, flag=wx.ALIGN_CENTER)
        self.sizer = sizer
        self.SetSizer(self.sizer)
        self.SetAutoLayout(1)
# Maps the user-visible format name to the panel class that edits its options.
gui_dict = {'JPEG': JpegPanel, 'PNG': PngPanel, 'GIF': GifPanel,
            'Smart': SmartPanel}
| {
"repo_name": "borgler/SuperConverter",
"path": "imggui.py",
"copies": "1",
"size": "20840",
"license": "mpl-2.0",
"hash": -1686475261396572200,
"line_mean": 41.3577235772,
"line_max": 85,
"alpha_frac": 0.5756238004,
"autogenerated": false,
"ratio": 3.9892802450229707,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.006922400525156048,
"num_lines": 492
} |
from functools import partial
class API:
    """Entry point for API calls; attribute access builds Request objects
    for dotted method names, and awaiting the instance calls one directly.
    """

    def __init__(self, session):
        self._session = session

    def __getattr__(self, name):
        return Request(self, name)

    async def __call__(self, method_name, **method_kwargs):
        handler = getattr(self, method_name)
        return await handler(**method_kwargs)
class Request:
    """A lazily-built dotted method name bound to an API instance.

    Attribute access extends the name ('users' -> 'users.get'); awaiting the
    call sends it through the API's session.
    """
    __slots__ = ('_api', '_method_name', '_method_args')

    def __init__(self, api, method_name):
        self._api = api
        self._method_name = method_name

    def __getattr__(self, name):
        return Request(self._api, '.'.join((self._method_name, name)))

    async def __call__(self, **method_args):
        # Reserved kwargs are stripped before being sent as API parameters.
        timeout = method_args.pop('timeout', None)
        need_raw_response = method_args.pop('raw_response', False)
        self._method_args = method_args
        session = self._api._session
        return await session.send_api_request(self._method_name, method_args,
                                              timeout, need_raw_response)
class LazyAPI:
    """API variant whose calls produce deferred LazyRequest partials instead
    of executing immediately.
    """

    def __init__(self, session):
        self._session = session

    def __getattr__(self, name):
        return LazyRequest(self, name)

    def __call__(self, method_name, **method_kwargs):
        handler = getattr(self, method_name)
        return handler(**method_kwargs)
class LazyRequest:
    """Like Request, but calling it returns a partial that performs the
    request later instead of awaiting it now.
    """
    __slots__ = ('_api', '_method_name', '_method_args')

    def __init__(self, api, method_name):
        self._api = api
        self._method_name = method_name

    def __getattr__(self, name):
        return LazyRequest(self._api, '.'.join((self._method_name, name)))

    def __call__(self, **method_args):
        timeout = method_args.pop('timeout', None)
        self._method_args = method_args
        session = self._api._session
        # Defer execution: the caller invokes the returned partial to send.
        return partial(session.send_api_request, self._method_name,
                       method_args, timeout)
| {
"repo_name": "Fahreeve/aiovk",
"path": "aiovk/api.py",
"copies": "1",
"size": "1809",
"license": "mit",
"hash": 1689753151943713300,
"line_mean": 28.6557377049,
"line_max": 116,
"alpha_frac": 0.594803759,
"autogenerated": false,
"ratio": 3.792452830188679,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9886556014717612,
"avg_score": 0.0001401148942132549,
"num_lines": 61
} |
from functools import partial
class AuthorizationExpression(object):
    """Base class for composable authorization predicates.

    Subclasses implement __call__(context) -> bool and uses(key) -> bool;
    expressions combine with & and |.
    """

    def __call__(self, context):
        raise NotImplementedError

    def __and__(self, other):
        self.require_auth_expr(other)
        return AndExpression(self, other)

    def __or__(self, other):
        self.require_auth_expr(other)
        return OrExpression(self, other)

    def __invert__(self):
        # NOTE(review): returns a plain lambda rather than an
        # AuthorizationExpression, so a negated expression cannot be further
        # combined with & / | or queried with uses(). Behavior preserved.
        return lambda x: not self(x)

    def __eq__(self, other):
        raise NotImplementedError

    def require_auth_expr(self, other):
        """Raise TypeError unless `other` is an AuthorizationExpression.

        Fixed: uses the call form of raise, which is valid on both Python 2
        and 3 (the original `raise TypeError, "..."` statement form is
        Python-2-only and a SyntaxError on Python 3).
        """
        if not isinstance(other, AuthorizationExpression):
            raise TypeError(
                "Type '%s' is incompatible with type '%s'"
                % (type(self).__name__, type(other).__name__))

    def uses(self, key):
        raise NotImplementedError
class BinaryExpression(AuthorizationExpression):
    """Common storage and equality for two-operand expressions (and/or)."""

    def __init__(self, a, b):
        self.a = a
        self.b = b

    def uses(self, key):
        # An expression uses a context key if either operand does.
        return self.a.uses(key) or self.b.uses(key)

    def __eq__(self, other):
        # NOTE: operand order (other.x == self.x) is significant when the
        # operands are DSL proxies whose __eq__ builds comparison nodes.
        return isinstance(other, self.__class__) and other.a == self.a and other.b == self.b
class AndExpression(BinaryExpression):
    """Authorizes the context only when both operands do."""

    def __call__(self, context):
        return self.a(context) and self.b(context)

    def __repr__(self):
        return 'AndExpression({0}, {1})'.format(repr(self.a), repr(self.b))
class OrExpression(BinaryExpression):
    """Authorizes the context when either operand does."""

    def __call__(self, context):
        return self.a(context) or self.b(context)

    def __repr__(self):
        return 'OrExpression({0}, {1})'.format(repr(self.a), repr(self.b))
class ObjectProxy(AuthorizationExpression):
    """Names an object expected in the authorization context; attribute or
    item access produces value proxies for its keys.
    """

    def __init__(self, name):
        self._name = name

    def __eq__(self, other):
        if not isinstance(other, ObjectProxy):
            return False
        return other._name == self._name

    def __getattr__(self, key):
        return ObjectProxyValue(self, key)

    def __getitem__(self, key):
        # Item access is just another spelling of attribute access.
        return self.__getattr__(key)

    def __call__(self, context):
        # As a predicate: true when the named object is present.
        return self._name in context

    def __repr__(self):
        return 'ObjectProxy({0})'.format(self._name)

    def match(self, fn):
        return ObjectProxyMatch(self, fn)

    def exists(self):
        return self

    def uses(self, key):
        return self._name == key

    def get(self, context):
        return context.get(self._name, {})
class ObjectProxyMatch(AuthorizationExpression):
    """Applies an arbitrary predicate to the proxied object at evaluation."""

    def __init__(self, proxy, fn):
        self._proxy = proxy
        self.fn = fn

    def __repr__(self):
        return 'ObjectProxyMatch({0}, {1})'.format(repr(self._proxy),
                                                   repr(self.fn))

    def __eq__(self, other):
        if not isinstance(other, ObjectProxyMatch):
            return False
        return other._proxy == self._proxy and other.fn == self.fn

    def __call__(self, context):
        return self.fn(self._proxy.get(context))

    def uses(self, key):
        return self._proxy.uses(key)
class ObjectProxyValue(AuthorizationExpression):
    """Proxy for a single key of a proxied object.

    Comparison operators deliberately build expression nodes (a small DSL)
    instead of evaluating immediately.
    """

    def __init__(self, proxy, key):
        self._proxy = proxy
        self._key = key

    def __repr__(self):
        return 'ObjectProxyValue(%s, %s)' % (repr(self._proxy), self._key)

    def get_value(self, context):
        """Resolve the proxied object from the context and read this key."""
        obj = self._proxy.get(context)
        val = obj.get(self._key)
        return val

    def exists(self):
        return self

    def uses(self, key):
        return self._proxy.uses(key)

    def __call__(self, context):
        # As a predicate: true when the parent exists and holds this key.
        return self._proxy(context) and self._key in self._proxy.get(context)

    # Fixed: the original class body defined an isinstance-based __eq__
    # earlier that was dead code — this DSL __eq__ rebound the name in the
    # same class body, so the first definition was never reachable.
    def __eq__(self, other):
        return EqualsComparison(self, other)

    def __ne__(self, other):
        return NotEqualsComparison(self, other)

    def __lt__(self, other):
        return LessThanComparison(self, other)

    def __gt__(self, other):
        return GreaterThanComparison(self, other)

    def __le__(self, other):
        return LessThanEqualComparison(self, other)

    def __ge__(self, other):
        return GreaterThanEqualComparison(self, other)

    def in_(self, other):
        return ContainsComparison(self, other)
class ObjectProxyValueComparison(AuthorizationExpression):
    """Base for binary comparisons between a proxied value and either a
    constant or another proxied value; subclasses set `opstr` and implement
    `compare`.
    """
    opstr = ''

    def __init__(self, proxy, other):
        self._proxy = proxy
        self.other = other

    def __repr__(self):
        return '%s %s %s' % (repr(self._proxy), self.opstr, repr(self.other))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and other._proxy == self._proxy and other.other == self.other

    def __call__(self, context):
        # Left side always comes from the proxy; the right side may itself
        # be a proxied value, resolved against the same context.
        a = self._proxy.get_value(context)
        if isinstance(self.other, ObjectProxyValue):
            b = self.other.get_value(context)
        else:
            b = self.other
        return self.compare(a, b)

    def compare(self, a, b):
        raise NotImplementedError

    def uses(self, key):
        if isinstance(self.other, AuthorizationExpression):
            return self._proxy.uses(key) or self.other.uses(key)
        else:
            return self._proxy.uses(key)
class EqualsComparison(ObjectProxyValueComparison):
    # Built by ObjectProxyValue.__eq__ (proxy == value).
    opstr = '=='

    def compare(self, a, b):
        return a == b


class NotEqualsComparison(ObjectProxyValueComparison):
    # Built by ObjectProxyValue.__ne__ (proxy != value).
    opstr = '!='

    def compare(self, a, b):
        return a != b


class LessThanComparison(ObjectProxyValueComparison):
    # Built by ObjectProxyValue.__lt__ (proxy < value).
    opstr = '<'

    def compare(self, a, b):
        return a < b


class GreaterThanComparison(ObjectProxyValueComparison):
    # Built by ObjectProxyValue.__gt__ (proxy > value).
    opstr = '>'

    def compare(self, a, b):
        return a > b


class LessThanEqualComparison(ObjectProxyValueComparison):
    # Built by ObjectProxyValue.__le__ (proxy <= value).
    opstr = '<='

    def compare(self, a, b):
        return a <= b


class GreaterThanEqualComparison(ObjectProxyValueComparison):
    # Built by ObjectProxyValue.__ge__ (proxy >= value).
    opstr = '>='

    def compare(self, a, b):
        return a >= b


class ContainsComparison(ObjectProxyValueComparison):
    # Built by ObjectProxyValue.in_ (proxied value contained in `other`).
    opstr = ' in '

    def compare(self, a, b):
        return a in b
class ItemProxy(ObjectProxy):
    """ObjectProxy specialized for an entity item: attribute access follows
    entity links when one exists for the key, otherwise proxies the value.
    """

    def __init__(self, entity, name='item'):
        super(ItemProxy, self).__init__(name)
        self._entity = entity

    def __getattr__(self, key):
        link = self._entity.get_link(key)
        if link:
            return LinkProxy(self, link.entity, key)
        return ObjectProxyValue(self, key)

    def __repr__(self):
        return 'ItemProxy({0}, {1})'.format(self._entity.__name__, self._name)
class LinkProxy(ObjectProxyValue, ItemProxy):
    """Proxy for a linked entity reached from another proxied item.

    Multiple inheritance: acts both as a value of the parent proxy
    (ObjectProxyValue) and as an item of the linked entity (ItemProxy).
    """

    def __init__(self, proxy, entity, name):
        # Both bases are initialized explicitly; this sets _name/_entity via
        # ItemProxy and _proxy/_key via ObjectProxyValue, all from `name`.
        ItemProxy.__init__(self, entity, name)
        ObjectProxyValue.__init__(self, proxy, name)

    def get(self, context):
        # Resolve the parent item, then follow the link through the API
        # interface; authorization is bypassed for the internal traversal.
        item = self._proxy.get(context)
        interface = context['api'].get_interface_for_entity(self._proxy._entity)
        return interface.link(item['_id'], self._name,
                              bypass_authorization=True, show_hidden=True)

    def __repr__(self):
        return 'LinkProxy(%s, %s)' % (self._proxy._entity.__name__, self._name)
| {
"repo_name": "cooper-software/cellardoor",
"path": "cellardoor/authorization.py",
"copies": "1",
"size": "6500",
"license": "mit",
"hash": 3870897848569374700,
"line_mean": 18.6404833837,
"line_max": 108,
"alpha_frac": 0.6430769231,
"autogenerated": false,
"ratio": 3.200393894633186,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43434708177331854,
"avg_score": null,
"num_lines": null
} |
from functools import partial
class Datastore(object):
    """Abstract persistence facade over the user/role/provider-user/
    track-login models.

    For every configured model, `_bind_methods` attaches
    ``find_<name>s`` / ``find_<name>`` / ``create_<name>`` convenience
    partials unless a subclass already defines them.
    """

    def __init__(self, db, user_model, role_model=None, provider_user_model=None,
                 track_login_model=None):
        self.db = db
        self.user_model = user_model
        self.role_model = role_model
        self.provider_user_model = provider_user_model
        self.track_login_model = track_login_model
        self._bind_methods()

    def commit(self):
        """Flush pending changes; implemented by backends."""
        raise NotImplementedError()

    def put(self, obj):
        """Stage an object for persistence; implemented by backends."""
        raise NotImplementedError()

    def delete(self, obj):
        """Stage an object for removal; implemented by backends."""
        raise NotImplementedError()

    def _find_models(self, model, **kwargs):
        """Return all `model` rows matching kwargs; implemented by backends."""
        raise NotImplementedError()

    def _find_model(self, model, **kwargs):
        """Return the first matching row, or None when there is no match.

        Fixed: the original called ``self._find_models(**kwargs)`` and
        dropped the ``model`` argument, which raised TypeError for any
        backend relying on this generic implementation.
        """
        try:
            return self._find_models(model, **kwargs)[0]
        except IndexError:
            return None

    def _create_model(self, model, **kwargs):
        obj = model(**kwargs)
        return obj

    def _bind_methods(self):
        """Attach the find/create partials for each configured model."""
        for model_name in 'user', 'role', 'provider_user', 'track_login':
            model = getattr(self, '{}_model'.format(model_name))
            if model:
                find_models = 'find_{}s'.format(model_name)
                find_model = 'find_{}'.format(model_name)
                create_model = 'create_{}'.format(model_name)
                # hasattr checks let subclasses provide their own overrides.
                if not hasattr(self, find_models):
                    setattr(self, find_models, partial(self._find_models, model))
                if not hasattr(self, find_model):
                    setattr(self, find_model, partial(self._find_model, model))
                if not hasattr(self, create_model):
                    setattr(self, create_model, partial(self._create_model, model))
class SQLAlchemyDatastore(Datastore):
    """Datastore backed by a (Flask-)SQLAlchemy ``db`` object."""

    def commit(self):
        self.db.session.commit()

    def put(self, obj):
        self.db.session.add(obj)

    def delete(self, obj):
        self.db.session.delete(obj)

    def _find_model(self, model, **kwargs):
        # Query-native "first or None" instead of the generic index-based one.
        return model.query.filter_by(**kwargs).first()

    def _find_models(self, model, **kwargs):
        # Returns the lazy query object, not a materialized list.
        return model.query.filter_by(**kwargs)
| {
"repo_name": "vgavro/flask-userflow",
"path": "flask_userflow/datastore.py",
"copies": "1",
"size": "2112",
"license": "mit",
"hash": -2606271743569402000,
"line_mean": 31.4923076923,
"line_max": 83,
"alpha_frac": 0.5823863636,
"autogenerated": false,
"ratio": 4.022857142857143,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5105243506457143,
"avg_score": null,
"num_lines": null
} |
from functools import partial
class Halt(Exception):
    """Raise from a pre_/post_ hook (or the wrapped function) to abort the
    call; Encapsulate then returns the exception's arguments instead.
    """

    def __init__(self, *args):
        self.return_args = args


class Encapsulate(object):
    """Descriptor that wraps a method so that ``pre_<name>`` and
    ``post_<name>`` methods on the same instance run before and after it.

    Usage::

        class C(object):
            def pre_myfunc(self):
                print('Pre Main func')

            @Encapsulate
            def myfunc(self):
                print('Main func')

            def post_myfunc(self):
                print('Post Main func')

    Works with overridden functions as well: an Encapsulate overridden in a
    subclass skips its own pre/post pass so hooks only run once per call.
    """
    # Registry of encapsulated functions per instance, used to detect
    # overridden encapsulations across a class hierarchy.
    encapsulations = {}
    halt = Halt

    def __init__(self, func):
        self._func = func
        self._wrapper = None
        self.overridden = False
        self.objclass = None
        # NOTE(review): caching bound partials keyed by instance keeps every
        # instance alive for the descriptor's lifetime — confirm acceptable.
        self.instancemethods = {}

    @property
    def func_name(self):
        # Fixed: `__name__` exists on both Python 2 and 3 functions; the
        # original used the Python-2-only `func_name` attribute.
        return self._func.__name__

    def __call__(self, instance, *args, **kwargs):
        if self.overridden:
            # Called via super() from an overriding implementation: the
            # outermost Encapsulate already ran the pre/post hooks.
            return self._func(instance, *args, **kwargs)
        name = self.func_name
        pre_func = getattr(instance, 'pre_%s' % name, None)
        post_func = getattr(instance, 'post_%s' % name, None)
        post_ret = None  # fixed: was unbound (NameError) without a post_ hook
        try:
            if pre_func is not None:
                pre_func(*args, **kwargs)
            ret = self._func(instance, *args, **kwargs)
            if post_func is not None:
                post_ret = post_func(*args, **kwargs)
        except Halt as e:
            return e.return_args
        # Return the post function's value if it returned something other
        # than None, otherwise the main function's value. (Fixed: the
        # original `post_ret or ret` also discarded falsy post results,
        # contradicting this documented contract.)
        return ret if post_ret is None else post_ret

    def __get__(self, obj, objtype):
        """Support instance methods."""
        if obj not in self.instancemethods:
            self.objclass = obj.__class__
            # Fixed: setdefault also covers an instance that already has
            # entries for other encapsulated methods (the original raised
            # KeyError when the outer key existed but this name did not).
            bucket = Encapsulate.encapsulations.setdefault(obj, {})
            encs = bucket.setdefault(self.func_name, [])
            encs.append(self)
            # Determine which encapsulations on this function are overridden
            # so their pre/post hooks are not executed twice per call.
            for enc in encs:
                # Call order of the implementations along the MRO.
                call_order = []
                for cls in objtype.__mro__:
                    func = cls.__dict__.get(self.func_name, None)
                    if isinstance(func, Encapsulate):
                        func = func._func
                    call_order.append(func)
                try:
                    idx = call_order.index(enc._func)
                except ValueError:
                    pass
                else:
                    if idx > 0 and any(call_order[:idx]):
                        enc.overridden = True
            self.instancemethods[obj] = partial(self.__call__, obj)
        return self.instancemethods[obj]

    def __repr__(self):
        return 'Encapsulation for %s' % self.func_name
"repo_name": "lobocv/pyperform",
"path": "pyperform/encapsulate.py",
"copies": "1",
"size": "3386",
"license": "mit",
"hash": 9044553967083832000,
"line_mean": 29.5135135135,
"line_max": 112,
"alpha_frac": 0.5605434141,
"autogenerated": false,
"ratio": 4.374677002583979,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0006798358940955873,
"num_lines": 111
} |
from functools import partial
class HiggsException(Exception):
    """Root of the Higgs exception hierarchy."""


class HiggsScopeException(HiggsException):
    """A name could not be resolved in any scope."""


class HiggsSyntaxException(HiggsException):
    """An invalid Higgs source construct was encountered."""


class HiggsDeclarationException(HiggsSyntaxException):
    """A declaration supplied neither a value nor an explicit type."""
def find_in_scope(name, kwargs):
    """Resolve `name` from the explicit kwargs scope, falling back to the
    module-level global_scope; raise HiggsScopeException when absent from
    both.
    """
    try:
        return kwargs[name]
    except KeyError:
        pass
    try:
        return getattr(global_scope, name)
    except AttributeError:
        raise HiggsScopeException(
            u"Couldn't find name {} in any scope".format(name))
class GlobalScope(object):
    """Top-level scope: tracks importable modules and implements the
    declaration (`assign`) semantics.
    """

    importables = {}
    # Sentinel distinguishing "not passed" from an explicit None.
    NOT_PROVIDED = object()

    def assign(self, name, value=NOT_PROVIDED, type_decl=NOT_PROVIDED, *args,
               **kwargs):
        """Declare `name` in the enclosing interface/impl dicts.

        A value implies its type; without a value an explicit type_decl is
        mandatory.
        """
        interface = find_in_scope('interface', kwargs)
        impl = find_in_scope('impl', kwargs)
        if value is not self.NOT_PROVIDED:
            impl[name] = value
            interface[name] = type(value)
            return
        if type_decl is self.NOT_PROVIDED:
            raise HiggsDeclarationException(
                u"You must either provide a value for a declaration, "
                u"or explicitly declare a type for it")
        interface[name] = type_decl

    def higgs_import(self, name):
        """Instantiate a registered importable module by name."""
        if name not in self.importables:
            raise ImportError
        return self.importables[name]()
class Module(GlobalScope):
    """A Higgs module: instantiating it runs load(), which builds the
    module's interface and impl dictionaries.
    """

    def inline(self, *args, **kwargs):
        """The module body; executed on import to declare the exports.

        In Higgs this is the actual module source rather than a real
        function. The default body declares nothing.
        """
        pass

    def load(self, inline, *args, **kwargs):
        """Default import-time initialization.

        Runs the inline body with an assign() pre-bound to fresh
        interface/impl dicts, then publishes those dicts on the module.

        :param inline: the code declared inline, in the module body
        """
        assign = find_in_scope('assign', kwargs)
        interface = {}
        impl = {}
        bound_assign = partial(assign, interface=interface, impl=impl)
        inline(assign=bound_assign)
        self.interface = interface
        self.impl = impl

    def __init__(self, *args, **kwargs):
        self.interface = None
        self.impl = None
        self.load(self.inline, *args, **kwargs)
class HiggsObject(object):
    """Base runtime object: pairs a type interface with an implementation."""
    interface = None  # type description of the object's members
    impl = None       # concrete implementation, when any
class HiggsFunction(HiggsObject):
    """Runtime function object: pre/post condition specs plus a return type."""

    # NOTE(review): HiggsArgsSpec is defined later in this module, so this
    # class body raises NameError at import time as written — confirm the
    # intended declaration order.
    interface = {
        'pre': HiggsArgsSpec(),
        'post': HiggsArgsSpec(),
        'rtype': HiggsObject()
    }
    impl = None

    @classmethod
    def create_literal(cls, code=None, pre=None, post=None, rtype=None):
        """Build a function literal and return it.

        Fixed: the original fell off the end of the method and returned
        None, so every caller's '$...' interface entry was None.
        """
        new_function = cls()
        new_function.interface = {
            'pre': pre,
            'post': post,
            'rtype': rtype
        }
        new_function.impl = code
        return new_function
class HiggsInt(HiggsObject):
    """Runtime integer object."""
    interface = {
        '$add': HiggsFunction.create_literal()
    }

    @classmethod
    def create_literal(cls, value):
        """Build an integer literal and return it.

        Fixed: the original returned None. NOTE(review): `value` is still
        not stored anywhere — confirm how it should feed impl/interface.
        """
        new_int = HiggsInt()
        return new_int
class HiggsArgsList(HiggsObject):
    """Represents an ordered, (immutable?) sequence of HiggsObjects
    """
    # NOTE(review): HiggsList is declared later in this module, so evaluating
    # this class body raises NameError at import time — confirm intended
    # declaration order.
    interface = {
        '$positional': HiggsList(),
        '$keywords': 0
    }
class HiggsArgsSpec(HiggsObject):
    """Specification of a function's argument requirements (empty for now)."""
    interface = {}
class HiggsList(HiggsObject):
    """Zero-based, integer-indexed typed array of HiggsObjects.

    Each element's interface must satisfy the list's ``$type`` member at
    compile time.
    """
    interface = {
        '$get_item': 0,
        '$set_item': 0,
        '$length': 0,
        '$type': 0
    }
class HiggsFrozenDict(HiggsObject):
    """A dictionary whose keys and values cannot change after creation.

    IDEA: unlike Python dicts, the keys don't have to return the same hash
    value, because their individual object IDs will be used instead. This
    mirrors what Python achieves by subclassing set/list/dict; the point of
    the extra machinery is to give the user a hook for custom behavior.
    """
    # NOTE(review): HiggsFunction.create_literal currently returns None, so
    # '$get_item' is bound to None here — revisit once that is fixed.
    interface = {
        # need generics :D :(( oh well.. let's work around this
        '$get_item': HiggsFunction.create_literal(post=())
    }
class HiggsDict(HiggsObject):
    """Mutable dictionary type (interface not yet specified)."""
    interface = {}
class HiggsCode(HiggsObject):
    """Executable code object (interface not yet specified)."""
    interface = {}
class WeirdModule(Module):
    """Example module exercising the different declaration forms."""

    def inline(self, *args, **kwargs):
        # Resolve assign from the calling scope (Module.load passes it in,
        # pre-bound to this module's interface/impl dicts).
        assign = find_in_scope('assign', kwargs)
        # Value declaration: type inferred from the value.
        assign('a', 3)

        def increment(x):
            return x + 1
        assign('increment', increment)
        # Type-only declaration.
        assign('weird_name', type_decl=str)
        # NOTE(review): no value and no type_decl — this raises
        # HiggsDeclarationException when the module is loaded; confirm
        # whether that failure is the intended demonstration.
        assign('WeirdSubType')
# Module-level wiring: create the global scope and register WeirdModule
# under its class name so the Higgs runtime can import it.
global_scope = GlobalScope()
GlobalScope.importables[WeirdModule.__name__] = WeirdModule
| {
"repo_name": "vladiibine/higgs",
"path": "higgs/pyimpl/__init__.py",
"copies": "1",
"size": "4979",
"license": "mit",
"hash": 3158858601401974300,
"line_mean": 23.4068627451,
"line_max": 81,
"alpha_frac": 0.6033340028,
"autogenerated": false,
"ratio": 3.9895833333333335,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5092917336133334,
"avg_score": null,
"num_lines": null
} |
from functools import partial
class _NoValue(object):
"""Represents an unset value. Used to differeniate between an explicit
``None`` and an unset value.
"""
pass
NoValue = _NoValue()
class ChainTest:
    """Toy method-chaining experiment.

    Attribute access on an instance is routed through ``func_lookup`` and
    wrapped in a ``ChainWrapper`` so calls can be deferred and chained.
    """

    def __init__(self, list=None):
        # The parameter keeps its original (builtin-shadowing) name `list`
        # so callers passing it by keyword are unaffected.
        # Bug fix: the default used to be a shared mutable `[]`, so every
        # instance created without an argument aliased the same list.
        self.func_lookup = {}
        self.func_lookup['_in'] = '_in_'
        self.func_lookup['_out'] = '_out_'
        self.func_lookup['v'] = '_v_'
        self.callmap = {}
        self.callmap['_in'] = partial(self._in_)
        self.callmap['_out'] = partial(self._out_)
        self.callmap['v'] = partial(self._v_)
        self._value = [] if list is None else list

    def _in_(self, *args):
        # Record an "in" edge and echo it.
        self._value.append(args[0])
        print('_in: ' + repr(args))
        #return self

    def _out_(self, *args):
        # Record an "out" edge and echo it.
        self._value.append(args[0])
        print('_out: ' + repr(args))
        #return self

    def _v_(self, *args):
        # Record a vertex and echo it.
        self._value.append(args[0])
        print('vertex: ' + repr(args))
        #return self

    def run(self):
        """Return current value of the chain operations."""
        return self(self._value)

    @classmethod
    def get_method(cls, name):
        """Return valid 'module' method."""
        print("Get Method - " + repr(name))
        method = getattr(cls, name, None)
        if not callable(method):
            # NOTE(review): raising bare BaseException is unusual; kept
            # unchanged so existing callers' except clauses still match.
            raise BaseException
        return method

    def __getattr__(self, name, *args):
        # Unknown attributes become deferred ChainWrapper calls.
        print("Looking for " + name)
        print(args)
        func = self.func_lookup[name]
        return ChainWrapper(self._value, self.get_method(func))

    def __call__(self, value):
        """Force evaluation: unwrap a pending ChainWrapper if present."""
        print("Call:Value = " + repr(value))
        if isinstance(self._value, ChainWrapper):
            value = self._value.unwrap(value)
        return value
class ChainWrapper(object):
    """Wrap 'Chain' method call within a 'ChainWrapper' context."""
    def __init__(self, value, method):
        # `value` may itself be another ChainWrapper (nested chains);
        # args/kargs are filled in later by __call__.
        print("Making new Wrapper")
        self._value = value
        self.method = method
        self.args = ()
        self.kargs = {}
    def _generate(self):
        """Generate a copy of this instance."""
        # Shallow copy via __new__ so __init__'s print/reset is skipped.
        print('In ChainWrapper _Generate')
        new = self.__class__.__new__(self.__class__)
        new.__dict__ = self.__dict__.copy()
        return new
    def unwrap(self, value=NoValue):
        """Execute 'method' with '_value', 'args', and 'kargs'. If '_value' is
        an instance of 'ChainWrapper', then unwrap it before calling 'method'.
        """
        # Generate a copy of ourself so that we don't modify the chain wrapper
        # _value directly. This way if we are late passing a value, we don't
        # "freeze" the chain wrapper value when a value is first passed.
        # Otherwise, we'd locked the chain wrapper value permanently and not be
        # able to reuse it.
        wrapper = self._generate()
        print('UnWrapping')
        if isinstance(wrapper._value, ChainWrapper):
            # Recursively evaluate nested wrappers first.
            wrapper._value = wrapper._value.unwrap(value)
        elif not isinstance(value, ChainWrapper) and value is not NoValue:
            # Override wrapper's initial value.
            wrapper._value = value
        if wrapper._value is not NoValue:
            value = wrapper._value
        # Invoke the deferred method with the resolved value.
        return wrapper.method(value, *wrapper.args, **wrapper.kargs)
    def __call__(self, *args, **kargs):
        """Invoke the 'method' with 'value' as the first argument and return a
        new 'Chain' object with the return value.
        """
        # NOTE(review): storing args on self mutates this wrapper in place,
        # so calling the same wrapper twice overwrites the first call's
        # arguments — confirm this is intended.
        print("Wrapper-Call:Value = " + str(args))
        self.args = args
        self.kargs = kargs
        return ChainTest(self)
if __name__ == "__main__":
    # Demo: each wrapper call returns a fresh ChainTest whose _value is the
    # wrapper itself. NOTE(review): the chained `._in_(...)` calls then
    # append onto that wrapper, which has no `append` — confirm this demo
    # still behaves as intended.
    c = ChainTest()
    #c.v(1)._in('Test')._out('Test2')._in('Test3')
    c.v(1)._in_('Test')._out_('Test2')._in_('Test3')
    #c.run()
    #c("Call")
| {
"repo_name": "jgraham20/graphdb",
"path": "graphdb/chaintest.py",
"copies": "1",
"size": "3820",
"license": "mit",
"hash": -3084902221438974000,
"line_mean": 27.5074626866,
"line_max": 79,
"alpha_frac": 0.5612565445,
"autogenerated": false,
"ratio": 3.9381443298969074,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9986204091665445,
"avg_score": 0.0026393565462924547,
"num_lines": 134
} |
from functools import partial
class OSCCallbackBase(object):
    """Base class wiring an OSC address to one or more callbacks.

    Subclasses provide ``address`` and a ``_callback`` adapter method.
    """

    def __init__(self, receiver, callback=lambda x, y: None, callbacks=None):
        self.receiver = receiver
        self.callbacks = callbacks
        self.callback = callback

    def attach(self):
        """Register this handler's callback(s) with the receiver.

        Returns self so the call can be chained.
        """
        self._recv_callbacks = []
        # Either a collection of callbacks, or the single default one.
        targets = list(self.callbacks) if self.callbacks else [self.callback]
        for target in targets:
            bound = partial(self._callback, target, self.address)
            self._recv_callbacks.append((self.address, bound))
            self.receiver.addCallback(self.address, bound)
        return self
class Touch(OSCCallbackBase):
    """Handler for "/touch" OSC messages."""
    address = "/touch"
    def _callback(self, cb, node, message, address):
        # Intentionally a no-op: touch data is received but not forwarded.
        pass
class Acc(OSCCallbackBase):
    """Accelerometer handler: forwards (compass, pitch, roll) scaled by
    float division. The divisors suggest normalization toward [0, 1] —
    assuming degree-valued inputs; confirm against the OSC sender.
    """

    address = "/acc"

    def _callback(self, cb, node, message, address):
        raw_compass, raw_pitch, raw_roll = message.arguments
        cb(float(raw_compass) / 360.,
           (float(raw_pitch) + 180) / 360.,
           (float(raw_roll) + 90) / 180.)
class Ori(OSCCallbackBase):
    """Orientation handler: like ``Acc`` but converts the raw values with
    ``int()`` (truncation) before scaling — preserved from the original.
    """

    address = "/ori"

    def _callback(self, cb, node, message, address):
        raw_compass, raw_pitch, raw_roll = message.arguments
        cb(int(raw_compass) / 360.,
           (int(raw_pitch) + 180) / 360.,
           (int(raw_roll) + 90) / 180.)
def main():
from pyo import Sine
from bl.dsp import startPYO
s = startPYO()
from twisted.internet import reactor
from txosc.dispatch import Receiver
from txosc.async import DatagramServerProtocol
receiver = Receiver()
reactor.listenUDP(17779, DatagramServerProtocol(receiver),
interface='0.0.0.0')
sineMulMul = Sine(1)
sineMul = Sine(1, mul=sineMulMul)
sine = Sine([110, 110 + (55 / 32.)], mul=sineMul)
def cb(x, y, z):
sine.freq = [x * 220, x * 220 + (55 / 32.)]
sine.mul.freq = y * 55
sine.mul.mul.freq = z * 55
ori = Ori(receiver, cb)
acc = Acc(receiver, cb)
return s, ori, acc, sine
| {
"repo_name": "djfroofy/beatlounge",
"path": "bl/osc/andosc.py",
"copies": "1",
"size": "2254",
"license": "mit",
"hash": 1361050764079484700,
"line_mean": 27.8974358974,
"line_max": 77,
"alpha_frac": 0.5980479148,
"autogenerated": false,
"ratio": 3.594896331738437,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4692944246538437,
"avg_score": null,
"num_lines": null
} |
from functools import partial
class Promise(object):
    """A promise object that attempts to mirror the ES6 promise API.

    Unlike ES6 promises, this one also exposes the underlying value and
    reason directly, and it can be resolved or rejected externally.
    """
    __slots__ = ('value', 'reason', '_state', '_callbacks', '_errbacks')

    def __init__(self):
        # value/reason hold the outcome once the promise settles.
        self.value = None
        self.reason = None
        self._state = 'pending'
        self._callbacks = []
        self._errbacks = []

    @staticmethod
    def resolved(value):
        """Creates a promise object resolved with a certain value."""
        promise = Promise()
        promise._state = 'resolved'
        promise.value = value
        return promise

    @staticmethod
    def rejected(reason):
        """Creates a promise object rejected with a certain value."""
        promise = Promise()
        promise._state = 'rejected'
        promise.reason = reason
        return promise

    @staticmethod
    def all(iterable_or_dict):
        """A promise that resolves when all passed promises resolve. You can
        either pass a list or a dictionary of promises.
        """
        if isinstance(iterable_or_dict, dict):
            return _promise_from_dict(iterable_or_dict)
        return _promise_from_iterable(iterable_or_dict)

    def resolve(self, value):
        """Resolves the promise with the given value."""
        if value is self:
            raise TypeError('Cannot resolve promise with itself.')
        if isinstance(value, Promise):
            # Chain: settle when the other promise settles.
            value.done(self.resolve, self.reject)
            return
        if self._state != 'pending':
            raise RuntimeError('Promise is no longer pending.')
        self.value = value
        self._state = 'resolved'
        # Swap out the callback list before firing so a re-entrant done()
        # call sees the settled state.
        waiting, self._callbacks = self._callbacks, None
        for notify in waiting:
            notify(value)

    def reject(self, reason):
        """Rejects the promise with the given reason."""
        if self._state != 'pending':
            raise RuntimeError('Promise is no longer pending.')
        self.reason = reason
        self._state = 'rejected'
        waiting, self._errbacks = self._errbacks, None
        for notify in waiting:
            notify(reason)

    @property
    def is_pending(self):
        """`True` if the promise is still pending, `False` otherwise."""
        return self._state == 'pending'

    @property
    def is_resolved(self):
        """`True` if the promise was resolved, `False` otherwise."""
        return self._state == 'resolved'

    @property
    def is_rejected(self):
        """`True` if the promise was rejected, `False` otherwise."""
        return self._state == 'rejected'

    def done(self, on_success=None, on_failure=None):
        """Attaches some callbacks to the promise and returns the promise."""
        if on_success is not None:
            if self._state == 'resolved':
                on_success(self.value)
            elif self._state == 'pending':
                self._callbacks.append(on_success)
        if on_failure is not None:
            if self._state == 'rejected':
                on_failure(self.reason)
            elif self._state == 'pending':
                self._errbacks.append(on_failure)
        return self

    def then(self, success=None, failure=None):
        """Adds success and/or failure callbacks to the promise and returns
        another promise settled with the callback's outcome.
        """
        chained = Promise()

        def forward(handler, outcome):
            # A None handler raises TypeError, which rejects the chained
            # promise — same behavior as the original implementation.
            try:
                chained.resolve(handler(outcome))
            except Exception as exc:
                chained.reject(exc)

        self.done(partial(forward, success), partial(forward, failure))
        return chained

    def __repr__(self):
        if self._state == 'rejected':
            detail = repr(self.reason) + ' (rejected)'
        elif self._state == 'pending':
            detail = '(pending)'
        else:
            detail = repr(self.value)
        return '<%s %s>' % (
            type(self).__name__,
            detail,
        )
def _ensure_promise(value):
    """Wrap *value* in a resolved promise unless it already is one."""
    if isinstance(value, Promise):
        return value
    return Promise.resolved(value)
def _promise_from_iterable(iterable):
    """Combine an iterable of promises into one promise over a list.

    Non-promise items are wrapped as already-resolved promises; the first
    rejection rejects the combined promise.
    """
    promises = [_ensure_promise(item) for item in iterable]
    if not promises:
        return Promise.resolved([])
    outstanding = set(promises)
    combined = Promise()

    def on_success(promise, value):
        outstanding.discard(promise)
        if not outstanding:
            combined.resolve([p.value for p in promises])

    for promise in promises:
        promise.done(partial(on_success, promise), combined.reject)
    return combined
def _promise_from_dict(d):
    """Combine a dict of promises into one promise over a dict.

    Values are wrapped as already-resolved promises when needed; the first
    rejection rejects the combined promise. (Py2: uses iteritems.)
    """
    wrapped = dict((k, _ensure_promise(v)) for k, v in d.iteritems())
    if not wrapped:
        return Promise.resolved({})
    outstanding = set(wrapped.keys())
    combined = Promise()

    def on_success(key, value):
        outstanding.discard(key)
        if not outstanding:
            combined.resolve(dict((k, p.value) for k, p in wrapped.iteritems()))

    for key, promise in wrapped.iteritems():
        promise.done(partial(on_success, key), combined.reject)
    return combined
| {
"repo_name": "tempbottle/rb",
"path": "rb/promise.py",
"copies": "7",
"size": "5354",
"license": "apache-2.0",
"hash": -4502858496644598300,
"line_mean": 28.9106145251,
"line_max": 77,
"alpha_frac": 0.5704146433,
"autogenerated": false,
"ratio": 4.16006216006216,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.823047680336216,
"avg_score": null,
"num_lines": null
} |
from functools import partial
class TranslatorFormatException(Exception):
    """Raised when a translation spec is not in a valid format."""
    pass
def get_value_at_path(path, obj):
    """
    Attempts to get the value of a field located at the given field path within
    the object, recursing into child dicts as necessary. Very permissive: if a
    branch node on the path is missing, or is of a non-dict type, None is
    simply returned.

    Path can be provided either as a dot-delimited string or as an array of
    path elements.

    Returns the value found at the path, or None when the path cannot be
    followed. (Note: despite what the docstring previously claimed, no
    (found, value) tuple is ever returned.)
    """
    if isinstance(path, basestring):
        path = path.split('.')
    if path[0] in obj:
        if len(path) > 1:
            if not isinstance(obj[path[0]], dict):
                # Branch node is not a dict: the path cannot continue -> None.
                return
            return get_value_at_path(path[1:], obj[path[0]])
        return obj[path[0]]
    # Key missing: fall through and implicitly return None.
def set_value_at_path(obj, path, value):
    """
    Sets the given key in the given dict object to the given value. If the
    given path is nested, child dicts are created as appropriate.

    Accepts either a dot-delimited path or an array of path elements as the
    `path` variable.
    """
    if isinstance(path, basestring):
        path = path.split('.')
    head, rest = path[0], path[1:]
    if rest:
        # Descend, creating intermediate dicts on demand.
        set_value_at_path(obj.setdefault(head, {}), rest, value)
    else:
        obj[head] = value
def validate_spec(spec):
    """
    Checks that the given spec can be consumed by ``translator``: every
    mapping value must be either a string path or a callable.
    """
    for _dest_path, source in spec.iteritems():
        if not (isinstance(source, basestring) or callable(source)):
            raise TranslatorFormatException()
def decorate_spec(spec):
    """
    Returns a copy of the given spec with plain string lookups replaced by
    functions that actually implement those lookups.
    """
    decorated = {}
    for dest_path, source in spec.iteritems():
        if callable(source):
            decorated[dest_path] = source
        elif isinstance(source, basestring):
            decorated[dest_path] = partial(get_value_at_path, source)
    return decorated
def translator(spec):
    """Compile *spec* into a function that translates one dict into another."""
    validate_spec(spec)
    compiled = decorate_spec(spec)

    def impl(source):
        """
        Executes the dictionary translation encapsulated by this function on
        the given `source` dict.
        """
        result = {}
        for dest_path, source_fn in compiled.iteritems():
            set_value_at_path(result, dest_path, source_fn(source))
        return result

    return impl
| {
"repo_name": "gamechanger/missandei",
"path": "missandei/translator.py",
"copies": "1",
"size": "2836",
"license": "mit",
"hash": -2972971455478178000,
"line_mean": 29.4946236559,
"line_max": 84,
"alpha_frac": 0.6413963329,
"autogenerated": false,
"ratio": 4.201481481481482,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5342877814381481,
"avg_score": null,
"num_lines": null
} |
from functools import partial
def alloc_with_destructor_factory(type, constructor, destructor):
    """Build an allocator for C type ``type`` whose memory is freed (after
    running ``destructor``) when the cffi handle is garbage collected.

    NOTE: the ``type`` parameter shadows the builtin; kept for interface
    compatibility.
    """
    def free(data):
        # Run the godot destructor before releasing the raw memory.
        destructor(data)
        lib.free(data)
    allocator = ffi.new_allocator(alloc=lib.malloc, free=free, should_clear_after_alloc=False)
    def alloc(initialized=True):
        """
        /!\ With `initialized=False`, you must use `lib.godot_*_new` on the
        result otherwise strange things will happen when the destructor kicks in /!\
        """
        data = allocator(type)
        if initialized:
            constructor(data)
        return data
    # Expose the allocator for callers that need raw (uninitialized) allocs.
    alloc.allocator = allocator
    return alloc
# Simplest types: each partial is a zero-argument allocator returning a
# zero-initialized cffi pointer; no godot destructor is required.
godot_bool_alloc = partial(ffi.new, 'godot_bool*')
godot_int_alloc = partial(ffi.new, 'godot_int*')
godot_real_alloc = partial(ffi.new, 'godot_real*')
godot_object_alloc = partial(ffi.new, 'godot_object**')
# Allocation of struct with no destructor
godot_vector3_alloc = partial(ffi.new, 'godot_vector3*')
godot_vector2_alloc = partial(ffi.new, 'godot_vector2*')
godot_transform2d_alloc = partial(ffi.new, 'godot_transform2d*')
godot_transform_alloc = partial(ffi.new, 'godot_transform*')
godot_rid_alloc = partial(ffi.new, 'godot_rid*')
godot_rect3_alloc = partial(ffi.new, 'godot_rect3*')
godot_rect2_alloc = partial(ffi.new, 'godot_rect2*')
godot_quat_alloc = partial(ffi.new, 'godot_quat*')
godot_plane_alloc = partial(ffi.new, 'godot_plane*')
godot_color_alloc = partial(ffi.new, 'godot_color*')
godot_basis_alloc = partial(ffi.new, 'godot_basis*')
# Use a custom memory allocator to handle destructors
godot_variant_alloc = alloc_with_destructor_factory(
    'godot_variant*',
    lib.godot_variant_new_nil,
    lib.godot_variant_destroy
)
godot_string_alloc = alloc_with_destructor_factory(
    'godot_string*',
    lib.godot_string_new,
    lib.godot_string_destroy
)
godot_node_path_alloc = alloc_with_destructor_factory(
    'godot_node_path*',
    # Bug fix: the old lambda (`lambda path=godot_string_alloc():
    # lib.godot_node_path_new`) returned the constructor function without
    # calling it, so the node path memory was never initialized before the
    # destructor eventually ran on it. Initialize from an empty godot string.
    lambda data: lib.godot_node_path_new(data, godot_string_alloc()),
    lib.godot_node_path_destroy
)
godot_dictionary_alloc = alloc_with_destructor_factory(
    'godot_dictionary*',
    lib.godot_dictionary_new,
    lib.godot_dictionary_destroy
)
godot_array_alloc = alloc_with_destructor_factory(
    'godot_array*',
    lib.godot_array_new,
    lib.godot_array_destroy
)
godot_pool_byte_array_alloc = alloc_with_destructor_factory(
    'godot_pool_byte_array*',
    lib.godot_pool_byte_array_new,
    lib.godot_pool_byte_array_destroy
)
godot_pool_int_array_alloc = alloc_with_destructor_factory(
    'godot_pool_int_array*',
    lib.godot_pool_int_array_new,
    lib.godot_pool_int_array_destroy
)
godot_pool_real_array_alloc = alloc_with_destructor_factory(
    'godot_pool_real_array*',
    lib.godot_pool_real_array_new,
    lib.godot_pool_real_array_destroy
)
godot_pool_string_array_alloc = alloc_with_destructor_factory(
    'godot_pool_string_array*',
    lib.godot_pool_string_array_new,
    lib.godot_pool_string_array_destroy
)
godot_pool_color_array_alloc = alloc_with_destructor_factory(
    'godot_pool_color_array*',
    lib.godot_pool_color_array_new,
    lib.godot_pool_color_array_destroy
)
godot_pool_vector2_array_alloc = alloc_with_destructor_factory(
    'godot_pool_vector2_array*',
    lib.godot_pool_vector2_array_new,
    lib.godot_pool_vector2_array_destroy
)
godot_pool_vector3_array_alloc = alloc_with_destructor_factory(
    'godot_pool_vector3_array*',
    lib.godot_pool_vector3_array_new,
    lib.godot_pool_vector3_array_destroy
)
| {
"repo_name": "razvanc-r/godot-python",
"path": "pythonscript/cffi_bindings/allocator.inc.py",
"copies": "1",
"size": "3467",
"license": "mit",
"hash": 17848402057385394,
"line_mean": 34.0202020202,
"line_max": 94,
"alpha_frac": 0.7083934237,
"autogenerated": false,
"ratio": 2.7713828936850518,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.39797763173850514,
"avg_score": null,
"num_lines": null
} |
from functools import partial
def str_to_gd_node_path(path, to_variant=False):
    """Convert a Python string into a ``godot_node_path`` pointer.

    When ``to_variant`` is True, the node path is additionally wrapped in a
    ``godot_variant`` and that variant pointer is returned instead.
    """
    gd_str = pyobj_to_gdobj(path)
    gd_ptr = godot_node_path_alloc()
    lib.godot_node_path_new(gd_ptr, gd_str)
    if to_variant:
        gdvar_ptr = godot_variant_alloc()
        lib.godot_variant_new_node_path(gdvar_ptr, gd_ptr)
        # Bug fix: the variant used to be built and then dropped — the plain
        # node-path pointer was returned regardless of `to_variant`.
        return gdvar_ptr
    return gd_ptr
class NodePath(BaseBuiltinWithGDObjOwnership):
    """Python wrapper around Godot's ``godot_node_path`` builtin."""
    __slots__ = ()
    # Variant type tag used when boxing a NodePath into a godot_variant.
    GD_TYPE = lib.GODOT_VARIANT_TYPE_NODE_PATH
    @staticmethod
    def _copy_gdobj(gdobj):
        # Copy the underlying C node path (ownership-management hook).
        cpy_gdobj = godot_node_path_alloc()
        lib.godot_node_path_new_copy(cpy_gdobj, gdobj)
        return cpy_gdobj
    def __init__(self, path):
        """Build a node path from a Python string."""
        self._check_param_type('path', path, str)
        gd_str = pyobj_to_gdobj(path)
        self._gd_ptr = godot_node_path_alloc()
        lib.godot_node_path_new(self._gd_ptr, gd_str)
    def __eq__(self, other):
        # Equality compares the rendered path strings, not the C pointers.
        return isinstance(other, NodePath) and self.path == other.path
    def __ne__(self, other):
        return not self == other
    def __repr__(self):
        return "<%s(path=%r)>" % (type(self).__name__, self.path)
    @property
    def path(self):
        """The full node path rendered as a Python string."""
        gd_repr = lib.godot_node_path_as_string(self._gd_ptr)
        return ffi.string(lib.godot_string_unicode_str(ffi.addressof(gd_repr)))
    def get_name(self, idx):
        """Return the idx-th name component of the path."""
        self._check_param_type('idx', idx, int)
        name = lib.godot_node_path_get_name(self._gd_ptr, idx)
        return godot_string_to_pyobj(ffi.addressof(name))
    def get_name_count(self):
        """Number of name components in the path."""
        return lib.godot_node_path_get_name_count(self._gd_ptr)
    def get_property(self):
        """Return the property part of the path, if any."""
        prop = lib.godot_node_path_get_property(self._gd_ptr)
        return godot_string_to_pyobj(ffi.addressof(prop))
    def get_subname(self, idx):
        """Return the idx-th subname component of the path."""
        self._check_param_type('idx', idx, int)
        subname = lib.godot_node_path_get_subname(self._gd_ptr, idx)
        return godot_string_to_pyobj(ffi.addressof(subname))
    def get_subname_count(self):
        """Number of subname components in the path."""
        return lib.godot_node_path_get_subname_count(self._gd_ptr)
    def is_absolute(self):
        """True when the underlying node path is absolute."""
        return bool(lib.godot_node_path_is_absolute(self._gd_ptr))
    def is_empty(self):
        """True when the node path has no components."""
        return bool(lib.godot_node_path_is_empty(self._gd_ptr))
| {
"repo_name": "razvanc-r/godot-python",
"path": "pythonscript/cffi_bindings/builtin_node_path.inc.py",
"copies": "1",
"size": "2211",
"license": "mit",
"hash": 8119550902235342000,
"line_mean": 31.5147058824,
"line_max": 79,
"alpha_frac": 0.6277702397,
"autogenerated": false,
"ratio": 2.9015748031496065,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9029345042849606,
"avg_score": 0,
"num_lines": 68
} |
from functools import partial
def T(type_, obj):
    """
    Returns True if obj is an instance of type_ (argument-swapped
    isinstance, handy for partial application).
    """
    return isinstance(obj, type_)
# One-argument type predicates built from T by fixing the type.
# NOTE: `basestring` is Python 2 only.
is_list = partial(T, list)
is_dict = partial(T, dict)
is_tuple = partial(T, tuple)
is_set = partial(T, set)
is_str = partial(T, basestring)
def is_juicy_list(obj):
    """
    Returns True if obj is a non-empty list.
    """
    # Bug fix: previously returned `obj and is_list(obj)`, which leaked the
    # falsy object itself (e.g. [] or None) instead of the documented bool.
    # Truthiness of the result is unchanged.
    return bool(obj) and is_list(obj)
def is_super_list(obj):
    """
    Returns True if obj is a list of lists.
    """
    # Short-circuits on emptiness/non-list before indexing element 0.
    return is_juicy_list(obj) and is_list(obj[0])
def is_collection(obj):
    """
    Returns True if obj is a list, dict, tuple or set.
    """
    return any(check(obj) for check in (is_list, is_dict, is_tuple, is_set))
def flatten(lst):
    """
    Flattens one level of nesting in lst.
    """
    flat = []
    for sub in lst:
        flat.extend(sub)
    return flat
def negate(fn):
    """
    Returns a function computing the boolean negation of fn's result.
    """
    def negated(*args):
        return not fn(*args)
    return negated
def split_strings(col):
    """
    Recursively split the strings in col; None becomes an empty list and
    nested collections pass through untouched.
    """
    def words(value):
        return [] if value is None else value.split()
    return [item if is_collection(item) else words(item) for item in col]
def split_and_reduce(col):
    """
    Recursively splits and concatenates the strings in col.
    """
    combined = []
    for chunk in split_strings(col):
        # Plain `+` (not extend) preserves the original TypeError behavior
        # for non-list chunks.
        combined = combined + chunk
    return combined
def to_numeric_bool(cond, col=None):
    """If no col is supplied, returns 1 if cond is True or 0 otherwise.
    If col is supplied returns 1 if cond is True and all elements are True,
    or if cond is False and any of the elements is True.
    """
    if not col:
        return int(bool(cond))
    combine = all if cond else any
    return int(combine(col))
def to_tuples(col):
    """
    Recursively creates a tuple from every element in col.
    """
    if is_dict(col):
        return tuple(to_tuples(pair) for pair in col.items())
    if is_collection(col):
        return tuple(to_tuples(element) for element in col)
    return col
def to_dict(obj, visited=None):
    """
    Recursively creates a dict from obj attributes and its values, skipping private and callable attributes.
    When a cycle is found, the visited node is replaced with an empty dict.
    """
    # `visited` carries (key, value) pairs already on the current path so a
    # cycle collapses to {} instead of recursing forever.
    visited = visited or []
    if is_list(obj) or is_tuple(obj) or is_set(obj):
        # NOTE: relies on Python 2 `map` returning a list, not an iterator.
        return map(lambda x: to_dict(x, visited), obj)
    elif hasattr(obj, '__dict__'):
        return dict([(k, to_dict(v, visited + [(k, v)])) for k, v in vars(obj).items()
                     if not k.startswith('_') and not callable(v) and not (k, v) in visited])
    elif is_dict(obj):
        return dict([(k, to_dict(v, visited + [(k, v)])) for k, v in obj.items()
                     if not (k, v) in visited])
    else:
        # Leaf value: returned as-is.
        return obj
def unique(col):
    """
    Returns the unique elements in col (recursive).
    """
    if is_dict(col):
        distinct = to_tuples(col.items())
    else:
        distinct = set(to_tuples(col))
    return tuple(sorted(distinct))
def multi_get(dict_, keys):
    """
    Recursively looks up keys in dict_.

    ``keys`` may be a single key or a sequence of keys describing a nested
    path. Returns ``dict_`` unchanged when it is not a dict or when there
    are no keys left to follow.
    """
    if not is_dict(dict_) or not keys:
        return dict_
    if not is_collection(keys):
        return dict_.get(keys)
    # Bug fix: the recursion used to continue with keys[-1] (the *last*
    # key), silently skipping every intermediate key for paths longer than
    # two elements. Walk the remaining path instead.
    return multi_get(dict_.get(keys[0]), keys[1:])
def walk(obj, _path_so_far=[]):
    """
    Returns a list of all root-to-leaf paths in the tree obj.
    """
    # (_path_so_far is never mutated, only concatenated, so the mutable
    # default is harmless; kept for interface compatibility.)
    if is_list(obj):
        paths = []
        for child in obj:
            paths += walk(child, _path_so_far)
        return paths
    elif is_dict(obj):
        paths = []
        for node, child in obj.items():
            paths += walk(child, _path_so_far + [node])
        return paths
    return [_path_so_far + [obj]]
def expand_last_level(obj):
    """
    Returns a list of the leaf nodes in the tree obj.
    """
    leaves = []
    for path in walk(obj):
        if path:
            leaves.append(path[-1])
    return leaves
def expand_one_level(obj):
    """
    Returns the next level of nodes in the tree obj
    """
    if is_dict(obj):
        return obj.values()
    else:
        # NOTE(review): the condition tests `obj` (constant per iteration),
        # not `item` — likely a typo — and `item.values()` assumes every
        # element is a dict. Confirm intent before relying on this branch.
        return [item.values() for item in obj if is_collection(obj)]
| {
"repo_name": "podio/conssert",
"path": "conssert/navigate.py",
"copies": "1",
"size": "4078",
"license": "mit",
"hash": 5238859181970546000,
"line_mean": 24.6477987421,
"line_max": 108,
"alpha_frac": 0.6022560078,
"autogenerated": false,
"ratio": 3.4854700854700855,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45877260932700853,
"avg_score": null,
"num_lines": null
} |
from functools import partial
def update_schema_if_mandatory(response, schema, patch_collection):
    """Patch the collection schema when it differs from the wanted one.

    ``response`` is a kinto create/get collection response; the existing
    schema lives under ``details.existing`` for "already exists" responses,
    otherwise under ``data``. ``patch_collection`` must accept a ``data=``
    keyword.
    """
    if 'details' in response:
        current_schema = response['details']['existing']['schema']
    else:
        current_schema = response['data'].get('schema')
    if schema and (not current_schema or current_schema != schema):
        patch_collection(data={'schema': schema})
def get_kinto_records(kinto_client, bucket, collection, permissions,
                      schema=None):
    """Return all the kinto records for this bucket/collection."""
    # Make sure the bucket and collection exist before reading.
    kinto_client.create_bucket(bucket, if_not_exists=True)
    response = kinto_client.create_collection(
        collection, bucket, permissions=permissions, if_not_exists=True)
    patcher = partial(kinto_client.patch_collection,
                      bucket=bucket, collection=collection)
    update_schema_if_mandatory(response, schema, patcher)
    return kinto_client.get_records(bucket=bucket, collection=collection)
| {
"repo_name": "mozilla-services/xml2kinto",
"path": "xml2kinto/kinto.py",
"copies": "1",
"size": "1052",
"license": "apache-2.0",
"hash": -5528433437944003000,
"line_mean": 37.962962963,
"line_max": 73,
"alpha_frac": 0.6910646388,
"autogenerated": false,
"ratio": 4.311475409836065,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 27
} |
from functools import partial
def nth(i):
    """Return a function extracting element ``i`` from a sequence."""
    def extract(seq):
        return seq[i]
    return extract


first = nth(0)
second = nth(1)
# Term protocol for built-in sequences: rebuild with the type itself,
# identify by type, use the elements as args, never a leaf.
seq = {'new': lambda op, args: op(args),
       'op': lambda term: type(term),
       'args': lambda term: tuple(term),
       'isleaf':lambda term: False}

# Registry mapping Python types to their term-protocol implementations.
# Dicts are identified by (type, sorted keys); values sorted by key are
# the args. NOTE(review): the dict 'new' lambda takes (keys, args) but
# new() handles dicts before consulting the registry — confirm this entry
# is effectively dead code.
term_registry = {
    dict: {'new': lambda keys, args: dict(zip(keys, args)),
           'op': lambda term: (type(term), tuple(sorted(term.keys()))),
           'args': lambda term: tuple(map(second, sorted(term.items()))),
           'isleaf':lambda term: False},
    list: seq,
    tuple: seq
}
def new(op, args):
    """Rebuild a term of kind ``op`` from ``args`` (inverse of op/args)."""
    if isinstance(op, type) and issubclass(op, (tuple, list)):
        return op(args)
    if isinstance(op, tuple) and issubclass(op[0], dict):
        # op is (dict-type, keys) as produced by the dict 'op' function.
        return op[0](zip(op[1], args))
    if op in term_registry:
        return term_registry[op]['new'](args)
    if hasattr(op, '_term_new'):
        return op._term_new(args)
    raise NotImplementedError()
def make(fnname, term):
    """Dispatch protocol function *fnname* ('op'/'args') for *term*,
    preferring the registry and falling back to `_term_*` methods."""
    impl = term_registry.get(type(term))
    if impl is not None:
        return impl[fnname](term)
    method = getattr(term, '_term_' + fnname, None)
    if method is not None:
        return method()
    raise NotImplementedError()
# Accessors for a term's operator and argument tuple.
op = partial(make, 'op')
op.func_name = 'op'
args = partial(make, 'args')
# Bug fix: this line previously reassigned op.func_name instead of naming
# the `args` partial.
args.func_name = 'args'
def isleaf(term):
    """True for terms with no decomposable structure (the default)."""
    impl = term_registry.get(type(term))
    if impl is not None:
        return impl['isleaf'](term)
    if not isinstance(term, type) and hasattr(term, '_term_isleaf'):
        return term._term_isleaf()
    return True
def attr_new(op, args):
    """Rebuild an attribute-based term: bypass __init__, restore __dict__."""
    instance = object.__new__(op)
    vars(instance).update(args)
    return instance


def attr_op(term):
    """An attribute-based term's operator is simply its class."""
    return type(term)


def attr_args(term):
    """An attribute-based term's args are its instance attributes."""
    return term.__dict__


def attr_isleaf(term):
    """Attribute-based terms always decompose."""
    return False


def termify_attr(cls):
    """Equip *cls* with the term protocol backed by instance __dict__."""
    cls._term_new = classmethod(attr_new)
    cls._term_op = attr_op
    cls._term_args = attr_args
    cls._term_isleaf = attr_isleaf
    return cls
def slot_new(op, args):
    """Rebuild a slot-based term: bypass __init__, fill slots in order."""
    instance = object.__new__(op)
    for slot, arg in zip(op.__slots__, args):
        setattr(instance, slot, arg)
    return instance


def slot_op(term):
    """A slot-based term's operator is simply its class."""
    return type(term)


def slot_args(term):
    """A slot-based term's args are its slot values, in slot order."""
    return tuple(getattr(term, slot) for slot in term.__slots__)


def slot_isleaf(term):
    """Slot-based terms always decompose."""
    return False


def termify_slot(cls):
    """Equip *cls* with the term protocol backed by __slots__."""
    cls._term_new = classmethod(slot_new)
    cls._term_op = slot_op
    cls._term_args = slot_args
    cls._term_isleaf = slot_isleaf
    return cls
def termify(cls):
    """Equip *cls* with the term protocol, choosing the slot- or
    attribute-based implementation based on __slots__."""
    handler = termify_slot if hasattr(cls, '__slots__') else termify_attr
    return handler(cls)
| {
"repo_name": "mrocklin/termpy",
"path": "termpy/ground.py",
"copies": "1",
"size": "2599",
"license": "bsd-3-clause",
"hash": -3467711011291837000,
"line_mean": 26.0729166667,
"line_max": 75,
"alpha_frac": 0.6067718353,
"autogenerated": false,
"ratio": 3.3024142312579414,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44091860665579413,
"avg_score": null,
"num_lines": null
} |
from functools import partial
# Replace this with actual implementation from
# http://code.activestate.com/recipes/577748-calculate-the-mro-of-a-class/
# (though this will work for simple cases)
def mro(*bases):
    """Return a linearization of *bases* suitable for docstring lookup.

    Generalization: the original placeholder returned only the first
    base's ``__mro__``, silently ignoring every other base. This version
    walks each base's MRO in order, deduplicating, so docstrings can be
    inherited from any ancestor. (Not a full C3 merge — see the recipe
    referenced above for that.)
    """
    seen = []
    for base in bases:
        for cls in base.__mro__:
            if cls not in seen:
                seen.append(cls)
    return tuple(seen)
# This definition is only used to assist static code analyzers
def copy_ancestor_docstring(fn):
    '''Copy docstring for method from superclass

    For this decorator to work, the class has to use the `InheritableDocstrings`
    metaclass. This module-level stand-in only exists so static analyzers
    recognize the name; the metaclass injects the real implementation.
    '''
    raise RuntimeError('Decorator can only be used in classes '
                       'using the `InheritableDocstrings` metaclass')
def _copy_ancestor_docstring(mro, fn):
'''Decorator to set docstring for *fn* from *mro*'''
if fn.__doc__ is not None:
raise RuntimeError('Function already has docstring')
# Search for docstring in superclass
for cls in mro:
super_fn = getattr(cls, fn.__name__, None)
if super_fn is None:
continue
fn.__doc__ = super_fn.__doc__
break
else:
raise RuntimeError("Can't inherit docstring for %s: method does not "
"exist in superclass" % fn.__name__)
return fn
class InheritableDocstrings(type):
    """Metaclass that injects a `copy_ancestor_docstring` decorator into
    class bodies so methods can inherit docstrings from base classes."""

    @classmethod
    def __prepare__(cls, name, bases, **kwds):
        # Bug fix: keyword arguments were previously splatted positionally
        # (`*kwds`), which would pass the dict's *keys* as positional
        # arguments whenever class keyword arguments are present.
        classdict = super().__prepare__(name, bases, **kwds)

        # Inject decorators into class namespace
        classdict['copy_ancestor_docstring'] = partial(_copy_ancestor_docstring, mro(*bases))
        return classdict

    def __new__(cls, name, bases, classdict):
        # Decorator may not exist in class dict if the class (metaclass
        # instance) was constructed with an explicit call to `type`.
        # (cf http://bugs.python.org/issue18334)
        if 'copy_ancestor_docstring' in classdict:
            # Make sure that class definition hasn't messed with decorators
            copy_impl = getattr(classdict['copy_ancestor_docstring'], 'func', None)
            if copy_impl is not _copy_ancestor_docstring:
                raise RuntimeError('No copy_ancestor_docstring attribute may be created '
                                   'in classes using the InheritableDocstrings metaclass')

            # Delete decorators from class namespace
            del classdict['copy_ancestor_docstring']

        return super().__new__(cls, name, bases, classdict)
| {
"repo_name": "ActiveState/code",
"path": "recipes/Python/578587_Inherit_method_docstrings_without_breaking/recipe-578587.py",
"copies": "1",
"size": "2425",
"license": "mit",
"hash": -5224659705916750000,
"line_mean": 36.890625,
"line_max": 93,
"alpha_frac": 0.6301030928,
"autogenerated": false,
"ratio": 4.507434944237918,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5637538037037918,
"avg_score": null,
"num_lines": null
} |
from functools import partial
try:
# django 1.7
from django.contrib.admin.utils import flatten_fieldsets
from django.forms.models import modelform_defines_fields
except ImportError:
from django.contrib.admin.util import flatten_fieldsets
from django.contrib.admin.options import ModelAdmin, InlineModelAdmin
from django import forms
from app_data.forms import multiform_factory, multiinlineformset_factory, MultiForm
class AppDataAdminMixin(object):
    """Admin mixin that splits `app_label.field` entries out of fieldsets
    and exclude lists, routing them to per-app sub-forms via a MultiForm."""
    multiform = MultiForm
    # Per-app form options, merged with what's derived from fieldsets.
    app_form_opts = {}

    def _get_form_factory_opts(self, request, obj=None, **kwargs):
        """Build keyword arguments for multiform_factory /
        multiinlineformset_factory from this admin's configuration."""
        # NOTE(review): `declared_fieldsets` was removed in later Django
        # releases — confirm the targeted Django version still provides it.
        if self.declared_fieldsets:
            fields = flatten_fieldsets(self.declared_fieldsets)
        else:
            fields = None
        if self.exclude is None:
            exclude = []
        else:
            exclude = list(self.exclude)
        exclude.extend(self.get_readonly_fields(request, obj))
        if self.exclude is None and hasattr(self.form, '_meta') and self.form._meta.exclude:
            # Take the custom ModelForm's Meta.exclude into account only if the
            # ModelAdmin doesn't define its own.
            exclude.extend(self.form._meta.exclude)

        # construct the form_opts from declared_fieldsets
        form_opts = self.app_form_opts.copy()

        if fields is not None:
            # put app_name prefixed fields into form_opts
            for f in fields:
                if '.' not in f:
                    continue

                label, name = f.split('.')
                app_fields = form_opts.setdefault(label, {}).setdefault('fields', [])
                if name not in app_fields:
                    app_fields.append(name)

            # .. and remove them from fields for the model form
            fields = [f for f in fields if '.' not in f]

        # do the same for exclude
        for f in exclude:
            if '.' not in f:
                continue

            label, name = f.split('.')
            app_fields = form_opts.setdefault(label, {}).setdefault('exclude', [])
            if name not in app_fields:
                app_fields.append(name)
        exclude = [f for f in exclude if '.' not in f]

        # if exclude is an empty list we pass None to be consistant with the
        # default on modelform_factory
        exclude = exclude or None
        defaults = {
            "form": self.form,
            "multiform": self.multiform,
            "form_opts": form_opts,
            "fields": fields,
            "exclude": exclude,
            "formfield_callback": partial(self.formfield_for_dbfield, request=request),
        }
        defaults.update(kwargs)
        if hasattr(forms, 'ALL_FIELDS'):
            # Django 1.7
            if defaults['fields'] is None and not modelform_defines_fields(defaults['form']):
                defaults['fields'] = forms.ALL_FIELDS
        return defaults
class AppDataModelAdmin(AppDataAdminMixin, ModelAdmin):
    """ModelAdmin that serves a MultiForm covering model + app_data fields."""

    def get_form(self, request, obj=None, **kwargs):
        """
        Returns a Form class for use in the admin add view. This is used by
        add_view and change_view.
        """
        # Without a configured multiform, behave exactly like a plain
        # ModelAdmin.
        if self.multiform is None:
            return super(AppDataModelAdmin, self).get_form(request, obj=obj, **kwargs)
        factory_opts = self._get_form_factory_opts(request, obj, **kwargs)
        return multiform_factory(self.model, **factory_opts)
class AppDataInlineModelAdmin(AppDataAdminMixin, InlineModelAdmin):
    """InlineModelAdmin serving a multi inline formset for app_data models."""

    def get_formset(self, request, obj=None, **kwargs):
        """Return the formset class for this inline.

        Falls back to the stock InlineModelAdmin formset when no multiform
        is configured; otherwise builds a multi inline formset carrying the
        app_data form options.
        """
        if self.multiform is None:
            # BUG FIX: the super() call previously named AppDataModelAdmin,
            # which is not in this class's MRO and raised TypeError whenever
            # multiform was None.
            return super(AppDataInlineModelAdmin, self).get_formset(request, obj=obj, **kwargs)
        can_delete = self.can_delete
        if hasattr(self, 'has_delete_permission'):
            # Honor per-object delete permission when the admin provides it.
            can_delete = can_delete and self.has_delete_permission(request, obj)
        defaults = {
            "formset": self.formset,
            "fk_name": self.fk_name,
            "extra": self.extra,
            "max_num": self.max_num,
            "can_delete": can_delete,
        }
        # Form-factory options (form, multiform, fields, exclude, ...) are
        # layered on top and take precedence over the inline defaults above.
        defaults.update(self._get_form_factory_opts(request, obj, **kwargs))
        return multiinlineformset_factory(self.parent_model, self.model, **defaults)
class AppDataStackedInline(AppDataInlineModelAdmin):
    """App-data-aware inline rendered with Django's stock stacked layout."""
    template = 'admin/edit_inline/stacked.html'
class AppDataTabularInline(AppDataInlineModelAdmin):
    """App-data-aware inline rendered with Django's stock tabular layout."""
    template = 'admin/edit_inline/tabular.html'
| {
"repo_name": "Venturi/oldcms",
"path": "env/lib/python2.7/site-packages/app_data/admin.py",
"copies": "1",
"size": "4349",
"license": "apache-2.0",
"hash": -8609018026784354000,
"line_mean": 37.8303571429,
"line_max": 99,
"alpha_frac": 0.6123246723,
"autogenerated": false,
"ratio": 4.205996131528046,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5318320803828046,
"avg_score": null,
"num_lines": null
} |
from functools import partial
try:
from raven.utils.compat import (
string_types,
binary_type,
)
except ImportError:
from raven._compat import (
string_types,
binary_type,
)
from raven.processors import SanitizePasswordsProcessor
from raven.utils import varmap
class PyramidSanitizePasswordsProcessor(SanitizePasswordsProcessor):
    """Do some extra sanitization to pick up places where pyramid tends to
    leak passwords through"""

    def sensitive_repr_filter(self, key, value):
        """Replace *value* wholesale when any sensitive field name appears in
        it as a ``field=`` keyval fragment (only strings are inspected)."""
        if isinstance(value, string_types):
            if any(field + '=' in value for field in self.FIELDS):
                return '[Filtered]'
        return value

    def filter_stacktrace(self, data):
        """Filter out any local variables that contain sensitive-looking
        patterns in their repr()s"""
        super(PyramidSanitizePasswordsProcessor, self).filter_stacktrace(data)
        for frame in data.get('frames', []):
            if 'vars' in frame:
                frame['vars'] = varmap(self.sensitive_repr_filter, frame['vars'])

    def filter_http(self, data):
        """Also descend into env, headers looking for keyval-ish strings"""
        super(PyramidSanitizePasswordsProcessor, self).filter_http(data)
        for section_name in ('headers', 'env', 'data'):
            section = data.get(section_name)
            if isinstance(section, dict):
                sanitize = partial(self.vm_sanitize_keyval, delimiter='&')
                data[section_name] = varmap(sanitize, section)
            elif isinstance(section, binary_type):
                # Raw byte payloads are decoded, scrubbed as one string, then
                # re-encoded.
                cleaned = self.sensitive_repr_filter(
                    section_name, section.decode('utf8')
                )
                data[section_name] = cleaned.encode('utf8')

    def vm_sanitize_keyval(self, key, keyval, delimiter):
        """varmap-friendly way to call _sanitize_keyvals
        Also handles mixed types in env"""
        if not isinstance(keyval, string_types):
            return keyval
        return self._sanitize_keyvals(keyval, delimiter)
| {
"repo_name": "npilon/pyramid_crow",
"path": "pyramid_crow/processors.py",
"copies": "1",
"size": "2037",
"license": "apache-2.0",
"hash": 2869598427867953000,
"line_mean": 33.5254237288,
"line_max": 78,
"alpha_frac": 0.6053019146,
"autogenerated": false,
"ratio": 4.2974683544303796,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5402770269030379,
"avg_score": null,
"num_lines": null
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.