import ana
import weakref
default_plugins = { }
class SimStatePlugin(ana.Storable):
#__slots__ = [ 'state' ]
def __init__(self):
self.state = None
# Sets a new state (for example, if the state has been branched)
def set_state(self, state):
if state is None or type(state).__name__ == 'weakproxy':
self.state = state
else:
self.state = weakref.proxy(state)
# Should return a copy of the state plugin.
def copy(self):
raise Exception("copy() not implement for %s", self.__class__.__name__)
def merge(self, others, merge_flag, flag_values): # pylint: disable=W0613
'''
Should merge the state plugin with the provided others.
others - the other state plugins
merge_flag - a symbolic expression for the merge flag
flag_values - the values to compare against to check which content should be used, e.g.:
self.symbolic_content = self.state.se.If(merge_flag == flag_values[0], self.symbolic_content, others[0].symbolic_content)
Can return a sequence of constraints to be added to the state.
'''
raise Exception("merge() not implement for %s", self.__class__.__name__)
def widen(self, others, merge_flag, flag_values):
"""
The widening operation for plugins.
"""
raise Exception('widen() not implemented for %s' % self.__class__.__name__)
@staticmethod
def register_default(name, cls):
if name in default_plugins:
raise Exception("%s is already set as the default for %s" % (default_plugins[name], name))
default_plugins[name] = cls
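# Hedged example (not part of this module): a minimal plugin sketch
# showing the copy/merge contract that SimStatePlugin subclasses
# implement; the merge logic here is a stand-in for illustration only.
class ExampleCounterPlugin(SimStatePlugin):
    def __init__(self, count=0):
        SimStatePlugin.__init__(self)
        self.count = count
    def copy(self):
        return ExampleCounterPlugin(self.count)
    def merge(self, others, merge_flag, flag_values):
        # A real plugin would build symbolic If-expressions over
        # merge_flag; taking the maximum is purely illustrative.
        self.count = max([self.count] + [o.count for o in others])
        return []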
|
from collections import Counter
from django.contrib import admin
from django.contrib.auth.models import User
from gem.models import GemCommentReport, Invite
from gem.rules import ProfileDataRule, CommentCountRule
from molo.commenting.admin import MoloCommentAdmin, MoloCommentsModelAdmin
from molo.commenting.models import MoloComment
from molo.profiles.models import UserProfile
from molo.forms.models import FormsSegmentUserGroup
from wagtail.contrib.modeladmin.helpers import PermissionHelper
from wagtail.contrib.modeladmin.options import (
ModelAdmin as WagtailModelAdmin, modeladmin_register)
from wagtail.contrib.modeladmin.views import CreateView
class InviteAdmin(WagtailModelAdmin):
model = Invite
menu_order = 600
menu_icon = 'mail'
menu_label = 'Invites'
add_to_settings_menu = True
search_fields = ['email']
list_filter = ['is_accepted', 'created_at']
list_display = [
'email', 'created_at', 'modified_at', 'is_accepted', 'user',
]
class InviteCreateView(CreateView):
def form_valid(self, form):
site = self.request._wagtail_site
if not form.instance.user:
form.instance.user = self.request.user
if not form.instance.site:
form.instance.site = site
return super().form_valid(form)
create_view_class = InviteCreateView
modeladmin_register(InviteAdmin)
class UserProfileInlineModelAdmin(admin.StackedInline):
model = UserProfile
can_delete = False
class GemCommentReportModelAdmin(admin.StackedInline):
model = GemCommentReport
can_delete = True
max_num = 0
actions = None
readonly_fields = ["user", "reported_reason", ]
class FormsSegmentUserPermissionHelper(PermissionHelper):
def __init__(self, model, inspect_view_enabled=False):
model = FormsSegmentUserGroup
super(FormsSegmentUserPermissionHelper, self).__init__(
model, inspect_view_enabled
)
class GemCommentModelAdmin(MoloCommentsModelAdmin):
list_display = (
'comment', 'parent_comment', 'moderator_reply', 'content', '_user',
'is_removed', 'is_reported', 'reported_count', 'reported_reason',
'submit_date', 'country')
def reported_reason(self, obj):
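# Summarise this comment's reports as "reason, (count)" strings,
# most frequent reason first (via collections.Counter).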
all_reported_reasons = list(
GemCommentReport.objects.filter(comment=obj.pk).values_list(
'reported_reason', flat=True))
breakdown_of_reasons = []
for value, count in Counter(all_reported_reasons).most_common():
reason = '%s, (%s)' % (value, count)
breakdown_of_reasons.append(reason)
return breakdown_of_reasons
def reported_count(self, obj):
return GemCommentReport.objects.filter(comment=obj.pk).count()
class GemCommentReportAdmin(MoloCommentAdmin):
inlines = (GemCommentReportModelAdmin,)
class ProfileDataRuleAdminInline(admin.TabularInline):
"""
Inline the ProfileDataRule into the administration
interface for segments.
"""
model = ProfileDataRule
class CommentCountRuleAdminInline(admin.TabularInline):
"""
Inline the CommentCountRule into the administration
interface for segments.
"""
model = CommentCountRule
admin.site.unregister(User)
admin.site.unregister(MoloComment)
admin.site.register(MoloComment, GemCommentReportAdmin)
|
import pytest
from hypr.helpers.mini_dsl import Range
@pytest.mark.populate(10)
class TestIntervalTypes:
models = 'SQLiteModel',
# interval notation
def test_closed_interval(self, model):
"""Test explicit bound interval."""
ref = [model.one(i) for i in range(2, 7) if model.one(i)]
rv = sorted(model.get(id=Range(2, 7)))
assert rv == ref
def test_right_open(self, model):
"""Interval with a minimum value only."""
ref = [model.one(i) for i in range(7, 100) if model.one(i)]
rv = sorted(model.get(id=Range(start=7)))
assert rv == ref
def test_left_open(self, model):
"""Interval with a maximum value only."""
ref = [model.one(i) for i in range(0, 3) if model.one(i)]
rv = sorted(model.get(id=Range(stop=3)))
assert rv == ref
def test_negation(self, model):
"""Test negation of an interval."""
ref = sorted(model.get(id=Range(stop=2)) +
model.get(id=Range(start=7)))
rv = sorted(model.get(id=(False, Range(2, 7))))
assert rv == ref
A = Range(10, 20)
B = Range(15, 25)
A_and_B = Range(15, 20)
A_or_B = Range(10, 25)
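# Predicate encoding used below (inferred from these tests, not from
# hypr docs): a filter value is a tuple of terms, each term being
# (include, Range[, group]). Terms sharing a group index are OR-ed
# together and distinct groups are AND-ed, i.e. a conjunctive normal
# form; a bare Range is shorthand for (True, Range).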
@pytest.mark.populate(30)
class TestIntervalCombination:
"""Test logical operators."""
models = 'SQLiteModel',
def test_false(self, model):
"""Test an interval always false."""
assert model.get(id=(False, Range(0, 100))) == []
def test_true(self, model):
"""Test an interval always true."""
ref = sorted(model.get())
rv = sorted(model.get(id=Range(0, 100)))
assert rv == ref
def test_conjunction(self, model):
"""A ∧ B."""
ref = model.get(id=A_and_B)
rv = model.get(id=((True, A, 0), (True, B, 1)))
assert sorted(rv) == sorted(ref)
def test_disjunction(self, model):
"""A ∨ B."""
ref = model.get(id=A_or_B)
rv = model.get(id=(A, B))
assert sorted(rv) == sorted(ref)
def test_nand(self, model):
"""A ⊼ B encoded as ¬A ∨ ¬B."""
ref = model.get(id=(False, A_and_B))
rv = model.get(id=((False, A), (False, B)))
assert sorted(rv) == sorted(ref)
def test_nor(self, model):
"""A ⊽ B encoded as ¬A ∧ ¬B."""
ref = model.get(id=(False, A_or_B))
rv = model.get(id=(
(False, A, 0),
(False, B, 1)
))
assert sorted(rv) == sorted(ref)
def test_implication(self, model):
"""A → B encoded as ¬A ∨ B."""
ref = model.get(id=(False, Range(10, 15)))
rv = model.get(id=((False, A), B))
assert sorted(rv) == sorted(ref)
def test_converse_implication(self, model):
"""A ← B encoded as A ∨ ¬B."""
ref = model.get(id=(False, Range(20, 25)))
rv = model.get(id=(A, (False, B)))
assert sorted(rv) == sorted(ref)
def test_xor(self, model):
"""A ⊕ B encoded as (¬A ∨ ¬B) ∧ (A ∨ B)."""
ref = model.get(id=Range(10, 15)) + model.get(id=Range(20, 25))
rv = model.get(id=(
(False, A, 0), (False, B, 0),
(True, A, 1), (True, B, 1),
))
assert sorted(rv) == sorted(ref)
def test_biconditional(self, model):
"""A ↔ B encoded as (¬A ∨ B) ∧ (A ∨ ¬B)."""
ref = model.get(id=(False, A_or_B)) + model.get(id=A_and_B)
rv = model.get(id=(
(False, A, 0), (True, B, 0),
(True, A, 1), (False, B, 1),
))
assert sorted(rv) == sorted(ref)
def test_non_implication(self, model):
"""A ↛ B encoded as A ∨ ¬B."""
ref = model.get(id=Range(10, 15))
rv = model.get(id=(
(True, A, 0),
(False, B, 1)
))
assert sorted(rv) == sorted(ref)
def test_converse_non_implication(self, model):
"""A ↚ B encoded as ¬A ∨ B."""
ref = model.get(id=Range(20, 25))
rv = model.get(id=(
(False, A, 0),
(True, B, 1)
))
assert sorted(rv) == sorted(ref)
@pytest.mark.populate(10)
class TestIntervalIntersection:
"""Test some intersections."""
models = 'SQLiteModel',
def test_empty_intersection(self, model):
"""Empty intersection."""
rv = model.get(id=((True, Range(2, 4), 0), (True, Range(7, 9), 1)))
assert sorted(rv) == []
def test_union_without_intersection(self, model):
"""Union without intersection."""
ref = model.get(id=Range(2, 4)) + model.get(id=Range(7, 9))
rv = model.get(id=(Range(2, 4), Range(7, 9)))
assert sorted(rv) == sorted(ref)
|
'''Tool to generate computationally-rarefied graphs kmer spectra'''
import sys
import os
import scipy.stats
from optparse import OptionParser
import numpy as np
import ksatools
def fract(aa, epsilon, threshold):
'''Evaluates the fraction of the theoretically-subsampled spectrum
above a specified threshold. Dataset abundance is attenuated by
the factor epsilon. Returns a float between 0 and 1. aa is a
two-column abundance table; epsilon and threshold are floats.
'''
sys.stderr.write("E %f T %f\n" % (epsilon, threshold))
xr = aa[:, 0]
xn = aa[:, 1]
NO = np.sum(xn * xr)
p = 0.0
for i in range(len(xr)):
# this is the expected number of nonzero categories after hypergeometric sampling
# nonzero = (1.-scipy.stats.hypergeom.cdf(0.5, NO, xr[i], epsilon*NO))
nonzero = (1. - scipy.stats.hypergeom.pmf(0, NO, xr[i], epsilon * NO))
# For efficiency, don't evaluate if numerator is too small
# For numerical stability, don't evaluate term if denominator (nonzero) is too small
# note: second threshold (on nonzero) here creates kinks in the graph, but is important
if nonzero * xr[i] * xn[i] > 10E-0 and nonzero > 1E-2:
# and this is the expected number of above-threshold survivors
gt_thresh = 1. - \
scipy.stats.hypergeom.cdf(
threshold + 0.5, NO, xr[i], epsilon * NO)
interim = float(xn[i] * xr[i]) * (gt_thresh / nonzero)
if (not np.isnan(interim)) and (interim > 0):
p += interim
return p / NO
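def _demo_fract():
    '''Hedged usage sketch (not part of the original tool): evaluate
    fract() on a toy two-column abundance table, where column 0 is a
    category's abundance and column 1 counts categories at that
    abundance.'''
    toy = np.array([[1., 100.], [50., 10.]])
    # expected fraction of data above 1x after keeping 10% of the data
    return fract(toy, 0.1, 1)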
def rich(aa, epsilon, threshold):
sys.stderr.write("richness E %f T %f\n" % (epsilon, threshold))
xr = aa[:, 0]
xn = aa[:, 1]
NO = np.sum(xn * xr)
interim = 0
for i in range(len(xr)):
# this is the expected number of nonzero categories after hypergeometric sampling
# nonzero = (1.-scipy.stats.hypergeom.cdf(0.5, NO, xr[i], epsilon*NO))
nonzero = (1. - scipy.stats.hypergeom.pmf(0, NO, xr[i], epsilon * NO))
interim += nonzero * xn[i]
return interim
def calc_resampled_fraction(aa, samplefracs, thresholds):
'''Calculate a 2D array of fract() return values by evaluating it
for each fraction in samplefracs and each threshold in thresholds.
Returns a 2D matrix with shape (len(samplefracs), len(thresholds)).
aa must be a 2D ndarray.
'''
assert aa.shape[1] == 2
matrix = np.zeros((len(samplefracs), len(thresholds)))
for i, frac in enumerate(samplefracs):
for j, threshold in enumerate(thresholds):
dummy = fract(aa, frac, threshold)
matrix[i][j] = dummy
return matrix
def calc_resampled_richness(aa, samplefracs, thresholds):
'''Calculate a 2D array, like calc_resampled_fraction, of
subsampled richness for each fraction in samplefracs
and each threshold in thresholds.
Returns a 2D matrix with shape (len(samplefracs), len(thresholds)).
aa must be a 2D ndarray.
'''
assert aa.shape[1] == 2
matrix = np.zeros((len(samplefracs), len(thresholds)))
for i, frac in enumerate(samplefracs):
for j, threshold in enumerate(thresholds):
dummy = rich(aa, frac, threshold)
matrix[i][j] = dummy
return matrix
def plotme(b, label, color=None, thresholdlist=None, numplots=4,
suppress=False, dump=False, shaded=0, n=1):
'''Performs calculations and calls graphing routines,
given spectra
'''
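# shaded selects the figure style: 0 = one titled plot with markers,
# 1 = stacked subplots with shaded fill, 2 = overlaid fraction lines,
# 3 = richness curves (semilog y); any other value = single shaded plot.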
import matplotlib.pyplot as plt
N = np.sum(b[:, 0] * b[:, 1])
# coarse, cheap alternative: samplefractions = 10**np.arange(2, 11, .5) / N
samplefractions = 10**np.arange(2, 11, .1) / N
samplefractions = np.hstack((samplefractions[samplefractions < 1], 1))
SHADED = shaded
if thresholdlist is None:
thresholdlist = [1]
if SHADED != 3:
matrix = calc_resampled_fraction(b, samplefractions, thresholdlist)
else:
matrix = calc_resampled_richness(b, samplefractions, thresholdlist)
effort = N * samplefractions
data = np.hstack([np.atleast_2d(effort).T, matrix])
headertext = "subsetsize\t" + "\t".join(map(str, thresholdlist))
with open(label + ".rare.csv", 'wb') as fp:
np.savetxt(fp, data, header=headertext, delimiter="\t")
if dump:
with open(label + ".rare.csv") as f:
for l in f:
print(l)
pex2 = np.hstack((effort[0], effort, effort[-1]))
pex = effort
for i in range(matrix.shape[1]):
aug2 = np.hstack((0, matrix[:, i], 0))
aug = matrix[:, i]
lab = str(thresholdlist[i]) + "x"
plt.grid(axis='both')
if SHADED == 0:
plt.title(label)
plt.semilogx(pex, aug, "-o", label=lab)
elif SHADED == 2:
lab = label + str(thresholdlist[i]) + "x"
lab = label
plt.semilogx(pex, aug, "-", label=lab, color=color)
plt.ylabel("Nonunique fraction of data")
elif SHADED == 3:
plt.semilogy(pex, aug, "-", label=lab, color=color)
plt.ylabel("Number of unique categories ")
plt.xlabel("Sampling effort")
elif SHADED == 1:
plt.subplot(numplots, 1, n + 1)
plt.semilogx(pex, aug, "-", label=lab, color=color)
plt.fill(pex2, aug2, "k", alpha=0.2)
plt.title(label)
plt.ylabel("Fraction of data")
else:
plt.semilogx(pex, aug, "-", label=lab)
plt.fill(pex2, aug2, "k", alpha=0.2)
plt.title(label)
plt.ylabel("Fraction of data")
plt.ylim((0, 1))
plt.xlim((1E4, 1E11))
if SHADED == 0 or n + 1 == numplots:
plt.xlabel("Sequencing effort (bp)")
else: # suppress drawing of x-axis labels for all but last plot
frame1 = plt.gca()
frame1.axes.get_xaxis().set_ticks([])
plt.tight_layout()
return
|
from django.contrib.auth import get_user_model
from django.db import models
from imagekit.cachefiles import ImageCacheFile
from imagekit.registry import generator_registry
from imagekit.templatetags.imagekit import DEFAULT_THUMBNAIL_GENERATOR
from rest_framework import serializers
User = get_user_model()
class ThumbnailField(serializers.ImageField):
"""
Image field that returns an image's URL.
Pass GET parameters to thumbnail the image.
Options are:
width: Specify the width (in pixels) to resize / crop to.
height: Specify the height (in pixels) to resize / crop to.
crop: Whether to crop or not [1,0]
anchor: Where to anchor the crop [t,r,b,l]
upscale: Whether to upscale or not [1,0]
If no options are specified the user's avatar is returned.
To crop to 100x100 anchored to the top right:
?width=100&height=100&crop=1&anchor=tr
"""
def __init__(self, *args, **kwargs):
self.generator_id = kwargs.pop('generator_id', DEFAULT_THUMBNAIL_GENERATOR)
super(ThumbnailField, self).__init__(*args, **kwargs)
def get_generator_kwargs(self, query_params):
width = int(query_params.get('width', 0)) or None
height = int(query_params.get('height', 0)) or None
return {
'width': width,
'height': height,
'anchor': query_params.get('anchor', None),
'crop': query_params.get('crop', None),
'upscale': query_params.get('upscale', None)
}
def generate_thumbnail(self, source, **kwargs):
generator = generator_registry.get(
self.generator_id,
source=source,
**kwargs)
return ImageCacheFile(generator)
def to_native(self, image):
if not image.name:
return None
request = self.context.get('request', None)
if request is None:
return image.url
kwargs = self.get_generator_kwargs(request.query_params)
if kwargs.get('width') or kwargs.get('height'):
image = self.generate_thumbnail(image, **kwargs)
return request.build_absolute_uri(image.url)
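# Hedged usage sketch (illustrative, not part of this module): mapping
# query parameters to generator kwargs.
#
#   field = ThumbnailField()
#   field.get_generator_kwargs({'width': '100', 'height': '100',
#                               'crop': '1', 'anchor': 'tr'})
#   # -> {'width': 100, 'height': 100, 'anchor': 'tr',
#   #     'crop': '1', 'upscale': None}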
class AvatarSerializer(serializers.ModelSerializer):
# Override default field_mapping to map ImageField to HyperlinkedImageField.
# As there is only one field this is the only mapping needed.
field_mapping = {
models.ImageField: ThumbnailField,
}
class Meta:
model = User
fields = ('avatar',)
|
"""This script is some boilerplate needed by Alembic to do its fancy database
migration stuff.
"""
import sys
sys.path.insert(0, '.')
from alembic import context
from logging.config import fileConfig
config = context.config
fileConfig(config.config_file_name)
from librarian_server import app, db
target_metadata = db.metadata
def run_migrations_offline():
"""Run migrations in 'offline' mode -- all we need is a URL.
"""
url = app.config['SQLALCHEMY_DATABASE_URI']
context.configure(
url=url,
target_metadata=target_metadata,
literal_binds=True
)
with context.begin_transaction():
context.run_migrations()
def run_migrations_online():
"""Run migrations in 'online' mode -- using the actual Librarian database
connection.
"""
with db.engine.connect() as connection:
context.configure(
connection=connection,
target_metadata=target_metadata
)
with context.begin_transaction():
context.run_migrations()
if context.is_offline_mode():
run_migrations_offline()
else:
run_migrations_online()
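# Typical invocation (sketch; Alembic runs this env script itself):
#   alembic upgrade head        # online mode, uses the live connection
#   alembic upgrade head --sql  # offline mode, emits SQL from the URL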
|
import dragonfly
import dragonfly.pandahive
import bee
from bee import connect
import math, functools
from panda3d.core import NodePath
import dragonfly.scene.unbound, dragonfly.scene.bound
import dragonfly.std
import dragonfly.io
import dragonfly.canvas
import Spyder
from random import random
def random_matrix_generator():
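# Yield an endless stream of random spawn transforms: a random rotation
# about Z and a random position within a 15x15 square around the origin.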
while 1:
a = Spyder.AxisSystem()
a.rotateZ(360 * random())
a.origin = Spyder.Coordinate(15 * random() - 7.5, 15 * random() - 7.5, 0)
yield dragonfly.scene.matrix(a, "AxisSystem")
def id_generator():
n = 0
while 1:
n += 1
yield "spawnedpanda" + str(n)
from dragonfly.canvas import box2d
from bee.mstr import mstr
class parameters: pass
class myscene(dragonfly.pandahive.spyderframe):
a = Spyder.AxisSystem()
a *= 0.25
a.origin += (-8, 42, 0)
env = Spyder.Model3D("models/environment", "egg", a)
a = Spyder.AxisSystem()
a *= 0.005
pandaclass = Spyder.ActorClass3D("models/panda-model", "egg", [("walk", "models/panda-walk4", "egg")], a,
actorclassname="pandaclass")
box = Spyder.Box2D(50, 470, 96, 96)
icon = Spyder.Icon("pandaicon.png", "pandaicon", box, transparency=True)
camcenter = Spyder.Entity3D(
"camcenter",
(
Spyder.NewMaterial("white", color=(255, 255, 255)),
Spyder.Block3D((1, 1, 1), material="white"),
)
)
del a, box
class pandawalkhive(bee.inithive):
animation = dragonfly.scene.bound.animation()
walk = dragonfly.std.variable("str")("walk")
connect(walk, animation.animation_name)
key_w = dragonfly.io.keyboardsensor_trigger("W")
connect(key_w, animation.loop)
key_s = dragonfly.io.keyboardsensor_trigger("S")
connect(key_s, animation.stop)
setPos = dragonfly.scene.bound.setPos()
setHpr = dragonfly.scene.bound.setHpr()
interval = dragonfly.time.interval_time(18)
connect(key_w, interval.start)
connect(key_s, interval.pause)
sequence = dragonfly.time.sequence(4)(8, 1, 8, 1)
connect(interval.value, sequence.inp)
ip1 = dragonfly.time.interpolation("Coordinate")((0, 0, 0), (0, -10, 0))
connect(sequence.outp1, ip1)
connect(ip1, setPos)
connect(key_w, ip1.start)
connect(key_s, ip1.stop)
ip2 = dragonfly.time.interpolation("Coordinate")((0, 0, 0), (180, 0, 0))
connect(sequence.outp2, ip2)
connect(ip2, setHpr)
connect(key_w, ip2.start)
connect(key_s, ip2.stop)
ip3 = dragonfly.time.interpolation("Coordinate")((0, -10, 0), (0, 0, 0))
connect(sequence.outp3, ip3)
connect(ip3, setPos)
connect(key_w, ip3.start)
connect(key_s, ip3.stop)
ip4 = dragonfly.time.interpolation("Coordinate")((180, 0, 0), (0, 0, 0))
connect(sequence.outp4, ip4)
connect(ip4, setHpr)
connect(key_w, ip4.start)
connect(key_s, ip4.stop)
connect(ip4.reach_end, interval.start)
from bee.staticbind import staticbind_baseclass
class pandawalkbind(dragonfly.event.bind,
dragonfly.io.bind,
dragonfly.sys.bind,
dragonfly.scene.bind,
dragonfly.time.bind):
hive = pandawalkhive
bind_entity = "relative"
bind_keyboard = "indirect"
class camerabindhive(bee.inithive):
interval = dragonfly.time.interval_time(30)
sequence = dragonfly.time.sequence(2)(1, 1)
connect(interval.value, sequence.inp)
startsensor = dragonfly.sys.startsensor()
ip1 = dragonfly.time.interpolation("Coordinate")((180, -20, 0), (360, -20, 0))
ip2 = dragonfly.time.interpolation("Coordinate")((0, -20, 0), (180, -20, 0))
connect(sequence.outp1, ip1.inp)
connect(sequence.outp2, ip2.inp)
connect(startsensor, interval.start)
connect(startsensor, ip1.start)
connect(ip1.reach_end, ip1.stop)
connect(ip1.reach_end, ip2.start)
connect(ip2.reach_end, ip2.stop)
connect(ip2.reach_end, ip1.start)
connect(ip2.reach_end, interval.start)
sethpr = dragonfly.scene.bound.setHpr()
connect(ip1, sethpr)
connect(ip2, sethpr)
class camerabind(staticbind_baseclass,
dragonfly.event.bind,
dragonfly.io.bind,
dragonfly.sys.bind,
dragonfly.scene.bind,
dragonfly.time.bind):
hive = camerabindhive
class myhive(dragonfly.pandahive.pandahive):
pandaclassname = "pandaclass"
pandaclassname_ = bee.attribute("pandaclassname")
canvas = dragonfly.pandahive.pandacanvas()
mousearea = dragonfly.canvas.mousearea()
raiser = bee.raiser()
connect("evexc", raiser)
camerabind = camerabind().worker()
camcenter = dragonfly.std.variable("id")("camcenter")
connect(camcenter, camerabind.bindname)
startsensor = dragonfly.sys.startsensor()
cam = dragonfly.scene.get_camera()
camparent = dragonfly.scene.unbound.parent()
connect(cam, camparent.entityname)
connect(camcenter, camparent.entityparentname)
connect(startsensor, camparent)
cphide = dragonfly.scene.unbound.hide()
connect(camcenter, cphide)
connect(startsensor, cphide)
pandaspawn = dragonfly.scene.spawn_actor()
v_panda = dragonfly.std.variable("id")(pandaclassname_)
connect(v_panda, pandaspawn)
panda_id_gen = dragonfly.std.generator("id", id_generator)()
panda_id = dragonfly.std.variable("id")("")
t_panda_id_gen = dragonfly.std.transistor("id")()
connect(panda_id_gen, t_panda_id_gen)
connect(t_panda_id_gen, panda_id)
random_matrix = dragonfly.std.generator(("object", "matrix"), random_matrix_generator)()
w_spawn = dragonfly.std.weaver(("id", ("object", "matrix")))()
connect(panda_id, w_spawn.inp1)
connect(random_matrix, w_spawn.inp2)
z_pandawalk = pandawalkbind().worker()
t_bind = dragonfly.std.transistor("id")()
connect(panda_id, t_bind)
connect(t_bind, z_pandawalk.bind)
do_spawn = dragonfly.std.transistor(("id", ("object", "matrix")))()
connect(w_spawn, do_spawn)
connect(do_spawn, pandaspawn.spawn_matrix)
trig_spawn = dragonfly.std.pushconnector("trigger")()
connect(trig_spawn, t_panda_id_gen)
connect(trig_spawn, do_spawn)
connect(trig_spawn, t_bind)
key_z = dragonfly.io.keyboardsensor_trigger("Z")
connect(key_z, trig_spawn)
pandaicon_click = dragonfly.io.mouseareasensor("pandaicon")
connect(pandaicon_click, trig_spawn)
myscene = myscene(
scene="scene",
canvas=canvas,
mousearea=mousearea,
)
wininit = bee.init("window")
wininit.camera.setPos(0, 45, 25)
wininit.camera.setHpr(180, -20, 0)
keyboardevents = dragonfly.event.sensor_match_leader("keyboard")
add_head = dragonfly.event.add_head()
head = dragonfly.std.variable("event")("spawnedpanda3")
connect(keyboardevents, add_head)
connect(head, add_head)
connect(add_head, z_pandawalk.event)
main = myhive().getinstance()
main.build("main")
main.place()
main.close()
main.init()
main.run()
|
import sys, pygame, math, random, time
from Level import *
from Player import *
from Enemy import *
from NPC import *
from Menu import *
from Item import *
pygame.init()
clock = pygame.time.Clock()
width = 1000
height = 700
size = width, height
bgColor = r,g,b = 255,255,255
screen = pygame.display.set_mode(size)
mode = "menu"
enemies = pygame.sprite.Group()
boundries = pygame.sprite.Group()
backGrounds = pygame.sprite.Group()
people = pygame.sprite.Group()
items = pygame.sprite.Group()
players = pygame.sprite.Group()
all = pygame.sprite.OrderedUpdates()
Enemy.containers = (enemies, all)
SoftBlock.containers = (backGrounds, all)
HardBlock.containers = (boundries, all)
NPC.containers = (people, all)
Item.containers = (items, all)
Player.containers = (people, players, all)
levLayer = 0
levx = 3
levy = 3
start = time.time()
def loadNewLev(direction, levx, levy):
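# Move one cell in the 3x3 level grid (clamped to 1..3), kill every
# sprite, then load the level file named map<layer><y><x>.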
if direction == "up":
if levy >1:
levy-=1
elif direction == "down":
if levy <3:
levy+=1
elif direction == "left":
if levx >1:
levx-=1
elif direction == "right":
if levx <3:
levx+=1
for s in all.sprites():
s.kill()
levFile = "Levels/map" + str(levLayer) + str(levy) + str(levx)
level=Level(levFile)
return levx, levy
while True:
while mode == "menu":
for event in pygame.event.get():
if event.type == pygame.QUIT:
sys.exit()
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_1:
mode = "game"
if event.key == pygame.K_2:
mode = "how to play"
if event.key == pygame.K_q:
mode = "quit"
bg = pygame.image.load("Resources/mainmenu.png")
bgrect = bg.get_rect(center = [width/2,height/2])
screen.fill(bgColor)
screen.blit(bg, bgrect)
pygame.display.flip()
clock.tick(60)
while mode == "how to play":
for event in pygame.event.get():
if event.type == pygame.QUIT:
sys.exit()
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_RETURN:
mode = "menu"
bg = pygame.image.load("Resources/howtoplay.png")
bgrect = bg.get_rect(center = [width/2,height/1.9])
screen.fill(bgColor)
screen.blit(bg, bgrect)
pygame.display.flip()
clock.tick(60)
while mode == "quit":
for event in pygame.event.get():
if event.type == pygame.QUIT:
sys.exit()
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_q:
sys.exit()
levFile = "Levels/map" + str(levLayer) + str(levy) + str(levx)
level = Level(levFile)
player = Player([5,5], [900,500])
while mode == "test":
for event in pygame.event.get():
if event.type == pygame.QUIT:
sys.exit()
elif event.type == pygame.KEYDOWN:
if event.key == pygame.K_w:
levx, levy = loadNewLev("up", levx, levy)
elif event.key == pygame.K_s:
levx, levy = loadNewLev("down", levx, levy)
elif event.key == pygame.K_a:
levx, levy = loadNewLev("left", levx, levy)
elif event.key == pygame.K_d:
levx, levy = loadNewLev("right", levx, levy)
#print len(all.sprites())
bgColor = r,g,b
screen.fill(bgColor)
dirty = all.draw(screen)
pygame.display.update(dirty)
pygame.display.flip()
clock.tick(60)
while mode == "game":
for event in pygame.event.get():
if event.type == pygame.QUIT:
sys.exit()
elif event.type == pygame.KEYDOWN:
if event.key == pygame.K_w or event.key == pygame.K_UP:
player.go("up")
elif event.key == pygame.K_s or event.key == pygame.K_DOWN:
player.go("down")
elif event.key == pygame.K_a or event.key == pygame.K_LEFT:
player.go("left")
elif event.key == pygame.K_d or event.key == pygame.K_RIGHT:
player.go("right")
elif event.type == pygame.KEYUP:
if event.key == pygame.K_w or event.key == pygame.K_UP:
player.go("stop up")
elif event.key == pygame.K_s or event.key == pygame.K_DOWN:
player.go("stop down")
elif event.key == pygame.K_a or event.key == pygame.K_LEFT:
player.go("stop left")
elif event.key == pygame.K_d or event.key == pygame.K_RIGHT:
player.go("stop right")
all.update(size)
#print len(all.sprites())
#From Manpac V2
if player.rect.center[0] > size[0]:
levx, levy = loadNewLev("right", levx, levy)
player = Player([5,5], [0, player.rect.center[1]])
elif player.rect.center[0] < 0:
levx, levy = loadNewLev("left", levx, levy)
player = Player([5,5], [size[0], player.rect.center[1]])
elif player.rect.center[1] > size[1]:
levx, levy = loadNewLev("down", levx, levy)
player = Player([5,5], [player.rect.center[0], 0])
elif player.rect.center[1] < 0:
levx, levy = loadNewLev("up", levx, levy)
player = Player([5,5], [player.rect.center[0], size[1]])
playersHitsBoundries = pygame.sprite.groupcollide(players, boundries, False, False)
for p in playersHitsBoundries:
for boundry in playersHitsBoundries[p]:
p.collideHardblock(boundry)
#playersHitsItems = pygame.sprite.groupcollide(players, items, False, False)
#for p in playersHitsitems:
#for item in playersHitsitems[p]:
enemiesHitsBoundries = pygame.sprite.groupcollide(enemies, boundries, False, False)
for e in enemiesHitsBoundries:
for boundry in enemiesHitsBoundries[e]:
e.collideHardblock(boundry)
bgColor = r,g,b
screen.fill(bgColor)
dirty = all.draw(screen)
pygame.display.update(dirty)
pygame.display.flip()
clock.tick(60)
|
import flask
from flask import url_for
from .base import BaseTestCase
from . import utils
class TOCTestCase(BaseTestCase):
# TOC
def test_the_title_of_the_article_list_when_language_pt(self):
"""
Check that the TOC interface returns the article title in
Portuguese.
"""
journal = utils.makeOneJournal()
with self.client as c:
# Create a collection so the interface has the ``g`` object
utils.makeOneCollection()
issue = utils.makeOneIssue({'journal': journal})
translated_titles = [
{'name': "Artigo Com Título Em Português", 'language': 'pt'},
{'name': "Título Del Artículo En Portugués", 'language': 'es'},
{'name': "Article Title In Portuguese", 'language': 'en'}
]
utils.makeOneArticle({
'issue': issue,
'title': 'Article Y',
'translated_titles': translated_titles
})
header = {
'Referer': url_for(
'main.issue_toc',
url_seg=journal.url_segment,
url_seg_issue=issue.url_segment)
}
set_locale_url = url_for('main.set_locale', lang_code='pt_BR')
response = c.get(set_locale_url, headers=header, follow_redirects=True)
self.assertEqual(200, response.status_code)
self.assertEqual(flask.session['lang'], 'pt_BR')
self.assertIn("Artigo Com Título Em Português", response.data.decode('utf-8'))
def test_the_title_of_the_article_list_when_language_es(self):
"""
Check that the TOC interface returns the article title in
Spanish.
"""
journal = utils.makeOneJournal()
with self.client as c:
# Create a collection so the interface has the ``g`` object
utils.makeOneCollection()
issue = utils.makeOneIssue({'journal': journal})
translated_titles = [
{'name': "Artigo Com Título Em Português", 'language': 'pt'},
{'name': "Título Del Artículo En Portugués", 'language': 'es'},
{'name': "Article Title In Portuguese", 'language': 'en'}
]
utils.makeOneArticle({
'issue': issue,
'title': 'Article Y',
'translated_titles': translated_titles
})
header = {
'Referer': url_for(
'main.issue_toc',
url_seg=journal.url_segment,
url_seg_issue=issue.url_segment)}
set_locale_url = url_for('main.set_locale', lang_code='es')
response = c.get(set_locale_url, headers=header, follow_redirects=True)
self.assertEqual(200, response.status_code)
self.assertEqual(flask.session['lang'], 'es')
self.assertIn("Título Del Artículo En Portugués",
response.data.decode('utf-8'))
def test_the_title_of_the_article_list_when_language_en(self):
"""
Check that the TOC interface returns the article title in
English.
"""
journal = utils.makeOneJournal()
with self.client as c:
# Create a collection so the interface has the ``g`` object
utils.makeOneCollection()
issue = utils.makeOneIssue({'journal': journal})
translated_titles = [
{'name': "Artigo Com Título Em Português", 'language': 'pt'},
{'name': "Título Del Artículo En Portugués", 'language': 'es'},
{'name': "Article Title In Portuguese", 'language': 'en'}
]
utils.makeOneArticle({
'issue': issue,
'title': 'Article Y',
'translated_titles': translated_titles
})
header = {
'Referer': url_for(
'main.issue_toc',
url_seg=journal.url_segment,
url_seg_issue=issue.url_segment)
}
set_locale_url = url_for('main.set_locale', lang_code='en')
response = c.get(set_locale_url, headers=header, follow_redirects=True)
self.assertEqual(200, response.status_code)
self.assertEqual(flask.session['lang'], 'en')
self.assertIn("Article Title In Portuguese", response.data.decode('utf-8'))
def test_the_title_of_the_article_list_without_translated(self):
"""
Check that the TOC interface returns the title in the original
language when there is no translation.
"""
journal = utils.makeOneJournal()
with self.client as c:
# Create a collection so the interface has the ``g`` object
utils.makeOneCollection()
issue = utils.makeOneIssue({'journal': journal})
translated_titles = []
utils.makeOneArticle({
'issue': issue,
'title': 'Article Y',
'translated_titles': translated_titles
})
header = {
'Referer': url_for(
'main.issue_toc',
url_seg=journal.url_segment,
url_seg_issue=issue.url_segment)
}
set_locale_url = url_for('main.set_locale', lang_code='en')
response = c.get(set_locale_url, headers=header, follow_redirects=True)
self.assertEqual(200, response.status_code)
self.assertEqual(flask.session['lang'], 'en')
self.assertIn("Article Y", response.data.decode('utf-8'))
def test_the_title_of_the_article_list_with_unknown_language_for_article(self):
"""
Check that the TOC interface returns the title in the original
language when the requested language is unknown for the article.
"""
journal = utils.makeOneJournal()
with self.client as c:
# Create a collection so the interface has the ``g`` object
utils.makeOneCollection()
issue = utils.makeOneIssue({'journal': journal})
translated_titles = []
utils.makeOneArticle({
'issue': issue,
'title': 'Article Y',
'translated_titles': translated_titles
})
header = {
'Referer': url_for(
'main.issue_toc',
url_seg=journal.url_segment,
url_seg_issue=issue.url_segment)
}
set_locale_url = url_for('main.set_locale', lang_code='es')
response = c.get(set_locale_url, headers=header, follow_redirects=True)
self.assertEqual(200, response.status_code)
self.assertEqual(flask.session['lang'], 'es')
self.assertIn("Article Y", response.data.decode('utf-8'))
def test_the_title_of_the_article_list_with_and_without_translated(self):
"""
Check that the TOC interface returns the original title for
articles without translations, and the translated title when a
translation exists.
"""
journal = utils.makeOneJournal()
with self.client as c:
# Create a collection so the interface has the ``g`` object
utils.makeOneCollection()
issue = utils.makeOneIssue({'journal': journal})
translated_titles = [
{'name': "Artigo Com Título Em Português", 'language': 'pt'},
{'name': "Título Del Artículo En Portugués", 'language': 'es'},
{'name': "Article Title In Portuguese", 'language': 'en'}
]
utils.makeOneArticle({
'issue': issue,
'title': 'Article Y',
'translated_titles': translated_titles
})
utils.makeOneArticle({
'issue': issue,
'title': 'Article Y',
'translated_titles': []
})
header = {
'Referer': url_for(
'main.issue_toc',
url_seg=journal.url_segment,
url_seg_issue=issue.url_segment)
}
set_locale_url = url_for('main.set_locale', lang_code='es')
response = c.get(set_locale_url, headers=header, follow_redirects=True)
self.assertEqual(200, response.status_code)
self.assertEqual(flask.session['lang'], 'es')
self.assertIn("Article Y", response.data.decode('utf-8'))
self.assertIn("Título Del Artículo En Portugués", response.data.decode('utf-8'))
def test_ahead_of_print_is_displayed_at_table_of_contents(self):
"""
Check that when the issue is an ahead-of-print issue, the bibliographic legend is changed to 'ahead of print'.
"""
journal = utils.makeOneJournal()
with self.client as c:
# Create a collection so the interface has the ``g`` object
utils.makeOneCollection()
issue = utils.makeOneIssue({'journal': journal, 'type': 'ahead'})
response = c.get(url_for('main.aop_toc',
url_seg=journal.url_segment,
url_seg_issue=issue.url_segment))
self.assertIn("ahead of print", response.data.decode('utf-8'))
def test_abstract_links_are_displayed(self):
"""
Check that abstract links are displayed in the TOC for every
abstract language of the article.
"""
journal = utils.makeOneJournal()
with self.client as c:
# Create a collection so the interface has the ``g`` object
utils.makeOneCollection()
issue = utils.makeOneIssue({'journal': journal})
_article_data = {
'title': 'Article Y',
'original_language': 'en',
'languages': ['es', 'pt', 'en'],
'issue': issue,
'journal': journal,
'abstract_languages': ["en", "es", "pt"],
'url_segment': '10-11',
'translated_titles': [
{'language': 'es', 'name': u'Artículo en español'},
{'language': 'pt', 'name': u'Artigo en Português'},
],
'pid': 'pidv2',
}
article = utils.makeOneArticle(_article_data)
response = c.get(url_for('main.issue_toc',
url_seg=journal.url_segment,
url_seg_issue=issue.url_segment))
uris = [
url_for(
'main.article_detail_v3',
url_seg=journal.url_segment,
article_pid_v3=article.aid,
part='abstract',
lang=abstract_lang,
)
for abstract_lang in ["en", "es", "pt"]
]
for uri in uris:
with self.subTest(uri):
self.assertIn(uri, response.data.decode('utf-8'))
|
__author__ = 'Mark Worden'
from mi.core.log import get_logger
log = get_logger()
from mi.idk.config import Config
import unittest
import os
from mi.dataset.driver.adcps_jln.stc.adcps_jln_stc_recovered_driver import parse
from mi.dataset.dataset_driver import ParticleDataHandler
class SampleTest(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_one(self):
sourceFilePath = os.path.join('mi', 'dataset', 'driver',
'adcps_jln', 'stc', 'resource',
'adcpt_20130929_091817.DAT')
particle_data_hdlr_obj = ParticleDataHandler()
particle_data_hdlr_obj = parse(Config().base_dir(), sourceFilePath, particle_data_hdlr_obj)
log.debug("SAMPLES: %s", particle_data_hdlr_obj._samples)
log.debug("FAILURE: %s", particle_data_hdlr_obj._failure)
self.assertEqual(particle_data_hdlr_obj._failure, False)
if __name__ == '__main__':
test = SampleTest('test_one')
test.test_one()
|
from ..excel_comparsion_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.maxDiff = None
filename = 'chart_bar10.xlsx'
test_dir = 'xlsxwriter/test/comparison/'
self.got_filename = test_dir + '_test_' + filename
self.exp_filename = test_dir + 'xlsx_files/' + filename
self.ignore_files = []
self.ignore_elements = {}
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({'type': 'bar', 'subtype': 'percent_stacked'})
chart.axis_ids = [40274560, 40295040]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column('A1', data[0])
worksheet.write_column('B1', data[1])
worksheet.write_column('C1', data[2])
chart.add_series({'values': '=Sheet1!$A$1:$A$5'})
chart.add_series({'values': '=Sheet1!$B$1:$B$5'})
chart.add_series({'values': '=Sheet1!$C$1:$C$5'})
worksheet.insert_chart('E9', chart)
workbook.close()
self.assertExcelEqual()
|
from envs.common import *
DEBUG = False
TEMPLATE_DEBUG = DEBUG
STATIC_URL = 'http://%s.s3.amazonaws.com/' % AWS_STORAGE_BUCKET_NAME
COMPRESS_URL = STATIC_URL
FAVICON_URL = "%sfavicon.ico" % STATIC_URL
DEFAULT_FILE_STORAGE = 'storages.backends.s3boto.S3BotoStorage'
STATICFILES_STORAGE = "backends.CachedS3BotoStorage"
COMPRESS_STORAGE = STATICFILES_STORAGE
COMPRESS_ENABLED = True
COMPRESS_OFFLINE = True
|
"""Script for unittesting the mcpu module"""
import unittest
import itertools
from ganeti import compat
from ganeti import mcpu
from ganeti import opcodes
from ganeti import cmdlib
from ganeti import locking
from ganeti import constants
from ganeti.constants import \
LOCK_ATTEMPTS_TIMEOUT, \
LOCK_ATTEMPTS_MAXWAIT, \
LOCK_ATTEMPTS_MINWAIT
import testutils
REQ_BGL_WHITELIST = compat.UniqueFrozenset([
opcodes.OpClusterActivateMasterIp,
opcodes.OpClusterDeactivateMasterIp,
opcodes.OpClusterDestroy,
opcodes.OpClusterPostInit,
opcodes.OpClusterRename,
opcodes.OpInstanceRename,
opcodes.OpNodeAdd,
opcodes.OpNodeRemove,
opcodes.OpTestAllocator,
])
class TestLockAttemptTimeoutStrategy(unittest.TestCase):
def testConstants(self):
tpa = mcpu.LockAttemptTimeoutStrategy._TIMEOUT_PER_ATTEMPT
self.assert_(len(tpa) > LOCK_ATTEMPTS_TIMEOUT / LOCK_ATTEMPTS_MAXWAIT)
self.assert_(sum(tpa) >= LOCK_ATTEMPTS_TIMEOUT)
self.assertTrue(LOCK_ATTEMPTS_TIMEOUT >= 1800,
msg="Waiting less than half an hour per priority")
self.assertTrue(LOCK_ATTEMPTS_TIMEOUT <= 3600,
msg="Waiting more than an hour per priority")
def testSimple(self):
strat = mcpu.LockAttemptTimeoutStrategy(_random_fn=lambda: 0.5,
_time_fn=lambda: 0.0)
prev = None
for i in range(len(strat._TIMEOUT_PER_ATTEMPT)):
timeout = strat.NextAttempt()
self.assert_(timeout is not None)
self.assert_(timeout <= LOCK_ATTEMPTS_MAXWAIT)
self.assert_(timeout >= LOCK_ATTEMPTS_MINWAIT)
self.assert_(prev is None or timeout >= prev)
prev = timeout
for _ in range(10):
self.assert_(strat.NextAttempt() is None)
class TestDispatchTable(unittest.TestCase):
def test(self):
for opcls in opcodes.OP_MAPPING.values():
if not opcls.WITH_LU:
continue
self.assertTrue(opcls in mcpu.Processor.DISPATCH_TABLE,
msg="%s missing handler class" % opcls)
# Check against BGL whitelist
lucls = mcpu.Processor.DISPATCH_TABLE[opcls]
if lucls.REQ_BGL:
self.assertTrue(opcls in REQ_BGL_WHITELIST,
msg=("%s not whitelisted for BGL" % opcls.OP_ID))
else:
self.assertFalse(opcls in REQ_BGL_WHITELIST,
msg=("%s whitelisted for BGL, but doesn't use it" %
opcls.OP_ID))
class TestProcessResult(unittest.TestCase):
def setUp(self):
self._submitted = []
self._count = itertools.count(200)
def _Submit(self, jobs):
job_ids = [self._count.next() for _ in jobs]
self._submitted.extend(zip(job_ids, jobs))
return job_ids
def testNoJobs(self):
for i in [object(), [], False, True, None, 1, 929, {}]:
self.assertEqual(mcpu._ProcessResult(NotImplemented, NotImplemented, i),
i)
def testDefaults(self):
src = opcodes.OpTestDummy()
res = mcpu._ProcessResult(self._Submit, src, cmdlib.ResultWithJobs([[
opcodes.OpTestDelay(),
opcodes.OpTestDelay(),
], [
opcodes.OpTestDelay(),
]]))
self.assertEqual(res, {
constants.JOB_IDS_KEY: [200, 201],
})
(_, (op1, op2)) = self._submitted.pop(0)
(_, (op3, )) = self._submitted.pop(0)
self.assertRaises(IndexError, self._submitted.pop)
for op in [op1, op2, op3]:
self.assertTrue("OP_TEST_DUMMY" in op.comment)
self.assertFalse(hasattr(op, "priority"))
self.assertFalse(hasattr(op, "debug_level"))
def testParams(self):
src = opcodes.OpTestDummy(priority=constants.OP_PRIO_HIGH,
debug_level=3)
res = mcpu._ProcessResult(self._Submit, src, cmdlib.ResultWithJobs([[
opcodes.OpTestDelay(priority=constants.OP_PRIO_LOW),
], [
opcodes.OpTestDelay(comment="foobar", debug_level=10),
]], other=True, value=range(10)))
self.assertEqual(res, {
constants.JOB_IDS_KEY: [200, 201],
"other": True,
"value": range(10),
})
(_, (op1, )) = self._submitted.pop(0)
(_, (op2, )) = self._submitted.pop(0)
self.assertRaises(IndexError, self._submitted.pop)
self.assertEqual(op1.priority, constants.OP_PRIO_LOW)
self.assertTrue("OP_TEST_DUMMY" in op1.comment)
self.assertEqual(op1.debug_level, 3)
self.assertEqual(op2.priority, constants.OP_PRIO_HIGH)
self.assertEqual(op2.comment, "foobar")
self.assertEqual(op2.debug_level, 3)
class _FakeLuWithLocks:
def __init__(self, needed_locks, share_locks):
self.needed_locks = needed_locks
self.share_locks = share_locks
class _FakeGlm:
def __init__(self, owning_nal):
self._owning_nal = owning_nal
def check_owned(self, level, names):
assert level == locking.LEVEL_NODE_ALLOC
assert names == locking.NAL
return self._owning_nal
def owning_all(self, level):
return False
class TestVerifyLocks(unittest.TestCase):
def testNoLocks(self):
lu = _FakeLuWithLocks({}, {})
glm = _FakeGlm(False)
mcpu._VerifyLocks(lu, glm,
_mode_whitelist=NotImplemented,
_nal_whitelist=NotImplemented)
def testNotAllSameMode(self):
for level in [locking.LEVEL_NODE, locking.LEVEL_NODE_RES]:
lu = _FakeLuWithLocks({
level: ["foo"],
}, {
level: 0,
locking.LEVEL_NODE_ALLOC: 0,
})
glm = _FakeGlm(False)
mcpu._VerifyLocks(lu, glm, _mode_whitelist=[], _nal_whitelist=[])
def testDifferentMode(self):
for level in [locking.LEVEL_NODE, locking.LEVEL_NODE_RES]:
lu = _FakeLuWithLocks({
level: ["foo"],
}, {
level: 0,
locking.LEVEL_NODE_ALLOC: 1,
})
glm = _FakeGlm(False)
try:
mcpu._VerifyLocks(lu, glm, _mode_whitelist=[], _nal_whitelist=[])
except AssertionError, err:
self.assertTrue("using the same mode as nodes" in str(err))
else:
self.fail("Exception not raised")
# Once more with the whitelist
mcpu._VerifyLocks(lu, glm, _mode_whitelist=[_FakeLuWithLocks],
_nal_whitelist=[])
def testSameMode(self):
for level in [locking.LEVEL_NODE, locking.LEVEL_NODE_RES]:
lu = _FakeLuWithLocks({
level: ["foo"],
locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
}, {
level: 1,
locking.LEVEL_NODE_ALLOC: 1,
})
glm = _FakeGlm(True)
try:
mcpu._VerifyLocks(lu, glm, _mode_whitelist=[_FakeLuWithLocks],
_nal_whitelist=[])
except AssertionError, err:
self.assertTrue("whitelisted to use different modes" in str(err))
else:
self.fail("Exception not raised")
# Once more without the whitelist
mcpu._VerifyLocks(lu, glm, _mode_whitelist=[], _nal_whitelist=[])
def testAllWithoutAllocLock(self):
for level in [locking.LEVEL_NODE, locking.LEVEL_NODE_RES]:
lu = _FakeLuWithLocks({
level: locking.ALL_SET,
}, {
level: 0,
locking.LEVEL_NODE_ALLOC: 0,
})
glm = _FakeGlm(False)
try:
mcpu._VerifyLocks(lu, glm, _mode_whitelist=[], _nal_whitelist=[])
except AssertionError, err:
self.assertTrue("allocation lock must be used if" in str(err))
else:
self.fail("Exception not raised")
# Once more with the whitelist
mcpu._VerifyLocks(lu, glm, _mode_whitelist=[],
_nal_whitelist=[_FakeLuWithLocks])
def testAllWithAllocLock(self):
for level in [locking.LEVEL_NODE, locking.LEVEL_NODE_RES]:
lu = _FakeLuWithLocks({
level: locking.ALL_SET,
locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
}, {
level: 0,
locking.LEVEL_NODE_ALLOC: 0,
})
glm = _FakeGlm(True)
try:
mcpu._VerifyLocks(lu, glm, _mode_whitelist=[],
_nal_whitelist=[_FakeLuWithLocks])
except AssertionError, err:
self.assertTrue("whitelisted for not acquiring" in str(err))
else:
self.fail("Exception not raised")
# Once more without the whitelist
mcpu._VerifyLocks(lu, glm, _mode_whitelist=[], _nal_whitelist=[])
if __name__ == "__main__":
testutils.GanetiTestProgram()
|
from __future__ import print_function
import types
import warnings
import sys
import traceback
import pickle
from copy import deepcopy
import numpy as np
from scipy import sparse
from scipy.stats import rankdata
import struct
from sklearn.externals.six.moves import zip
from sklearn.externals.joblib import hash, Memory
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regex
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_allclose
from sklearn.utils.testing import assert_allclose_dense_sparse
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import META_ESTIMATORS
from sklearn.utils.testing import set_random_state
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_dict_equal
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.base import (clone, TransformerMixin, ClusterMixin,
BaseEstimator, is_classifier, is_regressor)
from sklearn.metrics import accuracy_score, adjusted_rand_score, f1_score
from sklearn.random_projection import BaseRandomProjection
from sklearn.feature_selection import SelectKBest
from sklearn.svm.base import BaseLibSVM
from sklearn.linear_model.stochastic_gradient import BaseSGD
from sklearn.pipeline import make_pipeline
from sklearn.exceptions import ConvergenceWarning
from sklearn.exceptions import DataConversionWarning
from sklearn.exceptions import SkipTestWarning
from sklearn.model_selection import train_test_split
from sklearn.utils import shuffle
from sklearn.utils.fixes import signature
from sklearn.utils.validation import has_fit_parameter, _num_samples
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import load_iris, load_boston, make_blobs
BOSTON = None
CROSS_DECOMPOSITION = ['PLSCanonical', 'PLSRegression', 'CCA', 'PLSSVD']
MULTI_OUTPUT = ['CCA', 'DecisionTreeRegressor', 'ElasticNet',
'ExtraTreeRegressor', 'ExtraTreesRegressor', 'GaussianProcess',
'GaussianProcessRegressor',
'KNeighborsRegressor', 'KernelRidge', 'Lars', 'Lasso',
'LassoLars', 'LinearRegression', 'MultiTaskElasticNet',
'MultiTaskElasticNetCV', 'MultiTaskLasso', 'MultiTaskLassoCV',
'OrthogonalMatchingPursuit', 'PLSCanonical', 'PLSRegression',
'RANSACRegressor', 'RadiusNeighborsRegressor',
'RandomForestRegressor', 'Ridge', 'RidgeCV']
def _yield_non_meta_checks(name, estimator):
yield check_estimators_dtypes
yield check_fit_score_takes_y
yield check_dtype_object
yield check_sample_weights_pandas_series
yield check_sample_weights_list
yield check_estimators_fit_returns_self
yield check_complex_data
# Check that all estimators yield informative messages when
# trained on empty datasets
yield check_estimators_empty_data_messages
if name not in CROSS_DECOMPOSITION + ['SpectralEmbedding']:
# SpectralEmbedding is non-deterministic,
# see issue #4236
# cross-decomposition's "transform" returns X and Y
yield check_pipeline_consistency
if name not in ['Imputer']:
# Test that all estimators check their input for NaN's and infs
yield check_estimators_nan_inf
if name not in ['GaussianProcess']:
# FIXME!
# in particular GaussianProcess!
yield check_estimators_overwrite_params
if hasattr(estimator, 'sparsify'):
yield check_sparsify_coefficients
yield check_estimator_sparse_data
# Test that estimators can be pickled, and once pickled
# give the same answer as before.
yield check_estimators_pickle
def _yield_classifier_checks(name, classifier):
# test classifiers can handle non-array data
yield check_classifier_data_not_an_array
# test classifiers trained on a single label always return this label
yield check_classifiers_one_label
yield check_classifiers_classes
yield check_estimators_partial_fit_n_features
# basic consistency testing
yield check_classifiers_train
yield check_classifiers_regression_target
if (name not in ["MultinomialNB", "ComplementNB", "LabelPropagation",
"LabelSpreading"] and
# TODO some complication with -1 label
name not in ["DecisionTreeClassifier", "ExtraTreeClassifier"]):
# We don't raise a warning in these classifiers, as
# the column y interface is used by the forests.
yield check_supervised_y_2d
yield check_supervised_y_no_nan
# test if NotFittedError is raised
yield check_estimators_unfitted
if 'class_weight' in classifier.get_params().keys():
yield check_class_weight_classifiers
yield check_non_transformer_estimators_n_iter
# test if predict_proba is a monotonic transformation of decision_function
yield check_decision_proba_consistency
@ignore_warnings(category=(DeprecationWarning, FutureWarning))
def check_supervised_y_no_nan(name, estimator_orig):
# Checks that the Estimator targets are not NaN.
estimator = clone(estimator_orig)
rng = np.random.RandomState(888)
X = rng.randn(10, 5)
y = np.ones(10) * np.inf
y = multioutput_estimator_convert_y_2d(estimator, y)
errmsg = "Input contains NaN, infinity or a value too large for " \
"dtype('float64')."
try:
estimator.fit(X, y)
except ValueError as e:
if str(e) != errmsg:
raise ValueError("Estimator {0} raised error as expected, but "
"does not match expected error message"
.format(name))
else:
raise ValueError("Estimator {0} should have raised error on fitting "
"array y with NaN value.".format(name))
def _yield_regressor_checks(name, regressor):
# TODO: test with intercept
# TODO: test with multiple responses
# basic testing
yield check_regressors_train
yield check_regressor_data_not_an_array
yield check_estimators_partial_fit_n_features
yield check_regressors_no_decision_function
yield check_supervised_y_2d
yield check_supervised_y_no_nan
if name != 'CCA':
# check that the regressor handles int input
yield check_regressors_int
if name != "GaussianProcessRegressor":
# Test if NotFittedError is raised
yield check_estimators_unfitted
yield check_non_transformer_estimators_n_iter
def _yield_transformer_checks(name, transformer):
# All transformers should either deal with sparse data or raise an
# exception with type TypeError and an intelligible error message
if name not in ['AdditiveChi2Sampler', 'Binarizer', 'Normalizer',
'PLSCanonical', 'PLSRegression', 'CCA', 'PLSSVD']:
yield check_transformer_data_not_an_array
# these don't actually fit the data, so don't raise errors
if name not in ['AdditiveChi2Sampler', 'Binarizer',
'FunctionTransformer', 'Normalizer']:
# basic tests
yield check_transformer_general
yield check_transformers_unfitted
# Dependent on external solvers and hence accessing the iter
# param is non-trivial.
external_solver = ['Isomap', 'KernelPCA', 'LocallyLinearEmbedding',
'RandomizedLasso', 'LogisticRegressionCV']
if name not in external_solver:
yield check_transformer_n_iter
def _yield_clustering_checks(name, clusterer):
yield check_clusterer_compute_labels_predict
if name not in ('WardAgglomeration', "FeatureAgglomeration"):
# this is clustering on the features
# let's not test that here.
yield check_clustering
yield check_estimators_partial_fit_n_features
yield check_non_transformer_estimators_n_iter
def _yield_all_checks(name, estimator):
for check in _yield_non_meta_checks(name, estimator):
yield check
if is_classifier(estimator):
for check in _yield_classifier_checks(name, estimator):
yield check
if is_regressor(estimator):
for check in _yield_regressor_checks(name, estimator):
yield check
if isinstance(estimator, TransformerMixin):
for check in _yield_transformer_checks(name, estimator):
yield check
if isinstance(estimator, ClusterMixin):
for check in _yield_clustering_checks(name, estimator):
yield check
yield check_fit2d_predict1d
if name != 'GaussianProcess': # FIXME
# XXX GaussianProcess deprecated in 0.20
yield check_fit2d_1sample
yield check_fit2d_1feature
yield check_fit1d
yield check_get_params_invariance
yield check_dict_unchanged
yield check_dont_overwrite_parameters
def check_estimator(Estimator):
"""Check if estimator adheres to scikit-learn conventions.
This function will run an extensive test-suite for input validation,
shapes, etc.
Additional tests for classifiers, regressors, clustering or transformers
will be run if the Estimator class inherits from the corresponding mixin
from sklearn.base.
This test can be applied to classes or instances.
Classes currently have some additional tests related to construction,
while passing instances allows the testing of multiple options.
Parameters
----------
Estimator : estimator object or class
Estimator to check. Estimator is a class object or instance.
"""
if isinstance(Estimator, type):
# got a class
name = Estimator.__name__
check_parameters_default_constructible(name, Estimator)
check_no_fit_attributes_set_in_init(name, Estimator)
estimator = Estimator()
else:
# got an instance
estimator = Estimator
name = type(estimator).__name__
for check in _yield_all_checks(name, estimator):
try:
check(name, estimator)
except SkipTest as message:
# the only SkipTest thrown currently results from not
# being able to import pandas.
warnings.warn(message, SkipTestWarning)
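def _demo_check_estimator():
    # Hedged usage sketch: run the full check suite against a familiar
    # estimator class (illustrative; any conforming estimator works).
    from sklearn.linear_model import LogisticRegression
    check_estimator(LogisticRegression)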
def _boston_subset(n_samples=200):
global BOSTON
if BOSTON is None:
boston = load_boston()
X, y = boston.data, boston.target
X, y = shuffle(X, y, random_state=0)
X, y = X[:n_samples], y[:n_samples]
X = StandardScaler().fit_transform(X)
BOSTON = X, y
return BOSTON
def set_checking_parameters(estimator):
# set parameters to speed up some estimators and
# avoid deprecated behaviour
params = estimator.get_params()
if ("n_iter" in params and estimator.__class__.__name__ != "TSNE"
and not isinstance(estimator, BaseSGD)):
estimator.set_params(n_iter=5)
if "max_iter" in params:
warnings.simplefilter("ignore", ConvergenceWarning)
if estimator.max_iter is not None:
estimator.set_params(max_iter=min(5, estimator.max_iter))
# LinearSVR, LinearSVC
if estimator.__class__.__name__ in ['LinearSVR', 'LinearSVC']:
estimator.set_params(max_iter=20)
# NMF
if estimator.__class__.__name__ == 'NMF':
estimator.set_params(max_iter=100)
# MLP
if estimator.__class__.__name__ in ['MLPClassifier', 'MLPRegressor']:
estimator.set_params(max_iter=100)
if "n_resampling" in params:
# randomized lasso
estimator.set_params(n_resampling=5)
if "n_estimators" in params:
# especially gradient boosting with default 100
estimator.set_params(n_estimators=min(5, estimator.n_estimators))
if "max_trials" in params:
# RANSAC
estimator.set_params(max_trials=10)
if "n_init" in params:
# K-Means
estimator.set_params(n_init=2)
if "decision_function_shape" in params:
# SVC
estimator.set_params(decision_function_shape='ovo')
if estimator.__class__.__name__ == "SelectFdr":
# be tolerant of noisy datasets (not actually speed)
estimator.set_params(alpha=.5)
if estimator.__class__.__name__ == "TheilSenRegressor":
estimator.max_subpopulation = 100
if isinstance(estimator, BaseRandomProjection):
# Due to the jl lemma and often very few samples, the number
# of components of the random matrix projection will be probably
# greater than the number of features.
# So we impose a smaller number (avoid "auto" mode)
estimator.set_params(n_components=2)
if isinstance(estimator, SelectKBest):
# SelectKBest has a default of k=10
# which is more features than we have in most cases.
estimator.set_params(k=1)
class NotAnArray(object):
" An object that is convertable to an array"
def __init__(self, data):
self.data = data
def __array__(self, dtype=None):
return self.data
def _is_32bit():
"""Detect if process is 32bit Python."""
return struct.calcsize('P') * 8 == 32
def check_estimator_sparse_data(name, estimator_orig):
rng = np.random.RandomState(0)
X = rng.rand(40, 10)
X[X < .8] = 0
X_csr = sparse.csr_matrix(X)
y = (4 * rng.rand(40)).astype(np.int)
# catch deprecation warnings
with ignore_warnings(category=DeprecationWarning):
estimator = clone(estimator_orig)
y = multioutput_estimator_convert_y_2d(estimator, y)
for sparse_format in ['csr', 'csc', 'dok', 'lil', 'coo', 'dia', 'bsr']:
X = X_csr.asformat(sparse_format)
# catch deprecation warnings
with ignore_warnings(category=(DeprecationWarning, FutureWarning)):
if name in ['Scaler', 'StandardScaler']:
estimator = clone(estimator).set_params(with_mean=False)
else:
estimator = clone(estimator)
# fit and predict
try:
with ignore_warnings(category=(DeprecationWarning, FutureWarning)):
estimator.fit(X, y)
if hasattr(estimator, "predict"):
pred = estimator.predict(X)
assert_equal(pred.shape, (X.shape[0],))
if hasattr(estimator, 'predict_proba'):
probs = estimator.predict_proba(X)
assert_equal(probs.shape, (X.shape[0], 4))
except TypeError as e:
if 'sparse' not in repr(e):
print("Estimator %s doesn't seem to fail gracefully on "
"sparse data: error message state explicitly that "
"sparse input is not supported if this is not the case."
% name)
raise
except Exception:
print("Estimator %s doesn't seem to fail gracefully on "
"sparse data: it should raise a TypeError if sparse input "
"is explicitly not supported." % name)
raise
@ignore_warnings(category=(DeprecationWarning, FutureWarning))
def check_sample_weights_pandas_series(name, estimator_orig):
# check that estimators will accept a 'sample_weight' parameter of
# type pandas.Series in the 'fit' function.
estimator = clone(estimator_orig)
if has_fit_parameter(estimator, "sample_weight"):
try:
import pandas as pd
X = pd.DataFrame([[1, 1], [1, 2], [1, 3], [2, 1], [2, 2], [2, 3]])
y = pd.Series([1, 1, 1, 2, 2, 2])
weights = pd.Series([1] * 6)
try:
estimator.fit(X, y, sample_weight=weights)
except ValueError:
raise ValueError("Estimator {0} raises error if "
"'sample_weight' parameter is of "
"type pandas.Series".format(name))
except ImportError:
raise SkipTest("pandas is not installed: not testing for "
"input of type pandas.Series to class weight.")
@ignore_warnings(category=(DeprecationWarning, FutureWarning))
def check_sample_weights_list(name, estimator_orig):
# check that estimators will accept a 'sample_weight' parameter of
# type list in the 'fit' function.
if has_fit_parameter(estimator_orig, "sample_weight"):
estimator = clone(estimator_orig)
rnd = np.random.RandomState(0)
X = rnd.uniform(size=(10, 3))
y = np.arange(10) % 3
y = multioutput_estimator_convert_y_2d(estimator, y)
sample_weight = [3] * 10
# Test that estimators don't raise any exception
estimator.fit(X, y, sample_weight=sample_weight)
@ignore_warnings(category=(DeprecationWarning, FutureWarning, UserWarning))
def check_dtype_object(name, estimator_orig):
# check that estimators treat dtype object as numeric if possible
rng = np.random.RandomState(0)
X = rng.rand(40, 10).astype(object)
y = (X[:, 0] * 4).astype(np.int)
estimator = clone(estimator_orig)
y = multioutput_estimator_convert_y_2d(estimator, y)
estimator.fit(X, y)
if hasattr(estimator, "predict"):
estimator.predict(X)
if hasattr(estimator, "transform"):
estimator.transform(X)
try:
estimator.fit(X, y.astype(object))
except Exception as e:
if "Unknown label type" not in str(e):
raise
X[0, 0] = {'foo': 'bar'}
msg = "argument must be a string or a number"
assert_raises_regex(TypeError, msg, estimator.fit, X, y)
def check_complex_data(name, estimator_orig):
# check that estimators raise an exception on providing complex data
X = np.random.sample(10) + 1j * np.random.sample(10)
X = X.reshape(-1, 1)
y = np.random.sample(10) + 1j * np.random.sample(10)
estimator = clone(estimator_orig)
assert_raises_regex(ValueError, "Complex data not supported",
estimator.fit, X, y)
@ignore_warnings
def check_dict_unchanged(name, estimator_orig):
    # SpectralCoclustering raises:
    #   ValueError: Found array with 0 feature(s) (shape=(23, 0))
    #   while a minimum of 1 is required.
if name in ['SpectralCoclustering']:
return
rnd = np.random.RandomState(0)
if name in ['RANSACRegressor']:
X = 3 * rnd.uniform(size=(20, 3))
else:
X = 2 * rnd.uniform(size=(20, 3))
y = X[:, 0].astype(np.int)
estimator = clone(estimator_orig)
y = multioutput_estimator_convert_y_2d(estimator, y)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
if hasattr(estimator, "n_best"):
estimator.n_best = 1
set_random_state(estimator, 1)
estimator.fit(X, y)
for method in ["predict", "transform", "decision_function",
"predict_proba"]:
if hasattr(estimator, method):
dict_before = estimator.__dict__.copy()
getattr(estimator, method)(X)
assert_dict_equal(estimator.__dict__, dict_before,
'Estimator changes __dict__ during %s' % method)
def is_public_parameter(attr):
return not (attr.startswith('_') or attr.endswith('_'))
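# e.g. 'n_components' is public by this rule, while 'coef_' (estimated) and
# '_cache' (internal) are not.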
@ignore_warnings(category=(DeprecationWarning, FutureWarning))
def check_dont_overwrite_parameters(name, estimator_orig):
# check that fit method only changes or sets private attributes
if hasattr(estimator_orig.__init__, "deprecated_original"):
# to not check deprecated classes
return
estimator = clone(estimator_orig)
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(20, 3))
y = X[:, 0].astype(np.int)
y = multioutput_estimator_convert_y_2d(estimator, y)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
set_random_state(estimator, 1)
dict_before_fit = estimator.__dict__.copy()
estimator.fit(X, y)
dict_after_fit = estimator.__dict__
public_keys_after_fit = [key for key in dict_after_fit.keys()
if is_public_parameter(key)]
attrs_added_by_fit = [key for key in public_keys_after_fit
if key not in dict_before_fit.keys()]
# check that fit doesn't add any public attribute
assert_true(not attrs_added_by_fit,
('Estimator adds public attribute(s) during'
' the fit method.'
                 ' Estimators are only allowed to add private attributes'
                 ' that either start with _ or end'
                 ' with _, but %s was added' % ', '.join(attrs_added_by_fit)))
# check that fit doesn't change any public attribute
attrs_changed_by_fit = [key for key in public_keys_after_fit
if (dict_before_fit[key]
is not dict_after_fit[key])]
assert_true(not attrs_changed_by_fit,
('Estimator changes public attribute(s) during'
' the fit method. Estimators are only allowed'
                 ' to change attributes that start'
                 ' or end with _, but'
                 ' %s changed' % ', '.join(attrs_changed_by_fit)))
@ignore_warnings(category=(DeprecationWarning, FutureWarning))
def check_fit2d_predict1d(name, estimator_orig):
# check by fitting a 2d array and predicting with a 1d array
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(20, 3))
y = X[:, 0].astype(np.int)
estimator = clone(estimator_orig)
y = multioutput_estimator_convert_y_2d(estimator, y)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
set_random_state(estimator, 1)
estimator.fit(X, y)
for method in ["predict", "transform", "decision_function",
"predict_proba"]:
if hasattr(estimator, method):
assert_raise_message(ValueError, "Reshape your data",
getattr(estimator, method), X[0])
@ignore_warnings
def check_fit2d_1sample(name, estimator_orig):
# Check that fitting a 2d array with only one sample either works or
# returns an informative message. The error message should either mention
# the number of samples or the number of classes.
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(1, 10))
y = X[:, 0].astype(np.int)
estimator = clone(estimator_orig)
y = multioutput_estimator_convert_y_2d(estimator, y)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
set_random_state(estimator, 1)
msgs = ["1 sample", "n_samples = 1", "n_samples=1", "one sample",
"1 class", "one class"]
try:
estimator.fit(X, y)
except ValueError as e:
if all(msg not in repr(e) for msg in msgs):
raise e
@ignore_warnings
def check_fit2d_1feature(name, estimator_orig):
# check fitting a 2d array with only 1 feature either works or returns
# informative message
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(10, 1))
y = X[:, 0].astype(np.int)
estimator = clone(estimator_orig)
y = multioutput_estimator_convert_y_2d(estimator, y)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
# ensure two labels in subsample for RandomizedLogisticRegression
if name == 'RandomizedLogisticRegression':
estimator.sample_fraction = 1
# ensure non skipped trials for RANSACRegressor
if name == 'RANSACRegressor':
estimator.residual_threshold = 0.5
y = multioutput_estimator_convert_y_2d(estimator, y)
set_random_state(estimator, 1)
msgs = ["1 feature(s)", "n_features = 1", "n_features=1"]
try:
estimator.fit(X, y)
except ValueError as e:
if all(msg not in repr(e) for msg in msgs):
raise e
@ignore_warnings
def check_fit1d(name, estimator_orig):
# check fitting 1d X array raises a ValueError
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(20))
y = X.astype(np.int)
estimator = clone(estimator_orig)
y = multioutput_estimator_convert_y_2d(estimator, y)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
set_random_state(estimator, 1)
assert_raises(ValueError, estimator.fit, X, y)
@ignore_warnings(category=(DeprecationWarning, FutureWarning))
def check_transformer_general(name, transformer):
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X = StandardScaler().fit_transform(X)
X -= X.min()
_check_transformer(name, transformer, X, y)
_check_transformer(name, transformer, X.tolist(), y.tolist())
@ignore_warnings(category=(DeprecationWarning, FutureWarning))
def check_transformer_data_not_an_array(name, transformer):
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X = StandardScaler().fit_transform(X)
# We need to make sure that we have non negative data, for things
# like NMF
X -= X.min() - .1
this_X = NotAnArray(X)
this_y = NotAnArray(np.asarray(y))
_check_transformer(name, transformer, this_X, this_y)
@ignore_warnings(category=(DeprecationWarning, FutureWarning))
def check_transformers_unfitted(name, transformer):
X, y = _boston_subset()
transformer = clone(transformer)
with assert_raises((AttributeError, ValueError), msg="The unfitted "
"transformer {} does not raise an error when "
"transform is called. Perhaps use "
"check_is_fitted in transform.".format(name)):
transformer.transform(X)
def _check_transformer(name, transformer_orig, X, y):
if name in ('CCA', 'LocallyLinearEmbedding', 'KernelPCA') and _is_32bit():
# Those transformers yield non-deterministic output when executed on
# a 32bit Python. The same transformers are stable on 64bit Python.
# FIXME: try to isolate a minimalistic reproduction case only depending
# on numpy & scipy and/or maybe generate a test dataset that does not
# cause such unstable behaviors.
msg = name + ' is non deterministic on 32bit Python'
raise SkipTest(msg)
n_samples, n_features = np.asarray(X).shape
transformer = clone(transformer_orig)
set_random_state(transformer)
# fit
if name in CROSS_DECOMPOSITION:
y_ = np.c_[y, y]
y_[::2, 1] *= 2
else:
y_ = y
transformer.fit(X, y_)
# fit_transform method should work on non fitted estimator
transformer_clone = clone(transformer)
X_pred = transformer_clone.fit_transform(X, y=y_)
if isinstance(X_pred, tuple):
for x_pred in X_pred:
assert_equal(x_pred.shape[0], n_samples)
else:
# check for consistent n_samples
assert_equal(X_pred.shape[0], n_samples)
if hasattr(transformer, 'transform'):
if name in CROSS_DECOMPOSITION:
X_pred2 = transformer.transform(X, y_)
X_pred3 = transformer.fit_transform(X, y=y_)
else:
X_pred2 = transformer.transform(X)
X_pred3 = transformer.fit_transform(X, y=y_)
if isinstance(X_pred, tuple) and isinstance(X_pred2, tuple):
for x_pred, x_pred2, x_pred3 in zip(X_pred, X_pred2, X_pred3):
assert_allclose_dense_sparse(
x_pred, x_pred2, atol=1e-2,
err_msg="fit_transform and transform outcomes "
"not consistent in %s"
% transformer)
assert_allclose_dense_sparse(
x_pred, x_pred3, atol=1e-2,
err_msg="consecutive fit_transform outcomes "
"not consistent in %s"
% transformer)
else:
assert_allclose_dense_sparse(
X_pred, X_pred2,
err_msg="fit_transform and transform outcomes "
"not consistent in %s"
% transformer, atol=1e-2)
assert_allclose_dense_sparse(
X_pred, X_pred3, atol=1e-2,
err_msg="consecutive fit_transform outcomes "
"not consistent in %s"
% transformer)
assert_equal(_num_samples(X_pred2), n_samples)
assert_equal(_num_samples(X_pred3), n_samples)
# raises error on malformed input for transform
if hasattr(X, 'T'):
# If it's not an array, it does not have a 'T' property
with assert_raises(ValueError, msg="The transformer {} does "
"not raise an error when the number of "
"features in transform is different from"
" the number of features in "
"fit.".format(name)):
transformer.transform(X.T)
@ignore_warnings
def check_pipeline_consistency(name, estimator_orig):
if name in ('CCA', 'LocallyLinearEmbedding', 'KernelPCA') and _is_32bit():
# Those transformers yield non-deterministic output when executed on
# a 32bit Python. The same transformers are stable on 64bit Python.
# FIXME: try to isolate a minimalistic reproduction case only depending
        # on numpy & scipy and/or maybe generate a test dataset that does not
# cause such unstable behaviors.
msg = name + ' is non deterministic on 32bit Python'
raise SkipTest(msg)
# check that make_pipeline(est) gives same score as est
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X -= X.min()
estimator = clone(estimator_orig)
y = multioutput_estimator_convert_y_2d(estimator, y)
set_random_state(estimator)
pipeline = make_pipeline(estimator)
estimator.fit(X, y)
pipeline.fit(X, y)
funcs = ["score", "fit_transform"]
for func_name in funcs:
func = getattr(estimator, func_name, None)
if func is not None:
func_pipeline = getattr(pipeline, func_name)
result = func(X, y)
result_pipe = func_pipeline(X, y)
assert_allclose_dense_sparse(result, result_pipe)
@ignore_warnings
def check_fit_score_takes_y(name, estimator_orig):
# check that all estimators accept an optional y
# in fit and score so they can be used in pipelines
rnd = np.random.RandomState(0)
X = rnd.uniform(size=(10, 3))
y = np.arange(10) % 3
estimator = clone(estimator_orig)
y = multioutput_estimator_convert_y_2d(estimator, y)
set_random_state(estimator)
funcs = ["fit", "score", "partial_fit", "fit_predict", "fit_transform"]
for func_name in funcs:
func = getattr(estimator, func_name, None)
if func is not None:
func(X, y)
args = [p.name for p in signature(func).parameters.values()]
if args[0] == "self":
# if_delegate_has_method makes methods into functions
# with an explicit "self", so need to shift arguments
args = args[1:]
assert_true(args[1] in ["y", "Y"],
"Expected y or Y as second argument for method "
"%s of %s. Got arguments: %r."
% (func_name, type(estimator).__name__, args))
@ignore_warnings
def check_estimators_dtypes(name, estimator_orig):
rnd = np.random.RandomState(0)
X_train_32 = 3 * rnd.uniform(size=(20, 5)).astype(np.float32)
X_train_64 = X_train_32.astype(np.float64)
X_train_int_64 = X_train_32.astype(np.int64)
X_train_int_32 = X_train_32.astype(np.int32)
y = X_train_int_64[:, 0]
y = multioutput_estimator_convert_y_2d(estimator_orig, y)
methods = ["predict", "transform", "decision_function", "predict_proba"]
for X_train in [X_train_32, X_train_64, X_train_int_64, X_train_int_32]:
estimator = clone(estimator_orig)
set_random_state(estimator, 1)
estimator.fit(X_train, y)
for method in methods:
if hasattr(estimator, method):
getattr(estimator, method)(X_train)
@ignore_warnings(category=(DeprecationWarning, FutureWarning))
def check_estimators_empty_data_messages(name, estimator_orig):
e = clone(estimator_orig)
set_random_state(e, 1)
X_zero_samples = np.empty(0).reshape(0, 3)
# The precise message can change depending on whether X or y is
# validated first. Let us test the type of exception only:
with assert_raises(ValueError, msg="The estimator {} does not"
" raise an error when an empty data is used "
"to train. Perhaps use "
"check_array in train.".format(name)):
e.fit(X_zero_samples, [])
X_zero_features = np.empty(0).reshape(3, 0)
# the following y should be accepted by both classifiers and regressors
# and ignored by unsupervised models
y = multioutput_estimator_convert_y_2d(e, np.array([1, 0, 1]))
msg = ("0 feature\(s\) \(shape=\(3, 0\)\) while a minimum of \d* "
"is required.")
assert_raises_regex(ValueError, msg, e.fit, X_zero_features, y)
@ignore_warnings(category=DeprecationWarning)
def check_estimators_nan_inf(name, estimator_orig):
    # Check that the estimator fails gracefully when X contains NaN or inf.
rnd = np.random.RandomState(0)
X_train_finite = rnd.uniform(size=(10, 3))
X_train_nan = rnd.uniform(size=(10, 3))
X_train_nan[0, 0] = np.nan
X_train_inf = rnd.uniform(size=(10, 3))
X_train_inf[0, 0] = np.inf
y = np.ones(10)
y[:5] = 0
y = multioutput_estimator_convert_y_2d(estimator_orig, y)
error_string_fit = "Estimator doesn't check for NaN and inf in fit."
error_string_predict = ("Estimator doesn't check for NaN and inf in"
" predict.")
error_string_transform = ("Estimator doesn't check for NaN and inf in"
" transform.")
for X_train in [X_train_nan, X_train_inf]:
# catch deprecation warnings
with ignore_warnings(category=(DeprecationWarning, FutureWarning)):
estimator = clone(estimator_orig)
set_random_state(estimator, 1)
# try to fit
try:
estimator.fit(X_train, y)
except ValueError as e:
if 'inf' not in repr(e) and 'NaN' not in repr(e):
print(error_string_fit, estimator, e)
traceback.print_exc(file=sys.stdout)
raise e
except Exception as exc:
print(error_string_fit, estimator, exc)
traceback.print_exc(file=sys.stdout)
raise exc
else:
raise AssertionError(error_string_fit, estimator)
# actually fit
estimator.fit(X_train_finite, y)
# predict
if hasattr(estimator, "predict"):
try:
estimator.predict(X_train)
except ValueError as e:
if 'inf' not in repr(e) and 'NaN' not in repr(e):
print(error_string_predict, estimator, e)
traceback.print_exc(file=sys.stdout)
raise e
except Exception as exc:
print(error_string_predict, estimator, exc)
traceback.print_exc(file=sys.stdout)
else:
raise AssertionError(error_string_predict, estimator)
# transform
if hasattr(estimator, "transform"):
try:
estimator.transform(X_train)
except ValueError as e:
if 'inf' not in repr(e) and 'NaN' not in repr(e):
print(error_string_transform, estimator, e)
traceback.print_exc(file=sys.stdout)
raise e
except Exception as exc:
print(error_string_transform, estimator, exc)
traceback.print_exc(file=sys.stdout)
else:
raise AssertionError(error_string_transform, estimator)
@ignore_warnings
def check_estimators_pickle(name, estimator_orig):
"""Test that we can pickle all estimators"""
check_methods = ["predict", "transform", "decision_function",
"predict_proba"]
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
    # some estimators can't handle feature values less than 0
X -= X.min()
estimator = clone(estimator_orig)
    # some estimators only accept multi-output y
y = multioutput_estimator_convert_y_2d(estimator, y)
set_random_state(estimator)
estimator.fit(X, y)
result = dict()
for method in check_methods:
if hasattr(estimator, method):
result[method] = getattr(estimator, method)(X)
# pickle and unpickle!
pickled_estimator = pickle.dumps(estimator)
if estimator.__module__.startswith('sklearn.'):
assert_true(b"version" in pickled_estimator)
unpickled_estimator = pickle.loads(pickled_estimator)
for method in result:
unpickled_result = getattr(unpickled_estimator, method)(X)
assert_allclose_dense_sparse(result[method], unpickled_result)
@ignore_warnings(category=(DeprecationWarning, FutureWarning))
def check_estimators_partial_fit_n_features(name, estimator_orig):
    # check that an error is raised if the number of features changes
    # between calls to partial_fit.
if not hasattr(estimator_orig, 'partial_fit'):
return
estimator = clone(estimator_orig)
X, y = make_blobs(n_samples=50, random_state=1)
X -= X.min()
try:
if is_classifier(estimator):
classes = np.unique(y)
estimator.partial_fit(X, y, classes=classes)
else:
estimator.partial_fit(X, y)
except NotImplementedError:
return
with assert_raises(ValueError,
msg="The estimator {} does not raise an"
" error when the number of features"
" changes between calls to "
"partial_fit.".format(name)):
estimator.partial_fit(X[:, :-1], y)
@ignore_warnings(category=(DeprecationWarning, FutureWarning))
def check_clustering(name, clusterer_orig):
clusterer = clone(clusterer_orig)
X, y = make_blobs(n_samples=50, random_state=1)
X, y = shuffle(X, y, random_state=7)
X = StandardScaler().fit_transform(X)
n_samples, n_features = X.shape
# catch deprecation and neighbors warnings
if hasattr(clusterer, "n_clusters"):
clusterer.set_params(n_clusters=3)
set_random_state(clusterer)
if name == 'AffinityPropagation':
clusterer.set_params(preference=-100)
clusterer.set_params(max_iter=100)
# fit
clusterer.fit(X)
# with lists
clusterer.fit(X.tolist())
pred = clusterer.labels_
assert_equal(pred.shape, (n_samples,))
assert_greater(adjusted_rand_score(pred, y), 0.4)
# fit another time with ``fit_predict`` and compare results
if name == 'SpectralClustering':
# there is no way to make Spectral clustering deterministic :(
return
set_random_state(clusterer)
with warnings.catch_warnings(record=True):
pred2 = clusterer.fit_predict(X)
assert_array_equal(pred, pred2)
# fit_predict(X) and labels_ should be of type int
assert_in(pred.dtype, [np.dtype('int32'), np.dtype('int64')])
assert_in(pred2.dtype, [np.dtype('int32'), np.dtype('int64')])
# There should be at least one sample in every cluster. Equivalently
# labels_ should contain all the consecutive values between its
# min and its max.
pred_sorted = np.unique(pred)
assert_array_equal(pred_sorted, np.arange(pred_sorted[0],
pred_sorted[-1] + 1))
# labels_ should be greater than -1
assert_greater_equal(pred_sorted[0], -1)
# labels_ should be less than n_clusters - 1
if hasattr(clusterer, 'n_clusters'):
n_clusters = getattr(clusterer, 'n_clusters')
assert_greater_equal(n_clusters - 1, pred_sorted[-1])
# else labels_ should be less than max(labels_) which is necessarily true
@ignore_warnings(category=DeprecationWarning)
def check_clusterer_compute_labels_predict(name, clusterer_orig):
"""Check that predict is invariant of compute_labels"""
X, y = make_blobs(n_samples=20, random_state=0)
clusterer = clone(clusterer_orig)
if hasattr(clusterer, "compute_labels"):
# MiniBatchKMeans
if hasattr(clusterer, "random_state"):
clusterer.set_params(random_state=0)
X_pred1 = clusterer.fit(X).predict(X)
clusterer.set_params(compute_labels=False)
X_pred2 = clusterer.fit(X).predict(X)
assert_array_equal(X_pred1, X_pred2)
@ignore_warnings(category=DeprecationWarning)
def check_classifiers_one_label(name, classifier_orig):
error_string_fit = "Classifier can't train when only one class is present."
error_string_predict = ("Classifier can't predict when only one class is "
"present.")
rnd = np.random.RandomState(0)
X_train = rnd.uniform(size=(10, 3))
X_test = rnd.uniform(size=(10, 3))
y = np.ones(10)
# catch deprecation warnings
with ignore_warnings(category=(DeprecationWarning, FutureWarning)):
classifier = clone(classifier_orig)
# try to fit
try:
classifier.fit(X_train, y)
except ValueError as e:
if 'class' not in repr(e):
print(error_string_fit, classifier, e)
traceback.print_exc(file=sys.stdout)
raise e
else:
return
except Exception as exc:
print(error_string_fit, classifier, exc)
traceback.print_exc(file=sys.stdout)
raise exc
# predict
try:
assert_array_equal(classifier.predict(X_test), y)
except Exception as exc:
print(error_string_predict, classifier, exc)
raise exc
@ignore_warnings # Warnings are raised by decision function
def check_classifiers_train(name, classifier_orig):
X_m, y_m = make_blobs(n_samples=300, random_state=0)
X_m, y_m = shuffle(X_m, y_m, random_state=7)
X_m = StandardScaler().fit_transform(X_m)
# generate binary problem from multi-class one
y_b = y_m[y_m != 2]
X_b = X_m[y_m != 2]
for (X, y) in [(X_m, y_m), (X_b, y_b)]:
classes = np.unique(y)
n_classes = len(classes)
n_samples, n_features = X.shape
classifier = clone(classifier_orig)
if name in ['BernoulliNB', 'MultinomialNB', 'ComplementNB']:
X -= X.min()
set_random_state(classifier)
# raises error on malformed input for fit
with assert_raises(ValueError, msg="The classifer {} does not"
" raise an error when incorrect/malformed input "
"data for fit is passed. The number of training "
"examples is not the same as the number of labels."
" Perhaps use check_X_y in fit.".format(name)):
classifier.fit(X, y[:-1])
# fit
classifier.fit(X, y)
# with lists
classifier.fit(X.tolist(), y.tolist())
assert_true(hasattr(classifier, "classes_"))
y_pred = classifier.predict(X)
assert_equal(y_pred.shape, (n_samples,))
# training set performance
if name not in ['BernoulliNB', 'MultinomialNB', 'ComplementNB']:
assert_greater(accuracy_score(y, y_pred), 0.83)
# raises error on malformed input for predict
with assert_raises(ValueError, msg="The classifier {} does not"
" raise an error when the number of features "
"in predict is different from the number of"
" features in fit.".format(name)):
classifier.predict(X.T)
if hasattr(classifier, "decision_function"):
try:
# decision_function agrees with predict
decision = classifier.decision_function(X)
if n_classes == 2:
assert_equal(decision.shape, (n_samples,))
dec_pred = (decision.ravel() > 0).astype(np.int)
assert_array_equal(dec_pred, y_pred)
if (n_classes == 3 and
                    # the one-vs-one decision_function of LibSVM works
                    # differently
not isinstance(classifier, BaseLibSVM)):
assert_equal(decision.shape, (n_samples, n_classes))
assert_array_equal(np.argmax(decision, axis=1), y_pred)
# raises error on malformed input for decision_function
with assert_raises(ValueError, msg="The classifier {} does"
" not raise an error when the number of "
"features in decision_function is "
"different from the number of features"
" in fit.".format(name)):
classifier.decision_function(X.T)
except NotImplementedError:
pass
if hasattr(classifier, "predict_proba"):
# predict_proba agrees with predict
y_prob = classifier.predict_proba(X)
assert_equal(y_prob.shape, (n_samples, n_classes))
assert_array_equal(np.argmax(y_prob, axis=1), y_pred)
# check that probas for all classes sum to one
assert_allclose(np.sum(y_prob, axis=1), np.ones(n_samples))
# raises error on malformed input for predict_proba
with assert_raises(ValueError, msg="The classifier {} does not"
" raise an error when the number of features "
"in predict_proba is different from the number "
"of features in fit.".format(name)):
classifier.predict_proba(X.T)
if hasattr(classifier, "predict_log_proba"):
# predict_log_proba is a transformation of predict_proba
y_log_prob = classifier.predict_log_proba(X)
assert_allclose(y_log_prob, np.log(y_prob), 8, atol=1e-9)
assert_array_equal(np.argsort(y_log_prob), np.argsort(y_prob))
@ignore_warnings(category=(DeprecationWarning, FutureWarning))
def check_estimators_fit_returns_self(name, estimator_orig):
"""Check if self is returned when calling fit"""
X, y = make_blobs(random_state=0, n_samples=9, n_features=4)
# some want non-negative input
X -= X.min()
estimator = clone(estimator_orig)
y = multioutput_estimator_convert_y_2d(estimator, y)
set_random_state(estimator)
assert_true(estimator.fit(X, y) is estimator)
@ignore_warnings
def check_estimators_unfitted(name, estimator_orig):
"""Check that predict raises an exception in an unfitted estimator.
Unfitted estimators should raise either AttributeError or ValueError.
The specific exception type NotFittedError inherits from both and can
therefore be adequately raised for that purpose.
"""
# Common test for Regressors as well as Classifiers
X, y = _boston_subset()
est = clone(estimator_orig)
msg = "fit"
if hasattr(est, 'predict'):
assert_raise_message((AttributeError, ValueError), msg,
est.predict, X)
if hasattr(est, 'decision_function'):
assert_raise_message((AttributeError, ValueError), msg,
est.decision_function, X)
if hasattr(est, 'predict_proba'):
assert_raise_message((AttributeError, ValueError), msg,
est.predict_proba, X)
if hasattr(est, 'predict_log_proba'):
assert_raise_message((AttributeError, ValueError), msg,
est.predict_log_proba, X)
@ignore_warnings(category=(DeprecationWarning, FutureWarning))
def check_supervised_y_2d(name, estimator_orig):
if "MultiTask" in name:
# These only work on 2d, so this test makes no sense
return
rnd = np.random.RandomState(0)
X = rnd.uniform(size=(10, 3))
y = np.arange(10) % 3
estimator = clone(estimator_orig)
set_random_state(estimator)
# fit
estimator.fit(X, y)
y_pred = estimator.predict(X)
set_random_state(estimator)
# Check that when a 2D y is given, a DataConversionWarning is
# raised
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always", DataConversionWarning)
warnings.simplefilter("ignore", RuntimeWarning)
estimator.fit(X, y[:, np.newaxis])
y_pred_2d = estimator.predict(X)
msg = "expected 1 DataConversionWarning, got: %s" % (
", ".join([str(w_x) for w_x in w]))
if name not in MULTI_OUTPUT:
# check that we warned if we don't support multi-output
assert_greater(len(w), 0, msg)
assert_true("DataConversionWarning('A column-vector y"
" was passed when a 1d array was expected" in msg)
assert_allclose(y_pred.ravel(), y_pred_2d.ravel())
@ignore_warnings(category=(DeprecationWarning, FutureWarning))
def check_classifiers_classes(name, classifier_orig):
X, y = make_blobs(n_samples=30, random_state=0, cluster_std=0.1)
X, y = shuffle(X, y, random_state=7)
X = StandardScaler().fit_transform(X)
# We need to make sure that we have non negative data, for things
# like NMF
X -= X.min() - .1
y_names = np.array(["one", "two", "three"])[y]
for y_names in [y_names, y_names.astype('O')]:
if name in ["LabelPropagation", "LabelSpreading"]:
# TODO some complication with -1 label
y_ = y
else:
y_ = y_names
classes = np.unique(y_)
classifier = clone(classifier_orig)
if name == 'BernoulliNB':
X = X > X.mean()
set_random_state(classifier)
# fit
classifier.fit(X, y_)
y_pred = classifier.predict(X)
# training set performance
if name != "ComplementNB":
# This is a pathological data set for ComplementNB.
assert_array_equal(np.unique(y_), np.unique(y_pred))
if np.any(classifier.classes_ != classes):
print("Unexpected classes_ attribute for %r: "
"expected %s, got %s" %
(classifier, classes, classifier.classes_))
@ignore_warnings(category=(DeprecationWarning, FutureWarning))
def check_regressors_int(name, regressor_orig):
X, _ = _boston_subset()
X = X[:50]
rnd = np.random.RandomState(0)
y = rnd.randint(3, size=X.shape[0])
y = multioutput_estimator_convert_y_2d(regressor_orig, y)
rnd = np.random.RandomState(0)
# separate estimators to control random seeds
regressor_1 = clone(regressor_orig)
regressor_2 = clone(regressor_orig)
set_random_state(regressor_1)
set_random_state(regressor_2)
if name in CROSS_DECOMPOSITION:
y_ = np.vstack([y, 2 * y + rnd.randint(2, size=len(y))])
y_ = y_.T
else:
y_ = y
# fit
regressor_1.fit(X, y_)
pred1 = regressor_1.predict(X)
regressor_2.fit(X, y_.astype(np.float))
pred2 = regressor_2.predict(X)
assert_allclose(pred1, pred2, atol=1e-2, err_msg=name)
@ignore_warnings(category=(DeprecationWarning, FutureWarning))
def check_regressors_train(name, regressor_orig):
X, y = _boston_subset()
y = StandardScaler().fit_transform(y.reshape(-1, 1)) # X is already scaled
y = y.ravel()
regressor = clone(regressor_orig)
y = multioutput_estimator_convert_y_2d(regressor, y)
rnd = np.random.RandomState(0)
if not hasattr(regressor, 'alphas') and hasattr(regressor, 'alpha'):
# linear regressors need to set alpha, but not generalized CV ones
regressor.alpha = 0.01
if name == 'PassiveAggressiveRegressor':
regressor.C = 0.01
# raises error on malformed input for fit
with assert_raises(ValueError, msg="The classifer {} does not"
" raise an error when incorrect/malformed input "
"data for fit is passed. The number of training "
"examples is not the same as the number of "
"labels. Perhaps use check_X_y in fit.".format(name)):
regressor.fit(X, y[:-1])
# fit
if name in CROSS_DECOMPOSITION:
y_ = np.vstack([y, 2 * y + rnd.randint(2, size=len(y))])
y_ = y_.T
else:
y_ = y
set_random_state(regressor)
regressor.fit(X, y_)
regressor.fit(X.tolist(), y_.tolist())
y_pred = regressor.predict(X)
assert_equal(y_pred.shape, y_.shape)
# TODO: find out why PLS and CCA fail. RANSAC is random
# and furthermore assumes the presence of outliers, hence
# skipped
if name not in ('PLSCanonical', 'CCA', 'RANSACRegressor'):
assert_greater(regressor.score(X, y_), 0.5)
@ignore_warnings
def check_regressors_no_decision_function(name, regressor_orig):
    # check that regressors that still expose decision_function or
    # predict_proba raise a DeprecationWarning when these are called
rng = np.random.RandomState(0)
X = rng.normal(size=(10, 4))
regressor = clone(regressor_orig)
y = multioutput_estimator_convert_y_2d(regressor, X[:, 0])
if hasattr(regressor, "n_components"):
# FIXME CCA, PLS is not robust to rank 1 effects
regressor.n_components = 1
regressor.fit(X, y)
funcs = ["decision_function", "predict_proba", "predict_log_proba"]
for func_name in funcs:
func = getattr(regressor, func_name, None)
if func is None:
# doesn't have function
continue
# has function. Should raise deprecation warning
msg = func_name
assert_warns_message(DeprecationWarning, msg, func, X)
@ignore_warnings(category=(DeprecationWarning, FutureWarning))
def check_class_weight_classifiers(name, classifier_orig):
if name == "NuSVC":
# the sparse version has a parameter that doesn't do anything
raise SkipTest("Not testing NuSVC class weight as it is ignored.")
if name.endswith("NB"):
# NaiveBayes classifiers have a somewhat different interface.
# FIXME SOON!
raise SkipTest
for n_centers in [2, 3]:
# create a very noisy dataset
X, y = make_blobs(centers=n_centers, random_state=0, cluster_std=20)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5,
random_state=0)
n_centers = len(np.unique(y_train))
if n_centers == 2:
class_weight = {0: 1000, 1: 0.0001}
else:
class_weight = {0: 1000, 1: 0.0001, 2: 0.0001}
classifier = clone(classifier_orig).set_params(
class_weight=class_weight)
if hasattr(classifier, "n_iter"):
classifier.set_params(n_iter=100)
if hasattr(classifier, "max_iter"):
classifier.set_params(max_iter=1000)
if hasattr(classifier, "min_weight_fraction_leaf"):
classifier.set_params(min_weight_fraction_leaf=0.01)
set_random_state(classifier)
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)
# XXX: Generally can use 0.89 here. On Windows, LinearSVC gets
# 0.88 (Issue #9111)
assert_greater(np.mean(y_pred == 0), 0.87)
@ignore_warnings(category=(DeprecationWarning, FutureWarning))
def check_class_weight_balanced_classifiers(name, classifier_orig, X_train,
y_train, X_test, y_test, weights):
classifier = clone(classifier_orig)
if hasattr(classifier, "n_iter"):
classifier.set_params(n_iter=100)
if hasattr(classifier, "max_iter"):
classifier.set_params(max_iter=1000)
set_random_state(classifier)
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)
classifier.set_params(class_weight='balanced')
classifier.fit(X_train, y_train)
y_pred_balanced = classifier.predict(X_test)
assert_greater(f1_score(y_test, y_pred_balanced, average='weighted'),
f1_score(y_test, y_pred, average='weighted'))
@ignore_warnings(category=(DeprecationWarning, FutureWarning))
def check_class_weight_balanced_linear_classifier(name, Classifier):
"""Test class weights with non-contiguous class labels."""
# this is run on classes, not instances, though this should be changed
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = np.array([1, 1, 1, -1, -1])
classifier = Classifier()
if hasattr(classifier, "n_iter"):
        # This is a very small dataset; the default n_iter is likely too
        # small to reach convergence
classifier.set_params(n_iter=1000)
if hasattr(classifier, "max_iter"):
classifier.set_params(max_iter=1000)
set_random_state(classifier)
# Let the model compute the class frequencies
classifier.set_params(class_weight='balanced')
coef_balanced = classifier.fit(X, y).coef_.copy()
# Count each label occurrence to reweight manually
n_samples = len(y)
n_classes = float(len(np.unique(y)))
class_weight = {1: n_samples / (np.sum(y == 1) * n_classes),
-1: n_samples / (np.sum(y == -1) * n_classes)}
classifier.set_params(class_weight=class_weight)
coef_manual = classifier.fit(X, y).coef_.copy()
assert_allclose(coef_balanced, coef_manual)
@ignore_warnings(category=(DeprecationWarning, FutureWarning))
def check_estimators_overwrite_params(name, estimator_orig):
X, y = make_blobs(random_state=0, n_samples=9)
# some want non-negative input
X -= X.min()
estimator = clone(estimator_orig)
y = multioutput_estimator_convert_y_2d(estimator, y)
set_random_state(estimator)
# Make a physical copy of the original estimator parameters before fitting.
params = estimator.get_params()
original_params = deepcopy(params)
# Fit the model
estimator.fit(X, y)
# Compare the state of the model parameters with the original parameters
new_params = estimator.get_params()
for param_name, original_value in original_params.items():
new_value = new_params[param_name]
# We should never change or mutate the internal state of input
# parameters by default. To check this we use the joblib.hash function
# that introspects recursively any subobjects to compute a checksum.
# The only exception to this rule of immutable constructor parameters
# is possible RandomState instance but in this check we explicitly
# fixed the random_state params recursively to be integer seeds.
assert_equal(hash(new_value), hash(original_value),
"Estimator %s should not change or mutate "
" the parameter %s from %s to %s during fit."
% (name, param_name, original_value, new_value))
@ignore_warnings(category=(DeprecationWarning, FutureWarning))
def check_no_fit_attributes_set_in_init(name, Estimator):
"""Check that Estimator.__init__ doesn't set trailing-_ attributes."""
# this check works on classes, not instances
estimator = Estimator()
for attr in dir(estimator):
if attr.endswith("_") and not attr.startswith("__"):
# This check is for properties, they can be listed in dir
# while at the same time have hasattr return False as long
# as the property getter raises an AttributeError
assert_false(
hasattr(estimator, attr),
"By convention, attributes ending with '_' are "
'estimated from data in scikit-learn. Consequently they '
'should not be initialized in the constructor of an '
'estimator but in the fit method. Attribute {!r} '
'was found in estimator {}'.format(attr, name))
@ignore_warnings(category=(DeprecationWarning, FutureWarning))
def check_sparsify_coefficients(name, estimator_orig):
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1],
[-1, -2], [2, 2], [-2, -2]])
y = [1, 1, 1, 2, 2, 2, 3, 3, 3]
est = clone(estimator_orig)
est.fit(X, y)
pred_orig = est.predict(X)
# test sparsify with dense inputs
est.sparsify()
assert_true(sparse.issparse(est.coef_))
pred = est.predict(X)
assert_array_equal(pred, pred_orig)
# pickle and unpickle with sparse coef_
est = pickle.loads(pickle.dumps(est))
assert_true(sparse.issparse(est.coef_))
pred = est.predict(X)
assert_array_equal(pred, pred_orig)
@ignore_warnings(category=DeprecationWarning)
def check_classifier_data_not_an_array(name, estimator_orig):
X = np.array([[3, 0], [0, 1], [0, 2], [1, 1], [1, 2], [2, 1]])
y = [1, 1, 1, 2, 2, 2]
y = multioutput_estimator_convert_y_2d(estimator_orig, y)
check_estimators_data_not_an_array(name, estimator_orig, X, y)
@ignore_warnings(category=DeprecationWarning)
def check_regressor_data_not_an_array(name, estimator_orig):
X, y = _boston_subset(n_samples=50)
y = multioutput_estimator_convert_y_2d(estimator_orig, y)
check_estimators_data_not_an_array(name, estimator_orig, X, y)
@ignore_warnings(category=(DeprecationWarning, FutureWarning))
def check_estimators_data_not_an_array(name, estimator_orig, X, y):
if name in CROSS_DECOMPOSITION:
raise SkipTest("Skipping check_estimators_data_not_an_array "
"for cross decomposition module as estimators "
"are not deterministic.")
# separate estimators to control random seeds
estimator_1 = clone(estimator_orig)
estimator_2 = clone(estimator_orig)
set_random_state(estimator_1)
set_random_state(estimator_2)
y_ = NotAnArray(np.asarray(y))
X_ = NotAnArray(np.asarray(X))
# fit
estimator_1.fit(X_, y_)
pred1 = estimator_1.predict(X_)
estimator_2.fit(X, y)
pred2 = estimator_2.predict(X)
assert_allclose(pred1, pred2, atol=1e-2, err_msg=name)
def check_parameters_default_constructible(name, Estimator):
# this check works on classes, not instances
classifier = LinearDiscriminantAnalysis()
# test default-constructibility
# get rid of deprecation warnings
with ignore_warnings(category=(DeprecationWarning, FutureWarning)):
if name in META_ESTIMATORS:
estimator = Estimator(classifier)
else:
estimator = Estimator()
# test cloning
clone(estimator)
# test __repr__
repr(estimator)
# test that set_params returns self
assert_true(estimator.set_params() is estimator)
# test if init does nothing but set parameters
# this is important for grid_search etc.
# We get the default parameters from init and then
# compare these against the actual values of the attributes.
# this comes from getattr. Gets rid of deprecation decorator.
init = getattr(estimator.__init__, 'deprecated_original',
estimator.__init__)
try:
def param_filter(p):
"""Identify hyper parameters of an estimator"""
return (p.name != 'self' and
p.kind != p.VAR_KEYWORD and
p.kind != p.VAR_POSITIONAL)
init_params = [p for p in signature(init).parameters.values()
if param_filter(p)]
except (TypeError, ValueError):
# init is not a python function.
# true for mixins
return
params = estimator.get_params()
if name in META_ESTIMATORS:
# they can need a non-default argument
init_params = init_params[1:]
for init_param in init_params:
assert_not_equal(init_param.default, init_param.empty,
"parameter %s for %s has no default value"
% (init_param.name, type(estimator).__name__))
assert_in(type(init_param.default),
[str, int, float, bool, tuple, type(None),
np.float64, types.FunctionType, Memory])
if init_param.name not in params.keys():
# deprecated parameter, not in get_params
assert_true(init_param.default is None)
continue
if (issubclass(Estimator, BaseSGD) and
init_param.name in ['tol', 'max_iter']):
# To remove in 0.21, when they get their future default values
continue
param_value = params[init_param.name]
if isinstance(param_value, np.ndarray):
assert_array_equal(param_value, init_param.default)
else:
assert_equal(param_value, init_param.default, init_param.name)
def multioutput_estimator_convert_y_2d(estimator, y):
    # Estimators in mono_output_task_error raise ValueError if y is 1-D.
    # Convert y into a 2-D array for those estimators.
if "MultiTask" in estimator.__class__.__name__:
return np.reshape(y, (-1, 1))
return y
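# e.g. for MultiTaskLasso a y of shape (n_samples,) becomes (n_samples, 1);
# every other estimator receives y unchanged.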
@ignore_warnings(category=(DeprecationWarning, FutureWarning))
def check_non_transformer_estimators_n_iter(name, estimator_orig):
    # Test that estimators that are not transformers but have a max_iter
    # parameter set an n_iter_ attribute of at least 1 after fitting.
# These models are dependent on external solvers like
# libsvm and accessing the iter parameter is non-trivial.
not_run_check_n_iter = ['Ridge', 'SVR', 'NuSVR', 'NuSVC',
'RidgeClassifier', 'SVC', 'RandomizedLasso',
'LogisticRegressionCV', 'LinearSVC',
'LogisticRegression']
# Tested in test_transformer_n_iter
not_run_check_n_iter += CROSS_DECOMPOSITION
if name in not_run_check_n_iter:
return
    # LassoLars stops early for the default alpha=1.0 on the iris dataset.
if name == 'LassoLars':
estimator = clone(estimator_orig).set_params(alpha=0.)
else:
estimator = clone(estimator_orig)
if hasattr(estimator, 'max_iter'):
iris = load_iris()
X, y_ = iris.data, iris.target
y_ = multioutput_estimator_convert_y_2d(estimator, y_)
set_random_state(estimator, 0)
if name == 'AffinityPropagation':
estimator.fit(X)
else:
estimator.fit(X, y_)
# HuberRegressor depends on scipy.optimize.fmin_l_bfgs_b
# which doesn't return a n_iter for old versions of SciPy.
if not (name == 'HuberRegressor' and estimator.n_iter_ is None):
assert_greater_equal(estimator.n_iter_, 1)
@ignore_warnings(category=(DeprecationWarning, FutureWarning))
def check_transformer_n_iter(name, estimator_orig):
    # Test that transformers with a max_iter parameter set an n_iter_
    # attribute of at least 1 after fitting.
estimator = clone(estimator_orig)
if hasattr(estimator, "max_iter"):
if name in CROSS_DECOMPOSITION:
# Check using default data
X = [[0., 0., 1.], [1., 0., 0.], [2., 2., 2.], [2., 5., 4.]]
y_ = [[0.1, -0.2], [0.9, 1.1], [0.1, -0.5], [0.3, -0.2]]
else:
X, y_ = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X -= X.min() - 0.1
set_random_state(estimator, 0)
estimator.fit(X, y_)
# These return a n_iter per component.
if name in CROSS_DECOMPOSITION:
for iter_ in estimator.n_iter_:
assert_greater_equal(iter_, 1)
else:
assert_greater_equal(estimator.n_iter_, 1)
@ignore_warnings(category=(DeprecationWarning, FutureWarning))
def check_get_params_invariance(name, estimator_orig):
# Checks if get_params(deep=False) is a subset of get_params(deep=True)
class T(BaseEstimator):
"""Mock classifier
"""
def __init__(self):
pass
def fit(self, X, y):
return self
def transform(self, X):
return X
e = clone(estimator_orig)
shallow_params = e.get_params(deep=False)
deep_params = e.get_params(deep=True)
assert_true(all(item in deep_params.items() for item in
shallow_params.items()))
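# e.g. for a Pipeline, get_params(deep=True) additionally contains nested
# '<step>__<param>' entries (say, 'svc__C' for an SVC step); the deep=False
# dict must be a subset of the deep one.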
@ignore_warnings(category=(DeprecationWarning, FutureWarning))
def check_classifiers_regression_target(name, estimator_orig):
# Check if classifier throws an exception when fed regression targets
boston = load_boston()
X, y = boston.data, boston.target
e = clone(estimator_orig)
msg = 'Unknown label type: '
assert_raises_regex(ValueError, msg, e.fit, X, y)
@ignore_warnings(category=(DeprecationWarning, FutureWarning))
def check_decision_proba_consistency(name, estimator_orig):
# Check whether an estimator having both decision_function and
# predict_proba methods has outputs with perfect rank correlation.
centers = [(2, 2), (4, 4)]
X, y = make_blobs(n_samples=100, random_state=0, n_features=4,
centers=centers, cluster_std=1.0, shuffle=True)
X_test = np.random.randn(20, 2) + 4
estimator = clone(estimator_orig)
if (hasattr(estimator, "decision_function") and
hasattr(estimator, "predict_proba")):
estimator.fit(X, y)
a = estimator.predict_proba(X_test)[:, 1]
b = estimator.decision_function(X_test)
assert_array_equal(rankdata(a), rankdata(b))
|
import sys
from aardvark_py import *
from array import array  # aardvark_py re-exports array, but be explicit
BUFFER_SIZE = 2048
SPI_BITRATE = 1000
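# Requested SPI bitrate in kHz; aa_spi_bitrate() returns the rate the
# device actually selected, which may differ from the request.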
def blast_bytes(handle, filename):
# Open the file
    try:
        f = open(filename, 'rb')
    except IOError:
        print("Unable to open file '" + filename + "'")
        return
trans_num = 0
    while True:
# Read from the file
filedata = f.read(BUFFER_SIZE)
if (len(filedata) == 0):
break
# Write the data to the bus
data_out = array('B', filedata)
data_in = array_u08(len(data_out))
(count, data_in) = aa_spi_write(handle, data_out, data_in)
        if count < 0:
            print("error: %s" % aa_status_string(count))
            break
        elif count != len(data_out):
            print("error: only a partial number of bytes written")
            print("    (%d) instead of full (%d)" % (count, len(data_out)))
sys.stdout.write("*** Transaction #%02d\n" % trans_num)
sys.stdout.write("Data written to device:")
for i in range(count):
if ((i&0x0f) == 0):
sys.stdout.write("\n%04x: " % i)
sys.stdout.write("%02x " % (data_out[i] & 0xff))
if (((i+1)&0x07) == 0):
sys.stdout.write(" ")
sys.stdout.write("\n\n")
sys.stdout.write("Data read from device:")
for i in range(count):
if ((i&0x0f) == 0):
sys.stdout.write("\n%04x: " % i)
sys.stdout.write("%02x " % (data_in[i] & 0xff))
if (((i+1)&0x07) == 0):
sys.stdout.write(" ")
sys.stdout.write("\n\n")
trans_num = trans_num + 1
# Sleep a tad to make sure slave has time to process this request
aa_sleep_ms(10)
f.close()
if len(sys.argv) < 4:
    print("usage: aaspi_file PORT MODE filename")
    print(" mode 0 : pol = 0, phase = 0")
    print(" mode 1 : pol = 0, phase = 1")
    print(" mode 2 : pol = 1, phase = 0")
    print(" mode 3 : pol = 1, phase = 1")
    print("")
    print(" 'filename' should contain data to be sent")
    print(" to the downstream spi device")
    sys.exit()
port = int(sys.argv[1])
mode = int(sys.argv[2])
filename = sys.argv[3]
handle = aa_open(port)
if handle <= 0:
    print("Unable to open Aardvark device on port %d" % port)
    print("Error code = %d" % handle)
    sys.exit()
aa_configure(handle, AA_CONFIG_SPI_I2C)
aa_target_power(handle, AA_TARGET_POWER_BOTH)
aa_spi_configure(handle, mode >> 1, mode & 1, AA_SPI_BITORDER_MSB)
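# mode encodes (polarity, phase) as described in the usage text: bit 1
# (mode >> 1) is the clock polarity, bit 0 (mode & 1) is the clock phase.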
bitrate = aa_spi_bitrate(handle, SPI_BITRATE)
print "Bitrate set to %d kHz" % bitrate
blast_bytes(handle, filename)
aa_close(handle)
|
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('myblog', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Contact',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('message_from_me', models.TextField()),
('subject', models.CharField(max_length=33)),
('message_from_user', models.TextField()),
],
),
migrations.CreateModel(
name='Project',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=50)),
('link', models.URLField()),
('image', models.ImageField(default=None, upload_to='myblog/image/project')),
('detail', models.TextField()),
('created_on', models.DateTimeField()),
],
),
migrations.CreateModel(
name='SocialSite',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('site_name', models.CharField(max_length=10)),
('link', models.URLField()),
],
options={
'verbose_name_plural': 'Social Sites',
},
),
]
|
from django.core.management.base import BaseCommand
from ...territori.models import Territorio
class Command(BaseCommand):
    help = 'Fix for the autonomous provinces (provincie autonome) of Trento and Bolzano'
def handle(self, *args, **options):
Territorio.objects.regioni().get(denominazione='TRENTINO-ALTO ADIGE/SUDTIROL').delete()
for name in ['BOLZANO', 'TRENTO']:
territorio = Territorio.objects.provincie().get(denominazione__istartswith=name)
territorio.pk = None
territorio.tipo = Territorio.TIPO.R
territorio.cod_reg = territorio.cod_prov
territorio.cod_prov = None
territorio.denominazione = 'P.A. DI {}'.format(name)
territorio.slug = None
territorio.save()
Territorio.objects.provincie().filter(cod_prov=territorio.cod_reg).update(cod_reg=territorio.cod_reg)
Territorio.objects.comuni().filter(cod_prov=territorio.cod_reg).update(cod_reg=territorio.cod_reg)
|
from django.contrib import admin
from .models import dynamic_models
admin.site.register(dynamic_models.values())
|
from __future__ import print_function
import numpy as np
import sys
import mesh.patch as patch
import compressible_sr.eos as eos
from util import msg
def init_data(my_data, rp):
""" initialize the bubble problem """
msg.bold("initializing the bubble problem...")
# make sure that we are passed a valid patch object
if not isinstance(my_data, patch.CellCenterData2d):
print("ERROR: patch invalid in bubble.py")
print(my_data.__class__)
sys.exit()
# get the density, momenta, and energy as separate variables
dens = my_data.get_var("density")
xmom = my_data.get_var("x-momentum")
ymom = my_data.get_var("y-momentum")
ener = my_data.get_var("energy")
gamma = rp.get_param("eos.gamma")
grav = rp.get_param("compressible.grav")
scale_height = rp.get_param("bubble.scale_height")
dens_base = rp.get_param("bubble.dens_base")
dens_cutoff = rp.get_param("bubble.dens_cutoff")
x_pert = rp.get_param("bubble.x_pert")
y_pert = rp.get_param("bubble.y_pert")
r_pert = rp.get_param("bubble.r_pert")
pert_amplitude_factor = rp.get_param("bubble.pert_amplitude_factor")
    # initialize the components; remember that ener here is
# rho*eint + 0.5*rho*v**2, where eint is the specific
# internal energy (erg/g)
xmom[:, :] = 0.0
ymom[:, :] = 0.0
dens[:, :] = dens_cutoff
# set the density to be stratified in the y-direction
myg = my_data.grid
p = myg.scratch_array()
cs2 = scale_height*abs(grav)
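    # cs2 = g*H is the constant isothermal sound speed squared; the loop
    # below integrates the hydrostatic balance dp/dy = rho*g with a
    # trapezoidal rule on top of the isothermal density stratification.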
for j in range(myg.jlo, myg.jhi+1):
dens[:, j] = max(dens_base*np.exp(-myg.y[j]/scale_height),
dens_cutoff)
if j == myg.jlo:
p[:, j] = dens[:, j]*cs2
else:
p[:, j] = p[:, j-1] + 0.5*myg.dy*(dens[:, j] + dens[:, j-1])*grav
# set the energy (P = cs2*dens)
ener[:, :] = p[:, :]/(gamma - 1.0) + \
0.5*(xmom[:, :]**2 + ymom[:, :]**2)/dens[:, :]
r = np.sqrt((myg.x2d - x_pert)**2 + (myg.y2d - y_pert)**2)
idx = r <= r_pert
# boost the specific internal energy, keeping the pressure
# constant, by dropping the density
    eint = (ener[idx] - 0.5*(xmom[idx]**2 + ymom[idx]**2)/dens[idx])/dens[idx]
pres = dens[idx]*eint*(gamma - 1.0)
eint = eint*pert_amplitude_factor
dens[idx] = pres/(eint*(gamma - 1.0))
ener[idx] = dens[idx]*eint + 0.5*(xmom[idx]**2 + ymom[idx]**2)/dens[idx]
# p[idx] = pres
rhoh = eos.rhoh_from_rho_p(gamma, dens, p)
    W = 1 / (np.sqrt(1-(xmom**2+ymom**2)/dens))
dens[:, :] *= W
xmom[:, :] *= rhoh[:, :]/dens*W**2
ymom[:, :] *= rhoh[:, :]/dens*W**2
    # HACK: the full expression didn't work, but W = 1 here, so we can cheat
ener[:, :] = rhoh[:, :]*W**2 - p - dens[:, :]
# ener[:, :] = p / (gamma-1)
def finalize():
""" print out any information to the user at the end of the run """
pass
|
import os
import pygame
import sys
import threading, time
from pygame.locals import *
import logging
log = logging.getLogger('pytality.term.pygame')
log.debug("pygame version: %r", pygame.version.ver)
"""
A mapping of special keycodes into representative strings.
Based off the keymap in WConio, but with 'alt', 'ctrl', and 'shift'
stripped in order to be portable with the other pytality backends.
"""
key_map = {
K_RETURN: 'enter',
K_F1 : 'f1',
K_F2 : 'f2',
K_F3 : 'f3',
K_F4 : 'f4',
K_F5 : 'f5',
K_F6 : 'f6',
K_F7 : 'f7',
K_F8 : 'f8',
K_F9 : 'f9',
K_F10 : 'f10',
K_INSERT : 'ins',
K_DELETE : 'del',
K_HOME : 'home',
K_END : 'end',
K_PAGEDOWN : 'pgdn',
K_PAGEUP : 'pgup',
K_DOWN : 'down',
K_LEFT : 'left',
K_RIGHT : 'right',
K_UP : 'up',
}
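# e.g. a KEYDOWN event with key == K_PAGEUP maps to the portable string
# 'pgup'; printable keys are presumably handled elsewhere (via the event's
# unicode field) rather than through this table.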
if hasattr(sys, 'frozen'):
base_path = os.path.join(os.path.realpath(os.path.dirname(sys.argv[0])), 'data')
else:
base_path = os.path.join(os.path.dirname(__file__), 'silverlight_html', 'images')
W = 8
H = 12
sprites = {}
quit = False
cursor_thread = None
replaced_character = None
cursor_x = 0
cursor_y = 0
cursor_type = None
class CursorThread(threading.Thread):
def __init__(self, *args, **kwargs):
super(CursorThread, self).__init__(*args, **kwargs)
self.quitEvent = threading.Event()
def run(self):
blink = True
while True:
blink = not blink
try:
pygame.event.post(pygame.event.Event(USEREVENT, blink=blink))
except pygame.error:
return
if self.quitEvent.wait(timeout=0.5):
break
def init(use_cp437=True, blink=False):
pygame.init()
    #There are several kinds of events we are patently not interested in
pygame.event.set_blocked([
MOUSEBUTTONUP,
JOYAXISMOTION, JOYBALLMOTION, JOYHATMOTION, JOYBUTTONDOWN, JOYBUTTONUP,
#we only need KEYDOWN
KEYUP
])
pygame.mouse.set_visible(False)
#prepare the raw_getkey generator
prepare_raw_getkey()
global quit
quit = False
#spawn a blinky-cursor manager
global cursor_thread, replaced_character, cursor_x, cursor_y, cursor_type
cursor_x = 0
cursor_y = 0
replaced_character = None
cursor_type = None
if blink:
cursor_thread = CursorThread()
cursor_thread.daemon = True
cursor_thread.start()
def load_sprites():
if 'bg' in sprites:
#we only need to load once
return
def load_image(key_name, *filepath):
full_path = os.path.join(base_path, *filepath)
surface = pygame.image.load(full_path).convert_alpha()
sprites[key_name] = surface
load_image('bg', 'colors.png')
for color_id in range(16):
load_image(color_id, 'char', '%s.png' % color_id)
def blink_cursor(event):
global replaced_character
if event.blink:
replace_character()
else:
restore_character()
def replace_character():
global replaced_character
if not cursor_type:
return
fg, bg, ch = get_at(cursor_x, cursor_y)
replaced_character = (cursor_x, cursor_y, fg, bg, ch)
new_fg = 15
if bg == 15:
new_fg = 7
blit_at(cursor_x, cursor_y, new_fg, bg, cursor_type)
pygame.display.flip()
def restore_character():
global replaced_character
if not replaced_character:
return
x, y, fg, bg, ch = replaced_character
blit_at(x, y, fg, bg, ch)
pygame.display.flip()
replaced_character = None
def flip():
#keep the event queue happy
for event in pygame.event.get([
#this should be all the event types we aren't blocking
#and aren't about keyboard input
QUIT,
ACTIVEEVENT,
VIDEORESIZE,
VIDEOEXPOSE,
USEREVENT
]):
if event.type == QUIT:
raise KeyboardInterrupt()
elif event.type == USEREVENT:
blink_cursor(event)
else:
#we don't actually care
pass
#flip the screen
pygame.display.flip()
def clear():
if quit:
return
screen.fill((0, 0, 0))
global cell_data
cell_data = [
[
[0, 0, ' ']
for cell in range(max_x)
]
for row in range(max_y)
]
def resize(width, height):
global screen
screen = pygame.display.set_mode((width*W, height*H))
#we don't use alpha, and turning it off makes it a tad faster
screen.set_alpha(None)
#load the console images to blit later
load_sprites()
#set our max dimensions
global max_x, max_y
max_x, max_y = width, height
clear()
flip()
def reset():
pygame.display.quit()
global quit
quit = True
if cursor_thread:
cursor_thread.quitEvent.set()
cursor_thread.join()
def move_cursor(x, y):
global cursor_x, cursor_y
restore_character()
cursor_x = x
cursor_y = y
replace_character()
def set_title(title):
pygame.display.set_caption(title)
def set_cursor_type(i):
global cursor_type
cursor_map = {
0: None,
1: '_',
2: chr(0xDB)
}
restore_character()
cursor_type = cursor_map[i]
def cache_sprite(fg, bg, ch):
bg_sprite = sprites['bg']
fg_sprite = sprites[fg]
index = ord(ch)
#coordinates on the bg sprite map
bg_x = bg * W
#coordinates on the fg sprite map
fg_x = (index % 16) * W
fg_y = int(index / 16) * H
cell_sprite = pygame.Surface((W, H))
#voodoo: this helps a little bit.
cell_sprite.set_alpha(None)
#blit the background and foreground to the cell
cell_sprite.blit(bg_sprite, dest=(0, 0), area=pygame.Rect(bg_x, 0, W, H))
cell_sprite.blit(fg_sprite, dest=(0, 0), area=pygame.Rect(fg_x, fg_y, W, H))
sprites[(fg, bg, ch)] = cell_sprite
return cell_sprite
def blit_at(x, y, fg, bg, ch):
#blit one character to the screen.
#because function calls are pricey, this is also inlined (ew) in draw_buffer, so the contents are kept short.
#coordinates on the screen
screen_x = x * W
screen_y = y * H
#cache each (bg, fg, index) cell we draw into a surface so it's easier to redraw.
#it's a little bit of a memory waste, and takes longer on the first draw, but we're dealing with ascii here
#so there's probably a lot of reuse.
try:
cell_sprite = sprites[(fg, bg, ch)]
except KeyError:
#make a new one
cell_sprite = cache_sprite(fg, bg, ch)
#blit the cell to the screen
screen.blit(cell_sprite, dest=(screen_x, screen_y))
def draw_buffer(source, start_x, start_y):
"""
render the buffer to our backing.
This is a hotpath, and there's more microoptimization here than i'd like, but FPS is kindof important.
"""
y = start_y
#lookups we can cache into locals
#i know, it's such a microoptimization, but this path qualifies as hot
local_cell_data, local_sprites, local_screen_blit = cell_data, sprites, screen.blit
local_W, local_H = W, H
screen_width, screen_height = max_x, max_y
source_width = source.width
is_overlay = source.is_overlay
for row in source._data:
if y < 0:
y += 1
continue
if y >= screen_height:
break
x = start_x
#do something analogous to row[:source.width]
#but without the pointless copy that requires
w = 0
for fg, bg, ch in row:
if x >= screen_width or w >= source_width:
break
if x >= 0:
#no need to blit if it's already identical
old_data = local_cell_data[y][x]
new_data = [fg, bg, ch]
if new_data != old_data and not (is_overlay and ch == ' '):
#draw it and remember the info for our cache
#this used to call blit_at but now it's inline.
                    try:
                        cell_sprite = local_sprites[(fg, bg, ch)]
except KeyError:
#make a new one
cell_sprite = cache_sprite(fg, bg, ch)
#blit the cell to the screen
local_screen_blit(cell_sprite, dest=(x*local_W, y*local_H))
#remember the info for the cache
local_cell_data[y][x] = new_data
x += 1
w += 1
y += 1
source.dirty = False
return
def get_at(x, y):
if x < 0 or x >= max_x or y < 0 or y >= max_y:
raise ValueError("get_at: Invalid coordinate (%r, %r)" % (x,y))
global cell_data
return cell_data[y][x]
def prepare_raw_getkey():
"""
It looks like pygame fully intends for you to process _all_ keyboard input at the moment you
look at the event queue.
    That won't do here, so we turn raw_getkey into a generator.
Worse, pygame.event.wait() can't filter by type and removes the event from the queue,
so we have to keep re-adding events we didn't want in the first place. Ugh.
"""
#this is weird - pygame turns off keyboard repeat by default, which you can re-enable
#by setting a delay in ms, but "what the system normally does" is not an option.
#it seems like 150ms delay and 15 keys-per-second is normalish.
pygame.key.set_repeat(150, 1000 / 15)
global raw_getkey
def translate(event):
if event.type == MOUSEMOTION:
x, y = event.pos
return ("mouse_motion", x / W, y / H)
if event.type == KEYDOWN:
log.debug("key event: %r", event.dict)
if event.key in key_map:
return key_map[event.key]
return event.unicode
if event.type == MOUSEBUTTONDOWN:
x, y = event.pos
return ("mouse_down", x / W, y / H)
def keypump():
items = []
event_types = [MOUSEMOTION, KEYDOWN, MOUSEBUTTONDOWN]
while True:
if not items:
if pygame.event.peek(event_types):
#there's keyboard input pending! great!
items.extend(pygame.event.get(event_types))
else:
#there's no keyboard input pending, so we need to take a nap until there is.
#if we get an event we dont care about, we have to put it back
#but if we put it back, .wait() will give it right back to us
#so we have to keep it around until we find what we want, then re-add it.
#ugh.
ignored_items = []
while True:
item = pygame.event.wait()
if item.type == USEREVENT:
blink_cursor(item)
elif item.type not in event_types:
ignored_items.append(item)
else:
items.append(item)
break
for ignored_item in ignored_items:
pygame.event.post(ignored_item)
yield translate(items.pop(0))
#assign the generator's next() method as raw_getkey
raw_getkey = keypump().next
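# Illustrative sketch (not part of the original backend): a minimal main loop
# wired against the functions above. The 80x25 cell size and the 'enter' exit
# key are arbitrary choices for the demo.
def _demo_loop():
    init(blink=True)
    resize(80, 25)
    set_title('pytality pygame demo')
    try:
        while True:
            key = raw_getkey()  # blocks until a key or mouse event arrives
            if key == 'enter':
                break
            flip()
    finally:
        reset()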
|
"""Benchmark Walk algorithm"""
import numpy as np
import bench
import obsoper.walk
class BenchmarkWalk(bench.Suite):
def setUp(self):
longitudes, latitudes = np.meshgrid([1, 2, 3],
[1, 2, 3],
indexing="ij")
self.fixture = obsoper.walk.Walk.from_lonlats(longitudes,
latitudes)
def bench_detect(self):
for _ in range(10):
self.fixture.detect((2.9, 2.9), i=0, j=0)
|
"""flatty - marshaller/unmarshaller for light-schema python objects"""
VERSION = (0, 1, 2)
__version__ = ".".join(map(str, VERSION))
__author__ = "Christian Haintz"
__contact__ = "christian.haintz@orangelabs.at"
__homepage__ = "http://packages.python.org/flatty"
__docformat__ = "restructuredtext"
from flatty import *
try:
import mongo
except ImportError:
pass
try:
import couch
except ImportError:
pass
|
from django.db import models
SETTING_NAME = (
('conf_space', 'Confluence Space Key'),
('conf_page', 'Confluence Page'),
('jira_project', 'JIRA Project Code Name'),
('github_project', 'GitHub Project'),
)
class AppSettings(models.Model):
name = models.CharField(max_length=50,
primary_key=True,
choices=SETTING_NAME)
content = models.CharField(max_length=255)
class Meta:
verbose_name_plural = "settings"
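# Illustrative usage sketch (assumption, not part of this app): fetch a stored
# setting by key, falling back to a default when it has not been configured.
def get_setting(name, default=''):
    try:
        return AppSettings.objects.get(name=name).content
    except AppSettings.DoesNotExist:
        return default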
|
import sys
from . import main
if __name__ == '__main__':
sys.exit(main.main())
|
"""
Principal search specific constants.
"""
__version__ = "$Revision-Id:$"
SEARCH_MODE_USER_ONLY = 0
SEARCH_MODE_GROUP_ONLY = 1
SEARCH_MODE_USER_AND_GROUP = 2
ALL_PRINCIPAL = "____allprincipal____"
AUTHENTICATED_PRINCIPAL = "____authenticatedprincipal____"
UNAUTHENTICATED_PRINCIPAL = "____unauthenticatedprincipal____"
OWNER_PRINCIPAL = "____ownerprincipal____"
USER_PRINCIPAL_TYPE = "____user____"
GROUP_PRINCIPAL_TYPE = "____group____"
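# Illustrative sketch (not part of the original module): mapping a search mode
# constant to the principal types it should match.
def principal_types_for_mode(mode):
    if mode == SEARCH_MODE_USER_ONLY:
        return [USER_PRINCIPAL_TYPE]
    if mode == SEARCH_MODE_GROUP_ONLY:
        return [GROUP_PRINCIPAL_TYPE]
    return [USER_PRINCIPAL_TYPE, GROUP_PRINCIPAL_TYPE]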
|
<?xml version="1.0" encoding="UTF-8"?>
<ui version="4.0">
<class>Dialog</class>
<widget class="QDialog" name="Dialog">
<property name="geometry">
<rect>
<x>0</x>
<y>0</y>
<width>400</width>
<height>300</height>
</rect>
</property>
<property name="windowTitle">
<string>Dialog</string>
</property>
<widget class="QDialogButtonBox" name="buttonBox">
<property name="geometry">
<rect>
<x>30</x>
<y>240</y>
<width>341</width>
<height>32</height>
</rect>
</property>
<property name="orientation">
<enum>Qt::Horizontal</enum>
</property>
<property name="standardButtons">
<set>QDialogButtonBox::Cancel|QDialogButtonBox::Ok</set>
</property>
</widget>
<widget class="QListWidget" name="lMetaData">
<property name="geometry">
<rect>
<x>10</x>
<y>10</y>
<width>256</width>
<height>192</height>
</rect>
</property>
</widget>
<widget class="QPushButton" name="bAdd">
<property name="geometry">
<rect>
<x>280</x>
<y>10</y>
<width>115</width>
<height>32</height>
</rect>
</property>
<property name="text">
<string>Add</string>
</property>
</widget>
</widget>
<resources/>
<connections>
<connection>
<sender>buttonBox</sender>
<signal>accepted()</signal>
<receiver>Dialog</receiver>
<slot>accept()</slot>
<hints>
<hint type="sourcelabel">
<x>248</x>
<y>254</y>
</hint>
<hint type="destinationlabel">
<x>157</x>
<y>274</y>
</hint>
</hints>
</connection>
<connection>
<sender>buttonBox</sender>
<signal>rejected()</signal>
<receiver>Dialog</receiver>
<slot>reject()</slot>
<hints>
<hint type="sourcelabel">
<x>316</x>
<y>260</y>
</hint>
<hint type="destinationlabel">
<x>286</x>
<y>274</y>
</hint>
</hints>
</connection>
</connections>
</ui>
|
import os
import shutil
class BasicOperations_TestClass:
    TEST_ROOT = '__test_root__'
def setUp(self):
        self.regenerate_root()
print(self.TEST_ROOT)
assert os.path.isdir(self.TEST_ROOT)
def tearDown(self):
return True
def test_test(self):
assert self.bar == 1
def regenerate_root(self):
if os.path.isdir(self.TEST_ROOT):
            shutil.rmtree(self.TEST_ROOT)
os.makedirs(self.TEST_ROOT)
|
import Adafruit_BBIO.GPIO as GPIO
import time
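# Count edge events on two BeagleBone pins, one counter per direction; the
# interrupt callbacks report the running totals while the main loop idles.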
a=0
b=0
def derecha(channel):
    global a
    a += 1
    print 'right count is {0}'.format(a)
def izquierda(channel):
    global b
    b += 1
    print 'left count is {0}'.format(b)
GPIO.setup("P9_11", GPIO.IN)
GPIO.setup("P9_13", GPIO.IN)
GPIO.add_event_detect("P9_11", GPIO.BOTH)
GPIO.add_event_detect("P9_13", GPIO.BOTH)
GPIO.add_event_callback("P9_11", derecha)
GPIO.add_event_callback("P9_13", izquierda)
while True:
    print "things happen"
time.sleep(1)
|
from __future__ import absolute_import, unicode_literals
import logging
from django.core.cache import cache
from django.test import TestCase
from django.contrib import admin
from physical.tests.factory import DiskOfferingFactory, EnvironmentFactory
from physical.errors import NoDiskOfferingGreaterError, NoDiskOfferingLesserError
from system.models import Configuration
from ..admin.disk_offering import DiskOfferingAdmin
from ..forms.disk_offerring import DiskOfferingForm
from ..models import DiskOffering
LOG = logging.getLogger(__name__)
SEARCH_FIELDS = ('name', )
LIST_FIELDS = ('name', 'size_gb', 'selected_environments')
SAVE_ON_TOP = True
UNICODE_FORMAT = '{}'
class DiskOfferingTestCase(TestCase):
def create_basic_disks(self):
for disk_offering in DiskOffering.objects.all():
for plan in disk_offering.plans.all():
plan.databaseinfras.all().delete()
disk_offering.plans.all().delete()
disk_offering.delete()
cache.clear()
self.bigger = DiskOfferingFactory()
self.bigger.size_kb *= 30
self.bigger.environments.add(self.environment)
self.bigger.save()
self.medium = DiskOfferingFactory()
self.medium.size_kb *= 20
self.medium.environments.add(self.environment)
self.medium.save()
self.smaller = DiskOfferingFactory()
self.smaller.size_kb *= 10
self.smaller.environments.add(self.environment)
self.smaller.save()
def setUp(self):
self.admin = DiskOfferingAdmin(DiskOffering, admin.sites.AdminSite())
self.auto_resize_max_size_in_gb = Configuration(
name='auto_resize_max_size_in_gb', value=100
)
self.auto_resize_max_size_in_gb.save()
self.environment = EnvironmentFactory()
def tearDown(self):
if self.auto_resize_max_size_in_gb.id:
self.auto_resize_max_size_in_gb.delete()
def test_search_fields(self):
self.assertEqual(SEARCH_FIELDS, self.admin.search_fields)
def test_list_fields(self):
self.assertEqual(LIST_FIELDS, self.admin.list_display)
def test_save_position(self):
self.assertEqual(SAVE_ON_TOP, self.admin.save_on_top)
def test_adding_gb_to_kb(self):
disk_offering_form = DiskOfferingForm(
data={
'name': 'disk_offering_small',
'size_gb': 0.5,
'environments': [self.environment.id]
}
)
self.assertTrue(disk_offering_form.is_valid())
self.admin.save_model(
request=None, obj=disk_offering_form.instance,
form=disk_offering_form, change=None
)
disk_offering = DiskOffering.objects.get(name='disk_offering_small')
self.assertEqual(disk_offering.size_gb(), 0.5)
self.assertEqual(disk_offering.size_kb, 524288)
def test_editing_gb_to_kb(self):
disk_factory = DiskOfferingFactory()
disk_offering = DiskOffering.objects.get(pk=disk_factory.pk)
self.assertEqual(disk_offering.size_gb(), 1)
self.assertEqual(disk_offering.size_kb, 1048576)
disk_offering_form = DiskOfferingForm(
data={
'name': disk_offering.name,
'size_gb': 1.5,
'environments': [self.environment.id]
},
instance=disk_offering
)
self.assertTrue(disk_offering_form.is_valid())
self.admin.save_model(
request=None, obj=disk_offering,
form=disk_offering_form, change=None
)
self.assertEqual(disk_offering.size_gb(), 1.5)
self.assertEqual(disk_offering.size_kb, 1572864)
def test_edit_initial_values(self):
disk_offering_form = DiskOfferingForm()
self.assertNotIn('name', disk_offering_form.initial)
self.assertIn('size_gb', disk_offering_form.initial)
self.assertIsNone(disk_offering_form.initial['size_gb'])
disk_factory = DiskOfferingFactory()
disk_offering = DiskOffering.objects.get(pk=disk_factory.pk)
disk_offering_form = DiskOfferingForm(instance=disk_offering)
self.assertEqual(
disk_offering_form.initial['name'], disk_offering.name
)
self.assertEqual(
disk_offering_form.initial['size_gb'], disk_offering.size_gb()
)
def test_model_sizes(self):
disk_factory = DiskOfferingFactory()
self.assertEqual(disk_factory.size_kb, 1048576)
self.assertEqual(disk_factory.size_gb(), 1.0)
self.assertEqual(disk_factory.size_bytes(), 1073741824)
disk_offering = DiskOffering()
self.assertIsNone(disk_offering.size_kb)
self.assertIsNone(disk_offering.size_gb())
self.assertIsNone(disk_offering.size_bytes())
def test_model_converter(self):
disk_factory = DiskOfferingFactory()
self.assertEqual(disk_factory.converter_kb_to_gb(1572864), 1.5)
self.assertEqual(disk_factory.converter_kb_to_bytes(524288), 536870912)
self.assertEqual(disk_factory.converter_gb_to_kb(0.75), 786432)
self.assertIsNone(disk_factory.converter_kb_to_gb(0))
self.assertIsNone(disk_factory.converter_kb_to_bytes(0))
self.assertIsNone(disk_factory.converter_gb_to_kb(0))
def test_unicode(self):
disk_offering = DiskOffering()
expected_unicode = UNICODE_FORMAT.format(disk_offering.name)
self.assertEqual(expected_unicode, str(disk_offering))
def test_disk_offering_is_in_admin(self):
self.assertIn(DiskOffering, admin.site._registry)
admin_class = admin.site._registry[DiskOffering]
self.assertIsInstance(admin_class, DiskOfferingAdmin)
def test_can_found_greater_disk(self):
self.create_basic_disks()
found = DiskOffering.first_greater_than(
self.smaller.size_kb, self.environment)
self.assertEqual(self.medium, found)
found = DiskOffering.first_greater_than(
self.medium.size_kb, self.environment)
self.assertEqual(self.bigger, found)
def test_cannot_found_greater_disk(self):
self.create_basic_disks()
self.assertRaises(
NoDiskOfferingGreaterError,
DiskOffering.first_greater_than, self.bigger.size_kb, self.environment
)
def test_can_found_greater_disk_with_exclude(self):
self.create_basic_disks()
found = DiskOffering.first_greater_than(
self.smaller.size_kb, self.environment, exclude_id=self.medium.id
)
self.assertEqual(self.bigger, found)
def test_can_found_disk_for_auto_resize(self):
self.create_basic_disks()
self.auto_resize_max_size_in_gb.value = int(self.bigger.size_gb())
self.auto_resize_max_size_in_gb.save()
found = DiskOffering.last_offering_available_for_auto_resize(
self.environment)
self.assertEqual(self.bigger, found)
self.auto_resize_max_size_in_gb.value = int(self.bigger.size_gb()) - 1
self.auto_resize_max_size_in_gb.save()
found = DiskOffering.last_offering_available_for_auto_resize(
self.environment)
self.assertEqual(self.medium, found)
def test_cannot_found_disk_for_auto_resize(self):
self.create_basic_disks()
self.auto_resize_max_size_in_gb.value = int(self.smaller.size_gb()) - 1
self.auto_resize_max_size_in_gb.save()
self.assertRaises(
NoDiskOfferingLesserError,
DiskOffering.last_offering_available_for_auto_resize, self.environment
)
def test_compare_disks(self):
self.create_basic_disks()
self.assertGreater(self.bigger, self.smaller)
self.assertLess(self.smaller, self.bigger)
self.medium_twice = DiskOfferingFactory()
self.medium_twice.size_kb *= 20
self.medium_twice.save()
self.assertEqual(self.medium, self.medium)
self.assertNotEqual(self.medium, self.medium_twice)
self.medium_twice.delete()
def test_disk_is_last_offering(self):
self.create_basic_disks()
self.auto_resize_max_size_in_gb.value = int(self.medium.size_gb()) + 1
self.auto_resize_max_size_in_gb.save()
self.assertFalse(
self.smaller.is_last_auto_resize_offering(self.environment)
)
self.assertTrue(
self.medium.is_last_auto_resize_offering(self.environment)
)
self.assertFalse(
self.bigger.is_last_auto_resize_offering(self.environment)
)
def test_disk_is_last_offering_without_param(self):
self.create_basic_disks()
self.auto_resize_max_size_in_gb.delete()
self.assertFalse(
self.smaller.is_last_auto_resize_offering(self.environment)
)
self.assertFalse(
self.medium.is_last_auto_resize_offering(self.environment)
)
self.assertTrue(
self.bigger.is_last_auto_resize_offering(self.environment)
)
|
from __future__ import print_function
from permuta import *
import permstruct
import permstruct.dag
from permstruct import *
from permstruct.dag import taylor_dag
import sys
task = '1234_1243_2134_2431_4213'
patts = [ Permutation([ int(c) for c in p ]) for p in task.split('_') ]
struct(patts, size=6, perm_bound=8, subpatts_len=4, subpatts_num=3)
|
import sys
import os
import os.path as op
__version__ = '2.0.5'
from cfchecker.cfchecks import getargs, CFChecker
def cfchecks_main():
"""cfchecks_main is based on the main program block in cfchecks.py
"""
    (badc, coards, uploader, useFileName, standardName, areaTypes,
     udunitsDat, version, files) = getargs(sys.argv)
    inst = CFChecker(uploader=uploader, useFileName=useFileName, badc=badc,
                     coards=coards, cfStandardNamesXML=standardName,
                     cfAreaTypesXML=areaTypes, udunitsDat=udunitsDat,
                     version=version)
for file in files:
rc = inst.checker(file)
    sys.exit(rc)
|
"""fitsdiff is now a part of Astropy.
Now this module just provides a wrapper around astropy.io.fits.diff for backwards
compatibility with the old interface in case anyone uses it.
"""
import os
import sys
from astropy.io.fits.diff import FITSDiff
from astropy.io.fits.scripts.fitsdiff import log, main
def fitsdiff(input1, input2, comment_excl_list='', value_excl_list='',
field_excl_list='', maxdiff=10, delta=0.0, neglect_blanks=True,
output=None):
if isinstance(comment_excl_list, str):
comment_excl_list = list_parse(comment_excl_list)
if isinstance(value_excl_list, str):
value_excl_list = list_parse(value_excl_list)
if isinstance(field_excl_list, str):
field_excl_list = list_parse(field_excl_list)
diff = FITSDiff(input1, input2, ignore_keywords=value_excl_list,
ignore_comments=comment_excl_list,
ignore_fields=field_excl_list, numdiffs=maxdiff,
tolerance=delta, ignore_blanks=neglect_blanks)
if output is None:
output = sys.stdout
diff.report(output)
return diff.identical
def list_parse(name_list):
"""Parse a comma-separated list of values, or a filename (starting with @)
containing a list value on each line.
"""
if name_list and name_list[0] == '@':
value = name_list[1:]
if not os.path.exists(value):
log.warning('The file %s does not exist' % value)
return
        try:
            with open(value, 'r') as fh:
                return [v.strip() for v in fh.readlines()]
except IOError as e:
log.warning('reading %s failed: %s; ignoring this file' %
(value, e))
else:
return [v.strip() for v in name_list.split(',')]
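# Illustrative sketch (not part of the original module): how the two helpers
# above combine. The file names are hypothetical placeholders.
def _demo():
    assert list_parse('SIMPLE,BITPIX') == ['SIMPLE', 'BITPIX']
    # Compare two files, ignoring the listed header keywords; returns True
    # when the files are identical under those exclusions.
    return fitsdiff('old.fits', 'new.fits', value_excl_list='SIMPLE,BITPIX')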
if __name__ == "__main__":
sys.exit(main())
|
"""
Model class that unites theory with data.
"""
import logging
logger = logging.getLogger('Model_mod')
import copy
import scipy
import scipy.integrate
import scipy.linalg
import SloppyCell
import SloppyCell.Residuals as Residuals
import SloppyCell.Collections as Collections
import SloppyCell.Utility as Utility
from . import KeyedList_mod as KeyedList_mod
KeyedList = KeyedList_mod.KeyedList
_double_epsilon_ = scipy.finfo(scipy.float_).eps
class Model:
"""
A Model object connects a set of experimental data with the objects used to
model that data.
Most importantly, a Model can calculate a cost for a given set of
parameters, characterizing how well those parameters fit the data contained
within the model.
"""
imag_cutoff = 1e-8
def __init__(self, expts, calcs):
"""
expts A sequence of Experiments to be fit to.
calcs A sequence of calculation objects referred to by the
Experiments.
"""
self.calcVals = {}
self.calcSensitivityVals = {}
self.internalVars = {}
self.internalVarsDerivs = {}
self.residuals = KeyedList()
if isinstance(expts, list):
expts = Collections.ExperimentCollection(expts)
elif isinstance(expts, dict):
expts = Collections.ExperimentCollection(expts.values())
self.SetExperimentCollection(expts)
if isinstance(calcs, list):
calcs = Collections.CalculationCollection(calcs)
elif isinstance(calcs, dict):
calcs = Collections.CalculationCollection(calcs.values())
self.SetCalculationCollection(calcs)
self.observers = KeyedList()
self.parameter_bounds = {}
def compile(self):
"""
Compile all the calculations contained within the Model.
"""
for calc in self.get_calcs().values():
calc.compile()
def copy(self):
return copy.deepcopy(self)
def get_params(self):
"""
Return a copy of the current model parameters
"""
return self.calcColl.GetParameters()
def get_ICs(self):
"""
Get the initial conditions currently present in a model
for dynamic variables that are not assigned variables.
Outputs:
KeyedList with keys (calcName,varName) --> initialValue
"""
ics=KeyedList()
for calcName, calc in self.calcColl.items():
for varName in calc.dynamicVars.keys():
if varName in calc.assignedVars.keys(): continue
ics.set( (calcName,varName), calc.get_var_ic(varName))
return ics
def set_ICs(self, ics):
"""
Sets the initial conditions into the model. Uses the input
format defined by 'getICs'.
Inputs:
ics -- Initial conditions to set in KeyedList form:
keys: (calcName, varName) --> intialValue
Outputs:
None
"""
for (calcName, varName), initialValue in ics.items():
self.calcColl.get(calcName).set_var_ic(varName, initialValue)
def _evaluate(self, params, T=1):
"""
Evaluate the cost for the model, returning the intermediate residuals,
and chi-squared.
(Summing up the residuals is a negligible amount of work. This
        arrangement makes notification of observers much simpler.)
"""
self.params.update(params)
self.check_parameter_bounds(params)
self.CalculateForAllDataPoints(params)
self.ComputeInternalVariables(T)
resvals = [res.GetValue(self.calcVals, self.internalVars, self.params)
for res in self.residuals.values()]
# Occasionally it's useful to use residuals with a sqrt(-1) in them,
# to get negative squares. Then, however, we might get small imaginary
# parts in our results, which this shaves off.
chisq = scipy.real_if_close(scipy.sum(scipy.asarray(resvals)**2),
tol=self.imag_cutoff)
if scipy.isnan(chisq):
logger.warn('Chi^2 is NaN, converting to Infinity.')
chisq = scipy.inf
cost = 0.5 * chisq
entropy = 0
for expt, sf_ents in self.internalVars['scaleFactor_entropies'].items():
for group, ent in sf_ents.items():
entropy += ent
self._notify(event = 'evaluation',
resvals = resvals,
chisq = chisq,
cost = cost,
free_energy = cost-T*entropy,
entropy = entropy,
params = self.params)
return resvals, chisq, cost, entropy
def res(self, params):
"""
Return the residual values of the model fit given a set of parameters
"""
return self._evaluate(params)[0]
def res_log_params(self, log_params):
"""
Return the residual values given the logarithm of the parameters
"""
return self.res(scipy.exp(log_params))
def res_dict(self, params):
"""
Return the residual values of the model fit given a set of parameters
in dictionary form.
"""
return dict(zip(self.residuals.keys(), self.res(params)))
def chisq(self, params):
"""
Return the sum of the squares of the residuals for the model
"""
return self._evaluate(params)[1]
def redchisq(self, params):
"""
Return chi-squared divided by the number of degrees of freedom
Question: Are priors to be included in the N data points?
How do scale factors change the number of d.o.f.?
"""
return self.chisq(params)/(len(self.residuals) - len(self.params))
def cost(self, params):
"""
Return the cost (1/2 chisq) of the model
"""
return self._evaluate(params)[2]
def cost_log_params(self, log_params):
"""
Return the cost given the logarithm of the input parameters
"""
return self.cost(scipy.exp(log_params))
def free_energy(self, params, T):
temp, temp, c, entropy = self._evaluate(params, T=T)
return c - T * entropy
def _notify(self, **args):
"""
Call all observers with the given arguments.
"""
for obs in self.observers:
obs(**args)
def attach_observer(self, obs_key, observer):
"""
Add an observer to be notified by this Model.
"""
self.observers.set(obs_key, observer)
def detach_observer(self, obs_key):
"""
Remove an observer from the Model.
"""
self.observers.remove_by_key(obs_key)
def get_observers(self):
"""
Return the KeyedList of observers for this model.
"""
return self.observers
def reset_observers(self):
"""
Call reset() for all attached observers.
"""
for obs in self.observers:
if hasattr(obs, 'reset'):
obs.reset()
resDict = res_dict
# ...
def AddResidual(self, res):
self.residuals.setByKey(res.key, res)
def Force(self, params, epsf, relativeScale=False, stepSizeCutoff=None):
"""
Force(parameters, epsilon factor) -> list
Returns a list containing the numerical gradient of the cost with
respect to each parameter (in the parameter order of the
CalculationCollection). Each element of the gradient is:
        (cost(param + eps) - cost(param - eps)) / (2 * eps).
If relativeScale is False then epsf is the stepsize used (it should
already be multiplied by typicalValues before Jacobian is called)
If relativeScale is True then epsf is multiplied by params.
The two previous statements hold for both scalar and vector valued
epsf.
"""
force = []
params = scipy.array(params)
        if stepSizeCutoff is None:
stepSizeCutoff = scipy.sqrt(_double_epsilon_)
if relativeScale is True:
eps = epsf * abs(params)
else:
eps = epsf * scipy.ones(len(params),scipy.float_)
for i in range(0,len(eps)):
if eps[i] < stepSizeCutoff:
eps[i] = stepSizeCutoff
for index, param in enumerate(params):
paramsPlus = params.copy()
paramsPlus[index] = param + eps[index]
costPlus = self.cost(paramsPlus)
paramsMinus = params.copy()
paramsMinus[index] = param - eps[index]
costMinus = self.cost(paramsMinus)
force.append((costPlus-costMinus)/(2.0*eps[index]))
return force
def gradient_sens(self, params):
"""
Return the gradient of the cost, d_cost/d_param as a KeyedList.
This method uses sensitivity integration, so it only applies to
ReactionNetworks.
"""
self.params.update(params)
# The cost is 0.5 * sum(res**2),
# so the gradient is sum(res * dres_dp)
jac_dict = self.jacobian_sens(params)
res_dict = self.res_dict(params)
force = scipy.zeros(len(params), scipy.float_)
for res_key, res_val in res_dict.items():
res_derivs = jac_dict.get(res_key)
force += res_val * scipy.asarray(res_derivs)
gradient = self.params.copy()
gradient.update(force)
return gradient
def gradient_log_params_sens(self, log_params):
"""
Return the gradient of the cost wrt log parameters, d_cost/d_log_param
as a KeyedList.
This method uses sensitivity integration, so it only applies to
ReactionNetworks.
"""
# We just need to multiply dcost_dp by p.
params = scipy.exp(log_params)
gradient = self.gradient_sens(params)
gradient_log = gradient.copy()
gradient_log.update(scipy.asarray(gradient) * scipy.asarray(params))
return gradient_log
def CalculateForAllDataPoints(self, params):
"""
CalculateForAllDataPoints(parameters) -> dictionary
Gets a dictionary of measured independent variables indexed by
calculation from the ExperimentCollection and passes it to the
CalculationCollection. The returned dictionary is of the form:
dictionary[experiment][calculation][dependent variable]
        [independent variable] -> calculated value.
"""
self.params.update(params)
varsByCalc = self.GetExperimentCollection().GetVarsByCalc()
self.calcVals = self.GetCalculationCollection().Calculate(varsByCalc,
params)
return self.calcVals
def CalculateSensitivitiesForAllDataPoints(self, params):
"""
CalculateSensitivitiesForAllDataPoints(parameters) -> dictionary
Gets a dictionary of measured independent variables indexed by
calculation from the ExperimentCollection and passes it to the
CalculationCollection. The returned dictionary is of the form:
dictionary[experiment][calculation][dependent variable]
        [independent variable][parameter] -> sensitivity.
"""
varsByCalc = self.GetExperimentCollection().GetVarsByCalc()
self.calcVals, self.calcSensitivityVals =\
self.GetCalculationCollection().CalculateSensitivity(varsByCalc,
params)
return self.calcSensitivityVals
def ComputeInternalVariables(self, T=1):
sf, sf_ents = self.compute_scale_factors(T)
self.internalVars['scaleFactors'] = sf
self.internalVars['scaleFactor_entropies'] = sf_ents
def compute_scale_factors(self, T):
"""
Compute the scale factors for the current parameters and return a dict.
The dictionary is of the form dict[exptId][varId] = scale_factor
"""
scale_factors = {}
scale_factor_entropies = {}
for exptId, expt in self.GetExperimentCollection().items():
scale_factors[exptId], scale_factor_entropies[exptId] =\
self._compute_sf_and_sfent_for_expt(expt, T)
return scale_factors, scale_factor_entropies
def _compute_sf_and_sfent_for_expt(self, expt, T):
# Compute the scale factors for a given experiment
scale_factors = {}
scale_factor_entropies = {}
exptData = expt.GetData()
expt_integral_data = expt.GetIntegralDataSets()
fixed_sf = expt.get_fixed_sf()
sf_groups = expt.get_sf_groups()
for group in sf_groups:
# Do any of the variables in this group have fixed scale factors?
fixed = set(group).intersection(set(fixed_sf.keys()))
fixedAt = set([fixed_sf[var] for var in fixed])
# We'll need to index the scale factor entropies on the *group*
# that shares a scale factor, since we only have one entropy per
# shared scale factor. So we need to index on the group of
# variables. We sort the group and make it hashable to avoid any
# double-counting.
hash_group = expt._hashable_group(group)
if len(fixedAt) == 1:
value = fixedAt.pop()
for var in group:
scale_factors[var] = value
scale_factor_entropies[hash_group] = 0
continue
elif len(fixedAt) > 1:
raise ValueError('Shared scale factors fixed at '
'inconsistent values in experiment '
'%s!' % expt.GetName())
# Finally, compute the scale factor for this group
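            # The shared scale factor computed below is the least-squares
            # optimum B = sum(theory*data/sigma**2) / sum(theory**2/sigma**2),
            # so we accumulate those two dot products over every measured point.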
theoryDotData, theoryDotTheory = 0, 0
# For discrete data
for calc in exptData:
# Pull out the vars we have measured for this calculation
for var in set(group).intersection(set(exptData[calc].keys())):
for indVar, (data, error) in exptData[calc][var].items():
theory = self.calcVals[calc][var][indVar]
theoryDotData += (theory * data) / error**2
theoryDotTheory += theory**2 / error**2
# Now for integral data
for dataset in expt_integral_data:
calc = dataset['calcKey']
theory_traj = self.calcVals[calc]['full trajectory']
data_traj = dataset['trajectory']
uncert_traj = dataset['uncert_traj']
interval = dataset['interval']
            interval_length = interval[1] - interval[0]  # don't clobber the temperature T
            for var in set(group).intersection(set(dataset['vars'])):
TheorDotT = self._integral_theorytheory(var, theory_traj,
uncert_traj,
interval)
                theoryDotTheory += TheorDotT/interval_length
TheorDotD = self._integral_theorydata(var, theory_traj,
data_traj,
uncert_traj,
interval)
                theoryDotData += TheorDotD/interval_length
# Now for the extrema data
for ds in expt.scaled_extrema_data:
calc = ds['calcKey']
if ds['type'] == 'max':
var = ds['var'] + '_maximum'
elif ds['type'] == 'min':
var = ds['var'] + '_minimum'
data, error = ds['val'], ds['sigma']
theory = self.calcVals[calc][var]\
[ds['minTime'],ds['maxTime']][1]
theoryDotData += (theory * data) / error**2
theoryDotTheory += theory**2 / error**2
for var in group:
if theoryDotTheory != 0:
scale_factors[var] = theoryDotData/theoryDotTheory
else:
scale_factors[var] = 1
entropy = expt.compute_sf_entropy(hash_group, theoryDotTheory,
theoryDotData, T)
scale_factor_entropies[hash_group] = entropy
return scale_factors, scale_factor_entropies
def _integral_theorytheory(self, var, theory_traj, uncert_traj, interval):
def integrand(t):
theory = theory_traj.evaluate_interpolated_traj(var, t)
uncert = uncert_traj.evaluate_interpolated_traj(var, t)
return theory**2/uncert**2
val, error = scipy.integrate.quad(integrand, interval[0], interval[1],
limit=int(1e5))
return val
def _integral_theorydata(self, var, theory_traj, data_traj, uncert_traj,
interval):
def integrand(t):
theory = theory_traj.evaluate_interpolated_traj(var, t)
data = data_traj.evaluate_interpolated_traj(var, t)
uncert = uncert_traj.evaluate_interpolated_traj(var, t)
return theory*data/uncert**2
val, error = scipy.integrate.quad(integrand, interval[0], interval[1],
limit=int(1e5))
return val
def ComputeInternalVariableDerivs(self):
"""
compute_scale_factorsDerivs() -> dictionary
Returns the scale factor derivatives w.r.t. parameters
appropriate for each chemical in each
experiment, given the current parameters. The returned dictionary
is of the form: internalVarsDerivs['scaleFactors'] \
= dict[experiment][chemical][parametername] -> derivative.
"""
self.internalVarsDerivs['scaleFactors'] = {}
p = self.GetCalculationCollection().GetParameters()
for exptName, expt in self.GetExperimentCollection().items():
self.internalVarsDerivs['scaleFactors'][exptName] = {}
exptData = expt.GetData()
# Get the dependent variables measured in this experiment
exptDepVars = set()
for calc in exptData:
exptDepVars.update(set(expt.GetData()[calc].keys()))
# Now for the extrema data
for ds in expt.scaled_extrema_data:
exptDepVars.add(ds['var'])
for depVar in exptDepVars:
self.internalVarsDerivs['scaleFactors'][exptName][depVar] = {}
if depVar in expt.GetFixedScaleFactors():
for pname in p.keys():
self.internalVarsDerivs['scaleFactors'][exptName]\
[depVar][pname] = 0.0
continue
theoryDotData, theoryDotTheory = 0, 0
for calc in exptData:
if depVar in exptData[calc].keys():
for indVar, (data, error)\
in exptData[calc][depVar].items():
theory = self.calcVals[calc][depVar][indVar]
theoryDotData += (theory * data) / error**2
theoryDotTheory += theory**2 / error**2
for ds in expt.scaled_extrema_data:
if ds['type'] == 'max':
var = ds['var'] + '_maximum'
elif ds['type'] == 'min':
var = ds['var'] + '_minimum'
data, error = ds['val'], ds['sigma']
theory = self.calcVals[ds['calcKey']][var]\
[ds['minTime'],ds['maxTime']][1]
theoryDotData += (theory * data) / error**2
theoryDotTheory += theory**2 / error**2
# now get derivative of the scalefactor
for pname in p.keys():
theorysensDotData, theorysensDotTheory = 0, 0
for calc in exptData:
clc = self.calcColl.get(calc)
if depVar in exptData[calc].keys():
for indVar, (data, error)\
in exptData[calc][depVar].items():
theory = self.calcVals[calc][depVar][indVar]
# Default to 0 if sensitivity not calculated for
# that parameter (i.e. it's not in the
# Calculation)
theorysens = self.calcSensitivityVals[calc][depVar][indVar].get(pname, 0.0)
theorysensDotData += (theorysens * data) / error**2
theorysensDotTheory += (theorysens * theory) / error**2
for ds in expt.scaled_extrema_data:
if ds['type'] == 'max':
var = ds['var'] + '_maximum'
elif ds['type'] == 'min':
var = ds['var'] + '_minimum'
theory = self.calcVals[ds['calcKey']][var]\
[ds['minTime'],ds['maxTime']][1]
data, error = ds['val'], ds['sigma']
theorysens = self.calcSensitivityVals[ds['calcKey']][var][ds['minTime'],ds['maxTime']].get(pname, 0.0)
theorysensDotData += (theorysens * data) / error**2
theorysensDotTheory += (theorysens * theory) / error**2
deriv_dict = self.internalVarsDerivs['scaleFactors'][exptName][depVar]
try:
deriv_dict[pname] = theorysensDotData/theoryDotTheory\
- 2*theoryDotData*theorysensDotTheory/(theoryDotTheory)**2
except ZeroDivisionError:
deriv_dict[pname] = 0
return self.internalVarsDerivs['scaleFactors']
def jacobian_log_params_sens(self, log_params):
"""
Return a KeyedList of the derivatives of the model residuals w.r.t.
        the logarithms of the parameters.
The method uses the sensitivity integration. As such, it will only
work with ReactionNetworks.
The KeyedList is of the form:
kl.get(resId) = [dres/dlogp1, dres/dlogp2...]
"""
params = scipy.exp(log_params)
j = self.jacobian_sens(params)
j_log = j.copy()
j_log.update(scipy.asarray(j) * scipy.asarray(params))
return j_log
def jacobian_sens(self, params):
"""
Return a KeyedList of the derivatives of the model residuals w.r.t.
parameters.
The method uses the sensitivity integration. As such, it will only
work with ReactionNetworks.
The KeyedList is of the form:
kl[resId] = [dres/dp1, dres/dp2...]
"""
self.params.update(params)
# Calculate sensitivities
self.CalculateSensitivitiesForAllDataPoints(params)
self.ComputeInternalVariables()
self.ComputeInternalVariableDerivs()
# Calculate residual derivatives
deriv = [(resId, res.Dp(self.calcVals, self.calcSensitivityVals,
self.internalVars, self.internalVarsDerivs,
self.params))
for (resId, res) in self.residuals.items()]
return KeyedList(deriv)
def jacobian_fd(self, params, eps,
relativeScale=False, stepSizeCutoff=None):
"""
Return a KeyedList of the derivatives of the model residuals w.r.t.
parameters.
The method uses finite differences.
Inputs:
params -- Parameters about which to calculate the jacobian
eps -- Step size to take, may be vector or scalar.
relativeScale -- If true, the eps is taken to be the fractional
change in parameter to use in finite differences.
stepSizeCutoff -- Minimum step size to take.
"""
res = self.resDict(params)
orig_vals = scipy.array(params)
if stepSizeCutoff is None:
stepSizeCutoff = scipy.sqrt(_double_epsilon_)
if relativeScale:
eps_l = scipy.maximum(eps * abs(params), stepSizeCutoff)
else:
eps_l = scipy.maximum(eps * scipy.ones(len(params),scipy.float_),
stepSizeCutoff)
J = KeyedList() # will hold the result
for resId in res.keys():
J.set(resId, [])
# Two-sided finite difference
for ii in range(len(params)):
params[ii] = orig_vals[ii] + eps_l[ii]
resPlus = self.resDict(params)
params[ii] = orig_vals[ii] - eps_l[ii]
resMinus = self.resDict(params)
params[ii] = orig_vals[ii]
for resId in res.keys():
res_deriv = (resPlus[resId]-resMinus[resId])/(2.*eps_l[ii])
J.get(resId).append(res_deriv)
# NOTE: after call to ComputeResidualsWithScaleFactors the Model's
# parameters get updated, must reset this:
self.params.update(params)
return J
def GetJacobian(self,params):
"""
GetJacobian(parameters) -> dictionary
Gets a dictionary of the sensitivities at the time points of
the independent variables for the measured dependent variables
for each calculation and experiment.
Form:
dictionary[(experiment,calculation,dependent variable,
independent variable)] -> result
result is a vector of length number of parameters containing
the sensitivity at that time point, in the order of the ordered
parameters
"""
return self.jacobian_sens(params)
def Jacobian(self, params, epsf, relativeScale=False, stepSizeCutoff=None):
"""
Finite difference the residual dictionary to get a dictionary
for the Jacobian. It will be indexed the same as the residuals.
Note: epsf is either a scalar or an array.
If relativeScale is False then epsf is the stepsize used (it should
already be multiplied by typicalValues before Jacobian is called)
If relativeScale is True then epsf is multiplied by params.
The two previous statements hold for both scalar and vector valued
epsf.
"""
return self.jacobian_fd(params, epsf,
relativeScale, stepSizeCutoff)
def GetJandJtJ(self,params):
j = self.GetJacobian(params)
mn = scipy.zeros((len(params),len(params)),scipy.float_)
for paramind in range(0,len(params)):
for paramind1 in range(0,len(params)):
sum = 0.0
for kys in j.keys():
sum = sum + j.get(kys)[paramind]*j.get(kys)[paramind1]
mn[paramind][paramind1] = sum
return j,mn
def GetJandJtJInLogParameters(self,params):
# Formula below is exact if you have perfect data. If you don't
# have perfect data (residuals != 0) you get an extra term when you
# compute d^2(cost)/(dlogp[i]dlogp[j]) which is
# sum_resname (residual[resname] * jac[resname][j] * delta_jk * p[k])
# but can be ignored when residuals are zeros, and maybe should be
# ignored altogether because it can make the Hessian approximation
# non-positive definite
pnolog = scipy.exp(params)
jac, jtj = self.GetJandJtJ(pnolog)
for i in range(len(params)):
for j in range(len(params)):
jtj[i][j] = jtj[i][j]*pnolog[i]*pnolog[j]
res = self.resDict(pnolog)
for resname in self.residuals.keys():
for j in range(len(params)):
# extra term --- not including it
# jtj[j][j] += res[resname]*jac[resname][j]*pnolog[j]
jac.get(resname)[j] = jac.get(resname)[j]*pnolog[j]
return jac,jtj
def hessian_elem(self, func, f0, params, i, j, epsi, epsj,
relativeScale, stepSizeCutoff, verbose):
"""
Return the second partial derivative for func w.r.t. parameters i and j
f0: The value of the function at params
eps: Sets the stepsize to try
relativeScale: If True, step i is of size p[i] * eps, otherwise it is
eps
stepSizeCutoff: The minimum stepsize to take
"""
origPi, origPj = params[i], params[j]
if relativeScale:
# Steps sizes are given by eps*the value of the parameter,
# but the minimum step size is stepSizeCutoff
hi, hj = scipy.maximum((epsi*abs(origPi), epsj*abs(origPj)),
(stepSizeCutoff, stepSizeCutoff))
else:
hi, hj = epsi, epsj
if i == j:
params[i] = origPi + hi
fp = func(params)
params[i] = origPi - hi
fm = func(params)
element = (fp - 2*f0 + fm)/hi**2
else:
## f(xi + hi, xj + h)
params[i] = origPi + hi
params[j] = origPj + hj
fpp = func(params)
## f(xi + hi, xj - hj)
params[i] = origPi + hi
params[j] = origPj - hj
fpm = func(params)
## f(xi - hi, xj + hj)
params[i] = origPi - hi
params[j] = origPj + hj
fmp = func(params)
## f(xi - hi, xj - hj)
params[i] = origPi - hi
params[j] = origPj - hj
fmm = func(params)
element = (fpp - fpm - fmp + fmm)/(4 * hi * hj)
params[i], params[j] = origPi, origPj
self._notify(event = 'hessian element', i = i, j = j,
element = element)
if verbose:
print('hessian[%i, %i] = %g' % (i, j, element))
return element
def hessian(self, params, epsf, relativeScale = True,
stepSizeCutoff = None, jacobian = None,
verbose = False):
"""
Returns the hessian of the model.
epsf: Sets the stepsize to try
relativeScale: If True, step i is of size p[i] * eps, otherwise it is
eps
stepSizeCutoff: The minimum stepsize to take
jacobian: If the jacobian is passed, it will be used to estimate
the step size to take.
        verbose: If True, a message will be printed with each hessian element
calculated
"""
nOv = len(params)
if stepSizeCutoff is None:
stepSizeCutoff = scipy.sqrt(_double_epsilon_)
params = scipy.asarray(params)
if relativeScale:
eps = epsf * abs(params)
else:
eps = epsf * scipy.ones(len(params),scipy.float_)
# Make sure we don't take steps smaller than stepSizeCutoff
eps = scipy.maximum(eps, stepSizeCutoff)
if jacobian is not None:
# Turn off the relative scaling since that would overwrite all this
relativeScale = False
jacobian = scipy.asarray(jacobian)
if len(jacobian.shape) == 0:
resDict = self.resDict(params)
new_jacobian = scipy.zeros(len(params),scipy.float_)
for key, value in resDict.items():
new_jacobian += 2.0*value*scipy.array(jacobian[0][key])
jacobian = new_jacobian
elif len(jacobian.shape) == 2: # Need to sum up the total jacobian
residuals = scipy.asarray(self.res(params))
# Changed by rng7. I'm not sure what is meant by "sum up the
# total jacobian". The following line failed due to shape
# mismatch. From the context below, it seems that the dot
# product is appropriate.
#jacobian = 2.0*residuals*jacobian
jacobian = 2.0 * scipy.dot(residuals, jacobian)
# If parameters are independent, then
# epsilon should be (sqrt(2)*J[i])^-1
factor = 1.0/scipy.sqrt(2)
for i in range(nOv):
if jacobian[i] == 0.0:
eps[i] = 0.5*abs(params[i])
else:
# larger than stepSizeCutoff, but not more than
# half of the original parameter value
eps[i] = min(max(factor/abs(jacobian[i]), stepSizeCutoff),
0.5*abs(params[i]))
## compute cost at f(x)
f0 = self.cost(params)
hess = scipy.zeros((nOv, nOv), scipy.float_)
## compute all (numParams*(numParams + 1))/2 unique hessian elements
for i in range(nOv):
for j in range(i, nOv):
hess[i][j] = self.hessian_elem(self.cost, f0,
params, i, j,
eps[i], eps[j],
relativeScale, stepSizeCutoff,
verbose)
hess[j][i] = hess[i][j]
return hess
def hessian_log_params(self, params, eps,
relativeScale=False, stepSizeCutoff=1e-6,
verbose=False):
"""
Returns the hessian of the model in log parameters.
eps: Sets the stepsize to try
relativeScale: If True, step i is of size p[i] * eps, otherwise it is
eps
stepSizeCutoff: The minimum stepsize to take
        verbose: If True, a message will be printed with each hessian element
calculated
"""
nOv = len(params)
if scipy.isscalar(eps):
eps = scipy.ones(len(params), scipy.float_) * eps
## compute cost at f(x)
f0 = self.cost_log_params(scipy.log(params))
hess = scipy.zeros((nOv, nOv), scipy.float_)
## compute all (numParams*(numParams + 1))/2 unique hessian elements
for i in range(nOv):
for j in range(i, nOv):
hess[i][j] = self.hessian_elem(self.cost_log_params, f0,
scipy.log(params),
i, j, eps[i], eps[j],
relativeScale, stepSizeCutoff,
verbose)
hess[j][i] = hess[i][j]
return hess
def CalcHessianInLogParameters(self, params, eps, relativeScale = False,
stepSizeCutoff = 1e-6, verbose = False):
return self.hessian_log_params(params, eps, relativeScale,
stepSizeCutoff, verbose)
def CalcHessian(self, params, epsf, relativeScale = True,
stepSizeCutoff = None, jacobian = None, verbose = False):
"""
Finite difference the residual dictionary to get a dictionary
for the Hessian. It will be indexed the same as the residuals.
Note: epsf is either a scalar or an array.
If relativeScale is False then epsf is the stepsize used (it should
already be multiplied by typicalValues before Jacobian is called)
If relativeScale is True then epsf is multiplied by params.
The two previous statements hold for both scalar and vector valued
epsf.
"""
return self.hessian(params, epsf, relativeScale,
stepSizeCutoff, jacobian, verbose)
def CalcResidualResponseArray(self, j, h):
"""
Calculate the Residual Response array. This array represents the change
in a residual obtained by a finite change in a data value.
Inputs:
(self, j, h)
j -- jacobian matrix to use
h -- hessian matrix to use
Outputs:
response -- The response array
"""
j,h = scipy.asarray(j), scipy.asarray(h)
[m,n] = j.shape
response = scipy.zeros((m,m),scipy.float_)
        ident = scipy.eye(m, dtype=scipy.float_)
hinv = scipy.linalg.pinv2(h,1e-40)
tmp = scipy.dot(hinv,scipy.transpose(j))
tmp2 = scipy.dot(j,tmp)
response = ident - tmp2
return response
def CalcParameterResponseToResidualArray(self,j,h):
"""
Calculate the parameter response to residual array. This array
represents the change in parameter resulting from a change in data
(residual).
Inputs:
(self, j, h)
j -- jacobian matrix to use
h -- hessian matrix to use
Outputs:
response -- The response array
"""
j,h = scipy.asarray(j), scipy.asarray(h)
[m,n] = j.shape
response = scipy.zeros((n,m),scipy.float_)
hinv = scipy.linalg.pinv2(h,1e-40)
response = -scipy.dot(hinv,scipy.transpose(j))
return response
############################################################################
# Getting/Setting variables below
def SetExperimentCollection(self, exptColl):
self.exptColl = exptColl
for exptKey, expt in exptColl.items():
exptData = expt.GetData()
for calcKey, calcData in exptData.items():
for depVarKey, depVarData in calcData.items():
                    sortedData = sorted(depVarData.items())
for indVar, (value, uncert) in sortedData:
resName = (exptKey, calcKey, depVarKey, indVar)
res = Residuals.ScaledErrorInFit(resName, depVarKey,
calcKey, indVar, value,
uncert, exptKey)
self.residuals.setByKey(resName, res)
# Add in the PeriodChecks
for period in expt.GetPeriodChecks():
calcKey, depVarKey, indVarValue = period['calcKey'], \
period['depVarKey'], period['startTime']
resName = (exptKey, calcKey, depVarKey, indVarValue,
'PeriodCheck')
res = Residuals.PeriodCheckResidual(resName, calcKey, depVarKey,
indVarValue,
period['period'],
period['sigma'])
self.residuals.setByKey(resName, res)
# Add in the AmplitudeChecks
for amplitude in expt.GetAmplitudeChecks():
calcKey, depVarKey = amplitude['calcKey'], \
amplitude['depVarKey']
indVarValue0, indVarValue1 = amplitude['startTime'],\
amplitude['testTime']
resName = (exptKey, calcKey, depVarKey, indVarValue0,
indVarValue1, 'AmplitudeCheck')
res = Residuals.AmplitudeCheckResidual(resName, calcKey,
depVarKey, indVarValue0,
indVarValue1,
amplitude['period'],
amplitude['sigma'],
exptKey)
self.residuals.setByKey(resName, res)
# Add in the integral data
for ds in expt.GetIntegralDataSets():
for var in ds['vars']:
resName = (exptKey, ds['calcKey'], var, 'integral data')
res = Residuals.IntegralDataResidual(resName, var,
exptKey,
ds['calcKey'],
ds['trajectory'],
ds['uncert_traj'],
ds['interval'])
self.residuals.setByKey(resName, res)
for ds in expt.scaled_extrema_data:
ds['exptKey'] = expt.name
ds['key'] = '%s_%simum_%s_%s' % (ds['var'], ds['type'],
str(ds['minTime']),
str(ds['maxTime']))
res = Residuals.ScaledExtremum(**ds)
self.AddResidual(res)
def get_expts(self):
return self.exptColl
def set_var_optimizable(self, var, is_optimizable):
for calc in self.get_calcs().values():
try:
calc.set_var_optimizable(var, is_optimizable)
except KeyError:
pass
self.params = self.calcColl.GetParameters()
GetExperimentCollection = get_expts
def SetCalculationCollection(self, calcColl):
self.calcColl = calcColl
self.params = calcColl.GetParameters()
def get_calcs(self):
return self.calcColl
GetCalculationCollection = get_calcs
def GetScaleFactors(self):
return self.internalVars['scaleFactors']
def GetResiduals(self):
return self.residuals
def GetCalculatedValues(self):
return self.calcVals
def GetInternalVariables(self):
return self.internalVars
def add_parameter_bounds(self, param_id, pmin, pmax):
"""
Add bounds on a specific parameter.
Cost evaluations will raise an exception if these bounds are violated.
"""
self.parameter_bounds[param_id] = pmin, pmax
def check_parameter_bounds(self, params):
self.params.update(params)
for id, (pmin, pmax) in self.parameter_bounds.items():
if not pmin <= self.params.get(id) <= pmax:
err = 'Parameter %s has value %f, which is outside of given bounds %f to %f.' % (id, self.params.get(id), pmin, pmax)
raise Utility.SloppyCellException(err)
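# Illustrative sketch (not part of SloppyCell): a typical call sequence for
# fitting with a Model, assuming `expts`, `calcs`, and `params` were built
# elsewhere. Only methods defined above are used; 'k1' is a hypothetical
# parameter id.
def _example_evaluation(expts, calcs, params):
    m = Model(expts, calcs)
    m.add_parameter_bounds('k1', 1e-6, 1e3)
    c = m.cost(params)  # 0.5 * chi-squared
    grad = m.Force(params, epsf=1e-4, relativeScale=True)
    return c, grad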
|
"""basic_modules defines basic VisTrails Modules that are used in most
pipelines."""
from __future__ import division
import vistrails.core.cache.hasher
from vistrails.core.debug import format_exception
from vistrails.core.modules.module_registry import get_module_registry
from vistrails.core.modules.vistrails_module import Module, new_module, \
Converter, NotCacheable, ModuleError
from vistrails.core.modules.config import ConstantWidgetConfig, \
QueryWidgetConfig, ParamExpWidgetConfig, ModuleSettings, IPort, OPort, \
CIPort
import vistrails.core.system
from vistrails.core.utils import InstanceObject
from vistrails.core import debug
from abc import ABCMeta
from ast import literal_eval
from itertools import izip
import mimetypes
import os
import pickle
import re
import shutil
import zipfile
import urllib
try:
import hashlib
sha_hash = hashlib.sha1
except ImportError:
import sha
sha_hash = sha.new
version = '2.1.1'
name = 'Basic Modules'
identifier = 'org.vistrails.vistrails.basic'
old_identifiers = ['edu.utah.sci.vistrails.basic']
constant_config_path = "vistrails.gui.modules.constant_configuration"
query_config_path = "vistrails.gui.modules.query_configuration"
paramexp_config_path = "vistrails.gui.modules.paramexplore"
def get_port_name(port):
if hasattr(port, 'name'):
return port.name
else:
return port[0]
class meta_add_value_ports(type):
def __new__(cls, name, bases, dct):
"""This metaclass adds the 'value' input and output ports.
"""
mod = type.__new__(cls, name, bases, dct)
if '_input_ports' in mod.__dict__:
input_ports = mod._input_ports
if not any(get_port_name(port_info) == 'value'
for port_info in input_ports):
mod._input_ports = [('value', mod)]
mod._input_ports.extend(input_ports)
else:
mod._input_ports = [('value', mod)]
if '_output_ports' in mod.__dict__:
output_ports = mod._output_ports
if not any(get_port_name(port_info) == 'value'
for port_info in output_ports):
mod._output_ports = [('value', mod)]
mod._output_ports.extend(output_ports)
else:
mod._output_ports = [('value', mod)]
return mod
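# Effect of the metaclass above (illustrative): a subclass declaring
#     _input_ports = [('extra', 'String')]
# ends up with [('value', cls), ('extra', 'String')]; a subclass declaring no
# ports at all still gets the implicit ('value', cls) port on both sides.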
class Constant(Module):
"""Base class for all Modules that represent a constant value of
some type.
    When implementing your own constant, you have to adhere to the
following interface:
Implement the following methods:
translate_to_python(x): Given a string, translate_to_python
must return a python value that will be the value seen by the
execution modules.
For example, translate_to_python called on a float parameter
with value '3.15' will return float('3.15').
translate_to_string(): Return a string representation of the
current constant, which will eventually be passed to
translate_to_python.
validate(v): return True if given python value is a plausible
value for the constant. It should be implemented such that
validate(translate_to_python(x)) == True for all valid x
A constant must also expose its default value, through the field
default_value.
There are fields you are not allowed to use in your constant classes.
These are: 'id', 'interpreter', 'logging' and 'change_parameter'
You can also define the constant's own GUI widget.
See core/modules/constant_configuration.py for details.
"""
_settings = ModuleSettings(abstract=True)
_output_ports = [OPort("value_as_string", "String")]
__metaclass__ = meta_add_value_ports
@staticmethod
def validate(x):
raise NotImplementedError
@staticmethod
def translate_to_python(x):
raise NotImplementedError
def compute(self):
"""Constant.compute() only checks validity (and presence) of
input value."""
v = self.get_input("value")
b = self.validate(v)
if not b:
raise ModuleError(self, "Internal Error: Constant failed validation")
self.set_output("value", v)
self.set_output("value_as_string", self.translate_to_string(v))
def setValue(self, v):
self.set_output("value", self.translate_to_python(v))
self.upToDate = True
@staticmethod
def translate_to_string(v):
return str(v)
@staticmethod
def get_widget_class():
# return StandardConstantWidget
return None
@staticmethod
def query_compute(value_a, value_b, query_method):
if query_method == '==' or query_method is None:
return (value_a == value_b)
elif query_method == '!=':
return (value_a != value_b)
return False
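# Illustrative sketch (assumption, not part of VisTrails): a hand-written
# Constant subclass following the interface described in the docstring above.
class _ExampleIntPair(Constant):
    """A constant holding a pair of ints, serialized as 'a,b'."""
    _settings = ModuleSettings(abstract=True)
    default_value = (0, 0)
    @staticmethod
    def translate_to_python(x):
        a, b = x.split(',')
        return (int(a), int(b))
    @staticmethod
    def translate_to_string(v):
        return '%d,%d' % v
    @staticmethod
    def validate(v):
        return (isinstance(v, tuple) and len(v) == 2 and
                all(isinstance(i, int) for i in v))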
def new_constant(name, py_conversion=None, default_value=None, validation=None,
widget_type=None,
str_conversion=None, base_class=Constant,
compute=None, query_compute=None):
"""new_constant(name: str,
py_conversion: callable,
default_value: python_type,
validation: callable,
widget_type: (path, name) tuple or QWidget type,
str_conversion: callable,
base_class: class,
compute: callable,
query_compute: static callable) -> Module
new_constant dynamically creates a new Module derived from
Constant with given py_conversion and str_conversion functions, a
corresponding python type and a widget type. py_conversion is a
python callable that takes a string and returns a python value of
the type that the class should hold. str_conversion does the reverse.
This is the quickest way to create new Constant Modules."""
d = {}
if py_conversion is not None:
d["translate_to_python"] = py_conversion
elif base_class == Constant:
raise ValueError("Must specify translate_to_python for constant")
if validation is not None:
d["validate"] = validation
elif base_class == Constant:
raise ValueError("Must specify validation for constant")
if default_value is not None:
d["default_value"] = default_value
if str_conversion is not None:
d['translate_to_string'] = str_conversion
if compute is not None:
d['compute'] = compute
if query_compute is not None:
d['query_compute'] = query_compute
if widget_type is not None:
@staticmethod
def get_widget_class():
return widget_type
d['get_widget_class'] = get_widget_class
m = new_module(base_class, name, d)
m._input_ports = [('value', m)]
m._output_ports = [('value', m)]
return m
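# Illustrative sketch (assumption, not part of VisTrails): building a complex
# number constant with new_constant. The staticmethod wrappers keep the
# callables from being bound as instance methods on the generated class.
def _make_complex_example():
    return new_constant(
        'ComplexExample',
        py_conversion=staticmethod(complex),
        default_value=0j,
        validation=staticmethod(lambda x: isinstance(x, complex)),
        str_conversion=staticmethod(str))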
class Boolean(Constant):
_settings = ModuleSettings(
constant_widget='%s:BooleanWidget' % constant_config_path)
default_value = False
@staticmethod
def translate_to_python(x):
s = x.upper()
if s == 'TRUE':
return True
if s == 'FALSE':
return False
raise ValueError('Boolean from String in VisTrails should be either '
'"true" or "false", got "%s" instead' % x)
@staticmethod
def validate(x):
return isinstance(x, bool)
class Float(Constant):
_settings = ModuleSettings(constant_widgets=[
QueryWidgetConfig('%s:NumericQueryWidget' % query_config_path),
ParamExpWidgetConfig('%s:FloatExploreWidget' % paramexp_config_path)])
default_value = 0.0
@staticmethod
def translate_to_python(x):
return float(x)
@staticmethod
def validate(x):
return isinstance(x, (int, long, float))
@staticmethod
def query_compute(value_a, value_b, query_method):
value_a = float(value_a)
value_b = float(value_b)
if query_method == '==' or query_method is None:
return (value_a == value_b)
elif query_method == '<':
return (value_a < value_b)
elif query_method == '>':
return (value_a > value_b)
elif query_method == '<=':
return (value_a <= value_b)
elif query_method == '>=':
return (value_a >= value_b)
class Integer(Float):
_settings = ModuleSettings(constant_widgets=[
QueryWidgetConfig('%s:NumericQueryWidget' % query_config_path),
ParamExpWidgetConfig('%s:IntegerExploreWidget' % paramexp_config_path)])
default_value = 0
@staticmethod
def translate_to_python(x):
if x.startswith('0x'):
return int(x, 16)
else:
return int(x)
@staticmethod
def validate(x):
return isinstance(x, (int, long))
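# Integer accepts decimal and hexadecimal string representations, e.g.
# Integer.translate_to_python('42') == 42 and
# Integer.translate_to_python('0x2a') == 42.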
class String(Constant):
_settings = ModuleSettings(
configure_widget="vistrails.gui.modules.string_configure:TextConfigurationWidget",
constant_widgets=[
ConstantWidgetConfig('%s:MultiLineStringWidget' % constant_config_path,
widget_type='multiline'),
QueryWidgetConfig('%s:StringQueryWidget' % query_config_path)])
_output_ports = [OPort("value_as_string", "String", optional=True)]
default_value = ""
@staticmethod
def translate_to_python(x):
assert isinstance(x, (str, unicode))
return str(x)
@staticmethod
def validate(x):
return isinstance(x, str)
@staticmethod
def query_compute(value_a, value_b, query_method):
if query_method == '*[]*' or query_method is None:
return (value_b in value_a)
elif query_method == '==':
return (value_a == value_b)
elif query_method == '=~':
try:
m = re.match(value_b, value_a)
if m is not None:
                    return (m.end() == len(value_a))
except re.error:
pass
return False
try:
from IPython import display
except ImportError:
display = None
class PathObject(object):
def __init__(self, name):
self.name = name
self._ipython_repr = None
def __repr__(self):
return "PathObject(%r)" % self.name
__str__ = __repr__
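    # Delegate IPython's rich-display hooks (_repr_png_, _repr_html_, ...) to
    # an IPython Image when the path points to an image file, so a PathObject
    # renders inline in IPython notebooks.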
def __getattr__(self, name):
if name.startswith('_repr_') and name.endswith('_'):
if self._ipython_repr is None:
filetype, encoding = mimetypes.guess_type(self.name)
if filetype and filetype.startswith('image/'):
self._ipython_repr = display.Image(filename=self.name)
else:
self._ipython_repr = False
if self._ipython_repr is not False:
return getattr(self._ipython_repr, name)
raise AttributeError
class Path(Constant):
_settings = ModuleSettings(constant_widget=("%s:PathChooserWidget" %
constant_config_path))
_input_ports = [IPort("value", "Path"),
IPort("name", "String", optional=True)]
_output_ports = [OPort("value", "Path")]
@staticmethod
def translate_to_python(x):
return PathObject(x)
@staticmethod
def translate_to_string(x):
return str(x.name)
@staticmethod
def validate(v):
return isinstance(v, PathObject)
def get_name(self):
n = None
if self.has_input("value"):
n = self.get_input("value").name
if n is None:
self.check_input("name")
n = self.get_input("name")
return n
def set_results(self, n):
self.set_output("value", PathObject(n))
self.set_output("value_as_string", n)
def compute(self):
n = self.get_name()
self.set_results(n)
Path.default_value = PathObject('')
def path_parameter_hasher(p):
def get_mtime(path):
t = int(os.path.getmtime(path))
if os.path.isdir(path):
for subpath in os.listdir(path):
subpath = os.path.join(path, subpath)
if os.path.isdir(subpath):
t = max(t, get_mtime(subpath))
return t
h = vistrails.core.cache.hasher.Hasher.parameter_signature(p)
try:
# FIXME: This will break with aliases - I don't really care that much
t = get_mtime(p.strValue)
except OSError:
return h
hasher = sha_hash()
hasher.update(h)
hasher.update(str(t))
return hasher.digest()
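# The signature incorporates the path's modification time (for directories,
# the newest mtime among the directory and its subdirectories), so cached
# results are invalidated when the path is touched.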
class File(Path):
"""File is a VisTrails Module that represents a file stored on a
file system local to the machine where VisTrails is running."""
_settings = ModuleSettings(constant_signature=path_parameter_hasher,
constant_widget=("%s:FileChooserWidget" %
constant_config_path))
_input_ports = [IPort("value", "File"),
IPort("create_file", "Boolean", optional=True)]
_output_ports = [OPort("value", "File"),
OPort("local_filename", "String", optional=True)]
def compute(self):
n = self.get_name()
if (self.has_input("create_file") and self.get_input("create_file")):
vistrails.core.system.touch(n)
if not os.path.isfile(n):
raise ModuleError(self, 'File %r does not exist' % n)
self.set_results(n)
self.set_output("local_filename", n)
class Directory(Path):
_settings = ModuleSettings(constant_signature=path_parameter_hasher,
constant_widget=("%s:DirectoryChooserWidget" %
constant_config_path))
_input_ports = [IPort("value", "Directory"),
IPort("create_directory", "Boolean", optional=True)]
_output_ports = [OPort("value", "Directory"),
OPort("itemList", "List")]
def compute(self):
n = self.get_name()
if (self.has_input("create_directory") and
self.get_input("create_directory")):
try:
vistrails.core.system.mkdir(n)
except Exception, e:
raise ModuleError(self, 'mkdir: %s' % format_exception(e))
if not os.path.isdir(n):
raise ModuleError(self, 'Directory "%s" does not exist' % n)
self.set_results(n)
dir_list = os.listdir(n)
output_list = []
for item in dir_list:
full_path = os.path.join(n, item)
output_list.append(PathObject(full_path))
self.set_output('itemList', output_list)
class OutputPath(Path):
_settings = ModuleSettings(constant_widget=("%s:OutputPathChooserWidget" %
constant_config_path))
_input_ports = [IPort("value", "OutputPath")]
_output_ports = [OPort("value", "OutputPath")]
def get_name(self):
n = None
if self.has_input("value"):
n = self.get_input("value").name
if n is None:
self.check_input("name")
n = self.get_input("name")
return n
def set_results(self, n):
self.set_output("value", PathObject(n))
self.set_output("value_as_string", n)
def compute(self):
n = self.get_name()
self.set_results(n)
class FileSink(NotCacheable, Module):
"""FileSink takes a file and writes it to a user-specified
location in the file system. The file is stored at location
specified by the outputPath. The overwrite flag allows users to
specify whether an existing path should be overwritten."""
_input_ports = [IPort("file", File),
IPort("outputPath", OutputPath),
IPort("overwrite", Boolean, optional=True,
default=True),
IPort("publishFile", Boolean, optional=True)]
def compute(self):
input_file = self.get_input("file")
output_path = self.get_input("outputPath")
full_path = output_path.name
if os.path.isfile(full_path):
if self.get_input('overwrite'):
try:
os.remove(full_path)
except OSError, e:
msg = ('Could not delete existing path "%s" '
'(overwrite on)' % full_path)
raise ModuleError(self, msg)
else:
                raise ModuleError(self,
                                  "Could not copy file to '%s': file already "
                                  "exists" % full_path)
try:
vistrails.core.system.link_or_copy(input_file.name, full_path)
except OSError, e:
msg = "Could not create file '%s': %s" % (full_path, e)
raise ModuleError(self, msg)
        # publishFile defaults to True when the port is not set
        if (not self.has_input("publishFile") or
                self.get_input("publishFile")):
            if 'extra_info' in self.moduleInfo:
                if 'pathDumpCells' in self.moduleInfo['extra_info']:
folder = self.moduleInfo['extra_info']['pathDumpCells']
base_fname = os.path.basename(full_path)
(base_fname, file_extension) = os.path.splitext(base_fname)
base_fname = os.path.join(folder, base_fname)
# make a unique filename
filename = base_fname + file_extension
counter = 2
while os.path.exists(filename):
filename = base_fname + "_%d%s" % (counter,
file_extension)
counter += 1
try:
vistrails.core.system.link_or_copy(input_file.name, filename)
except OSError, e:
msg = "Could not publish file '%s' \n on '%s':" % (
full_path, filename)
# I am not sure whether we should raise an error
# I will just print a warning for now (Emanuele)
debug.warning("%s" % msg, e)
class DirectorySink(NotCacheable, Module):
"""DirectorySink takes a directory and writes it to a
user-specified location in the file system. The directory is
stored at location specified by the outputPath. The overwrite
flag allows users to specify whether an existing path should be
overwritten."""
_input_ports = [IPort("dir", Directory),
IPort("outputPath", OutputPath),
IPort("overwrite", Boolean, optional=True, default="True")]
def compute(self):
input_dir = self.get_input("dir")
output_path = self.get_input("outputPath")
full_path = output_path.name
if os.path.exists(full_path):
if self.get_input("overwrite"):
try:
if os.path.isfile(full_path):
os.remove(full_path)
else:
shutil.rmtree(full_path)
except OSError, e:
msg = ('Could not delete existing path "%s" '
'(overwrite on)' % full_path)
raise ModuleError(
self,
'%s\n%s' % (msg, format_exception(e)))
else:
msg = ('Could not write to existing path "%s" '
'(overwrite off)' % full_path)
raise ModuleError(self, msg)
try:
shutil.copytree(input_dir.name, full_path)
except OSError, e:
msg = 'Could not copy path from "%s" to "%s"' % \
(input_dir.name, full_path)
raise ModuleError(self, '%s\n%s' % (msg, format_exception(e)))
class WriteFile(Converter):
"""Writes a String to a temporary File.
"""
_input_ports = [IPort('in_value', String),
IPort('suffix', String, optional=True, default=""),
IPort('encoding', String, optional=True)]
_output_ports = [OPort('out_value', File)]
def compute(self):
contents = self.get_input('in_value')
suffix = self.force_get_input('suffix', '')
result = self.interpreter.filePool.create_file(suffix=suffix)
if self.has_input('encoding'):
contents = contents.decode('utf-8') # VisTrails uses UTF-8
# internally (I hope)
contents = contents.encode(self.get_input('encoding'))
with open(result.name, 'wb') as fp:
fp.write(contents)
self.set_output('out_value', result)
class ReadFile(Converter):
"""Reads a File to a String.
"""
_input_ports = [IPort('in_value', File),
IPort('encoding', String, optional=True)]
_output_ports = [OPort('out_value', String)]
def compute(self):
filename = self.get_input('in_value').name
with open(filename, 'rb') as fp:
contents = fp.read()
if self.has_input('encoding'):
contents = contents.decode(self.get_input('encoding'))
contents = contents.encode('utf-8') # VisTrails uses UTF-8
# internally (for now)
self.set_output('out_value', contents)
class Color(Constant):
    # A color value is stored as an InstanceObject wrapping a tuple, because
    # a bare tuple would be interpreted as a type(tuple) and confuse the
    # interpreter.
_settings = ModuleSettings(constant_widgets=[
'%s:ColorWidget' % constant_config_path,
ConstantWidgetConfig('%s:ColorEnumWidget' % \
constant_config_path,
widget_type='enum'),
QueryWidgetConfig('%s:ColorQueryWidget' % \
query_config_path),
ParamExpWidgetConfig('%s:RGBExploreWidget' % \
paramexp_config_path,
widget_type='rgb'),
ParamExpWidgetConfig('%s:HSVExploreWidget' % \
paramexp_config_path,
widget_type='hsv')])
_input_ports = [IPort("value", "Color")]
_output_ports = [OPort("value", "Color")]
default_value = InstanceObject(tuple=(1,1,1))
@staticmethod
def translate_to_python(x):
return InstanceObject(
tuple=tuple([float(a) for a in x.split(',')]))
@staticmethod
def translate_to_string(v):
return ','.join('%f' % c for c in v.tuple)
@staticmethod
def validate(x):
return isinstance(x, InstanceObject) and hasattr(x, 'tuple')
@staticmethod
def to_string(r, g, b):
return "%s,%s,%s" % (r,g,b)
@staticmethod
def query_compute(value_a, value_b, query_method):
# SOURCE: http://www.easyrgb.com/index.php?X=MATH
def rgb_to_xyz(r, g, b):
# r,g,b \in [0,1]
if r > 0.04045:
r = ( ( r + 0.055 ) / 1.055 ) ** 2.4
else:
r = r / 12.92
if g > 0.04045:
g = ( ( g + 0.055 ) / 1.055 ) ** 2.4
else:
g = g / 12.92
if b > 0.04045:
b = ( ( b + 0.055 ) / 1.055 ) ** 2.4
else:
b = b / 12.92
r *= 100
g *= 100
b *= 100
# Observer. = 2 deg, Illuminant = D65
x = r * 0.4124 + g * 0.3576 + b * 0.1805
y = r * 0.2126 + g * 0.7152 + b * 0.0722
z = r * 0.0193 + g * 0.1192 + b * 0.9505
return (x,y,z)
def xyz_to_cielab(x,y,z):
# Observer= 2 deg, Illuminant= D65
ref_x, ref_y, ref_z = (95.047, 100.000, 108.883)
x /= ref_x
y /= ref_y
z /= ref_z
if x > 0.008856:
x = x ** ( 1/3.0 )
else:
x = ( 7.787 * x ) + ( 16 / 116.0 )
if y > 0.008856:
y = y ** ( 1/3.0 )
else:
y = ( 7.787 * y ) + ( 16 / 116.0 )
if z > 0.008856:
z = z ** ( 1/3.0 )
else:
z = ( 7.787 * z ) + ( 16 / 116.0 )
L = ( 116 * y ) - 16
a = 500 * ( x - y )
b = 200 * ( y - z )
return (L, a, b)
def rgb_to_cielab(r,g,b):
return xyz_to_cielab(*rgb_to_xyz(r,g,b))
value_a_rgb = (float(a) for a in value_a.split(','))
value_b_rgb = (float(b) for b in value_b.split(','))
value_a_lab = rgb_to_cielab(*value_a_rgb)
value_b_lab = rgb_to_cielab(*value_b_rgb)
        # CIE76 color difference: Delta E is the Euclidean distance
        # between the two colors in CIELAB space
diff = sum((v_1 - v_2) ** 2
for v_1, v_2 in izip(value_a_lab, value_b_lab)) ** (0.5)
# print "CIE 76 DIFFERENCE:", diff
if query_method is None:
query_method = '2.3'
return diff < float(query_method)
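# Note on Color.query_compute: with query_method None the threshold defaults
# to 2.3, commonly cited as the just-noticeable difference for CIE76;
# identical colors always match, e.g.
#     Color.query_compute('1,0,0', '1,0,0', None)  ->  True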
class StandardOutput(NotCacheable, Module):
"""StandardOutput is a VisTrails Module that simply prints the
value connected on its port to standard output. It is intended
mostly as a debugging device."""
_input_ports = [IPort("value", 'Variant')]
def compute(self):
v = self.get_input("value")
if isinstance(v, PathObject):
try:
fp = open(v.name, 'rb')
except IOError:
print v
else:
try:
CHUNKSIZE = 2048
chunk = fp.read(CHUNKSIZE)
if chunk:
sys.stdout.write(chunk)
while len(chunk) == CHUNKSIZE:
chunk = fp.read(CHUNKSIZE)
if chunk:
sys.stdout.write(chunk)
sys.stdout.write('\n')
finally:
fp.close()
else:
print v
class Tuple(Module):
"""Tuple represents a tuple of values. Tuple might not be well
integrated with the rest of VisTrails, so don't use it unless
you know what you're doing."""
_settings = ModuleSettings(configure_widget=
"vistrails.gui.modules.tuple_configuration:TupleConfigurationWidget")
def __init__(self):
Module.__init__(self)
self.input_ports_order = []
self.values = tuple()
def transfer_attrs(self, module):
Module.transfer_attrs(self, module)
self.input_ports_order = [p.name for p in module.input_port_specs]
def compute(self):
values = tuple([self.get_input(p)
for p in self.input_ports_order])
self.values = values
self.set_output("value", values)
class Untuple(Module):
"""Untuple takes a tuple and returns the individual values. It
reverses the actions of Tuple.
"""
_settings = ModuleSettings(configure_widget=
"vistrails.gui.modules.tuple_configuration:UntupleConfigurationWidget")
def __init__(self):
Module.__init__(self)
self.output_ports_order = []
def transfer_attrs(self, module):
Module.transfer_attrs(self, module)
self.output_ports_order = [p.name for p in module.output_port_specs]
# output_ports are reversed for display purposes...
self.output_ports_order.reverse()
def compute(self):
        if self.has_input("tuple"):
            # the input is a Tuple module; read its computed values
            values = self.get_input("tuple").values
else:
values = self.get_input("value")
for p, value in izip(self.output_ports_order, values):
self.set_output(p, value)
class ConcatenateString(Module):
"""ConcatenateString takes many strings as input and produces the
concatenation as output. Useful for constructing filenames, for
example.
This class will probably be replaced with a better API in the
future."""
fieldCount = 4
_input_ports = [IPort("str%d" % i, "String")
for i in xrange(1, 1 + fieldCount)]
_output_ports = [OPort("value", "String")]
def compute(self):
result = "".join(self.force_get_input('str%d' % i, '')
for i in xrange(1, 1 + self.fieldCount))
self.set_output('value', result)
class Not(Module):
"""Not inverts a Boolean.
"""
_input_ports = [IPort('input', 'Boolean')]
_output_ports = [OPort('value', 'Boolean')]
def compute(self):
value = self.get_input('input')
self.set_output('value', not value)
class ListType(object):
__metaclass__ = ABCMeta
ListType.register(list)
try:
import numpy
except ImportError:
numpy = None
else:
ListType.register(numpy.ndarray)
class List(Constant):
_settings = ModuleSettings(configure_widget=
"vistrails.gui.modules.list_configuration:ListConfigurationWidget")
_input_ports = [IPort("value", "List"),
IPort("head", "Variant", depth=1),
IPort("tail", "List")]
_output_ports = [OPort("value", "List")]
default_value = []
def __init__(self):
Constant.__init__(self)
self.input_ports_order = []
def transfer_attrs(self, module):
Module.transfer_attrs(self, module)
self.input_ports_order = [p.name for p in module.input_port_specs]
@staticmethod
def validate(x):
return isinstance(x, ListType)
@staticmethod
def translate_to_python(v):
return literal_eval(v)
@staticmethod
def translate_to_string(v, dims=None):
if dims is None:
if numpy is not None and isinstance(v, numpy.ndarray):
dims = v.ndim
else:
dims = 1
if dims == 1:
return '[%s]' % ', '.join(repr(c)
for c in v)
else:
return '[%s]' % ', '.join(List.translate_to_string(c, dims-1)
for c in v)
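    # For example, List.translate_to_string([1, 'a']) == "[1, 'a']"; for a
    # numpy array, dims defaults to its ndim so rows are serialized
    # recursively.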
def compute(self):
head, middle, items, tail = [], [], [], []
got_value = False
if self.has_input('value'):
# run the regular compute here
Constant.compute(self)
middle = self.outputPorts['value']
got_value = True
if self.has_input('head'):
head = self.get_input('head')
got_value = True
if self.input_ports_order:
items = [self.get_input(p)
for p in self.input_ports_order]
got_value = True
if self.has_input('tail'):
tail = self.get_input('tail')
got_value = True
if not got_value:
self.get_input('value')
self.set_output('value', head + middle + items + tail)
class Dictionary(Constant):
default_value = {}
_input_ports = [CIPort("addPair", "Module, Module"),
IPort("addPairs", "List")]
@staticmethod
def translate_to_python(v):
return literal_eval(v)
@staticmethod
def validate(x):
return isinstance(x, dict)
def compute(self):
d = {}
if self.has_input('value'):
Constant.compute(self)
d.update(self.outputPorts['value'])
if self.has_input('addPair'):
pairs_list = self.get_input_list('addPair')
d.update(pairs_list)
if self.has_input('addPairs'):
d.update(self.get_input('addPairs'))
self.set_output("value", d)
class Null(Module):
"""Null is the class of None values."""
_settings = ModuleSettings(hide_descriptor=True)
def compute(self):
self.set_output("value", None)
class Unpickle(Module):
"""Unpickles a string.
"""
_settings = ModuleSettings(hide_descriptor=True)
_input_ports = [IPort('input', 'String')]
_output_ports = [OPort('result', 'Variant')]
def compute(self):
value = self.get_input('input')
self.set_output('result', pickle.loads(value))
class CodeRunnerMixin(object):
def __init__(self):
self.output_ports_order = []
super(CodeRunnerMixin, self).__init__()
def transfer_attrs(self, module):
Module.transfer_attrs(self, module)
self.output_ports_order = [p.name for p in module.output_port_specs]
# output_ports are reversed for display purposes...
self.output_ports_order.reverse()
def run_code(self, code_str,
use_input=False,
use_output=False):
"""run_code runs a piece of code as a VisTrails module.
use_input and use_output control whether to use the inputport
and output port dictionary as local variables inside the
execution."""
import vistrails.core.packagemanager
def fail(msg):
raise ModuleError(self, msg)
def cache_this():
self.is_cacheable = lambda *args, **kwargs: True
locals_ = locals()
if use_input:
for k in self.inputPorts:
locals_[k] = self.get_input(k)
if use_output:
for output_portname in self.output_ports_order:
if output_portname not in self.inputPorts:
locals_[output_portname] = None
_m = vistrails.core.packagemanager.get_package_manager()
reg = get_module_registry()
locals_.update({'fail': fail,
'package_manager': _m,
'cache_this': cache_this,
'registry': reg,
'self': self})
if 'source' in locals_:
del locals_['source']
# Python 2.6 needs code to end with newline
exec code_str + '\n' in locals_, locals_
if use_output:
for k in self.output_ports_order:
if locals_.get(k) is not None:
self.set_output(k, locals_[k])
class PythonSource(CodeRunnerMixin, NotCacheable, Module):
"""PythonSource is a Module that executes an arbitrary piece of
Python code.
It is especially useful for one-off pieces of 'glue' in a
pipeline.
If you want a PythonSource execution to fail, call
fail(error_message).
If you want a PythonSource execution to be cached, call
cache_this().
"""
_settings = ModuleSettings(
configure_widget=("vistrails.gui.modules.python_source_configure:"
"PythonSourceConfigurationWidget"))
_input_ports = [IPort('source', 'String', optional=True, default="")]
    _output_ports = [OPort('self', 'Module')]
def compute(self):
s = urllib.unquote(str(self.get_input('source')))
self.run_code(s, use_input=True, use_output=True)
def zip_extract_file(archive, filename_in_archive, output_filename):
z = zipfile.ZipFile(archive)
try:
fileinfo = z.getinfo(filename_in_archive) # Might raise KeyError
output_dirname, output_filename = os.path.split(output_filename)
fileinfo.filename = output_filename
z.extract(fileinfo, output_dirname)
finally:
z.close()
def zip_extract_all_files(archive, output_path):
z = zipfile.ZipFile(archive)
try:
z.extractall(output_path)
finally:
z.close()
class Unzip(Module):
"""Unzip extracts a file from a ZIP archive."""
_input_ports = [IPort('archive_file', 'File'),
IPort('filename_in_archive', 'String')]
_output_ports = [OPort('file', 'File')]
def compute(self):
self.check_input("archive_file")
self.check_input("filename_in_archive")
filename_in_archive = self.get_input("filename_in_archive")
archive_file = self.get_input("archive_file")
if not os.path.isfile(archive_file.name):
raise ModuleError(self, "archive file does not exist")
suffix = self.interpreter.filePool.guess_suffix(filename_in_archive)
output = self.interpreter.filePool.create_file(suffix=suffix)
zip_extract_file(archive_file.name,
filename_in_archive,
output.name)
self.set_output("file", output)
class UnzipDirectory(Module):
"""UnzipDirectory extracts every file from a ZIP archive."""
_input_ports = [IPort('archive_file', 'File')]
_output_ports = [OPort('directory', 'Directory')]
def compute(self):
self.check_input("archive_file")
archive_file = self.get_input("archive_file")
if not os.path.isfile(archive_file.name):
raise ModuleError(self, "archive file does not exist")
output = self.interpreter.filePool.create_directory()
zip_extract_all_files(archive_file.name,
output.name)
self.set_output("directory", output)
class Round(Converter):
"""Turns a Float into an Integer.
"""
_settings = ModuleSettings(hide_descriptor=True)
_input_ports = [IPort('in_value', 'Float'),
IPort('floor', 'Boolean', optional=True, default="True")]
_output_ports = [OPort('out_value', 'Integer')]
def compute(self):
fl = self.get_input('in_value')
floor = self.get_input('floor')
if floor:
integ = int(fl) # just strip the decimals
else:
integ = int(fl + 0.5) # nearest
self.set_output('out_value', integ)
class TupleToList(Converter):
"""Turns a Tuple into a List.
"""
_settings = ModuleSettings(hide_descriptor=True)
_input_ports = [IPort('in_value', 'Variant')]
_output_ports = [OPort('out_value', 'List')]
@classmethod
def can_convert(cls, sub_descs, super_descs):
if len(sub_descs) <= 1:
return False
reg = get_module_registry()
return super_descs == [reg.get_descriptor(List)]
def compute(self):
tu = self.get_input('in_value')
if not isinstance(tu, tuple):
raise ModuleError(self, "Input is not a tuple")
self.set_output('out_value', list(tu))
class Variant(Module):
"""
    Variant is tracked internally so that a variant type can be output on an
    output port. For input ports, the Module type should be used instead.
"""
_settings = ModuleSettings(abstract=True)
class Generator(object):
"""
Used to keep track of list iteration, it will execute a module once for
each input in the list/generator.
"""
_settings = ModuleSettings(abstract=True)
generators = []
def __init__(self, size=None, module=None, generator=None, port=None,
accumulated=False):
self.module = module
self.generator = generator
self.port = port
self.size = size
self.accumulated = accumulated
if generator and module not in Generator.generators:
# add to global list of generators
# they will be topologically ordered
module.generator = generator
Generator.generators.append(module)
def next(self):
""" return next value - the generator """
value = self.module.get_output(self.port)
if isinstance(value, Generator):
value = value.all()
return value
def all(self):
""" exhausts next() for Streams
"""
items = []
item = self.next()
while item is not None:
items.append(item)
item = self.next()
return items
@staticmethod
def stream():
""" executes all generators until inputs are exhausted
this makes sure branching and multiple sinks are executed correctly
"""
result = True
if not Generator.generators:
return
while result is not None:
for g in Generator.generators:
result = g.next()
Generator.generators = []
class Assert(Module):
"""
Assert is a simple module that conditionally stops the execution.
"""
_input_ports = [IPort('condition', 'Boolean')]
def compute(self):
condition = self.get_input('condition')
if not condition:
raise ModuleError(self, "Assert: condition is False",
abort=True)
class AssertEqual(Module):
"""
AssertEqual works like Assert but compares two values.
It is provided for convenience.
"""
_input_ports = [IPort('value1', 'Variant'),
IPort('value2', 'Variant')]
def compute(self):
values = (self.get_input('value1'),
self.get_input('value2'))
if values[0] != values[1]:
reprs = tuple(repr(v) for v in values)
reprs = tuple('%s...' % v[:17] if len(v) > 20 else v
for v in reprs)
raise ModuleError(self, "AssertEqual: values are different: "
"%r, %r" % reprs,
abort=True)
class StringFormat(Module):
"""
Builds a string from objects using Python's str.format().
"""
_settings = ModuleSettings(configure_widget=
'vistrails.gui.modules.stringformat_configuration:'
'StringFormatConfigurationWidget')
_input_ports = [IPort('format', String)]
_output_ports = [OPort('value', String)]
@staticmethod
def list_placeholders(fmt):
placeholders = set()
nb = 0
i = 0
n = len(fmt)
while i < n:
if fmt[i] == '{':
i += 1
                if fmt[i] == '{': # escaped brace '{{'
i += 1
continue
                e = fmt.index('}', i) # ValueError if no closing brace
f = e
for c in (':', '!', '[', '.'):
c = fmt.find(c, i)
if c != -1:
f = min(f, c)
if i == f:
nb += 1
else:
arg = fmt[i:f]
try:
arg = int(arg)
except ValueError:
placeholders.add(arg)
else:
nb = max(nb, arg + 1)
i = e
i += 1
return nb, placeholders
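    # For example (cf. TestStringFormat below):
    #     StringFormat.list_placeholders('{} {a} {0}')  ->  (1, set(['a']))
    # i.e. one positional argument and one named placeholder.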
def compute(self):
fmt = self.get_input('format')
args, kwargs = StringFormat.list_placeholders(fmt)
f_args = [self.get_input('_%d' % n)
for n in xrange(args)]
f_kwargs = dict((n, self.get_input(n))
for n in kwargs)
self.set_output('value', fmt.format(*f_args, **f_kwargs))
def init_constant(m):
reg = get_module_registry()
reg.add_module(m)
reg.add_input_port(m, "value", m)
reg.add_output_port(m, "value", m)
_modules = [Module, Converter, Constant, Boolean, Float, Integer, String, List,
Path, File, Directory, OutputPath,
FileSink, DirectorySink, WriteFile, ReadFile, StandardOutput,
Tuple, Untuple, ConcatenateString, Not, Dictionary, Null, Variant,
Unpickle, PythonSource, Unzip, UnzipDirectory, Color,
Round, TupleToList, Assert, AssertEqual, StringFormat]
def initialize(*args, **kwargs):
# initialize the sub_module modules, too
import vistrails.core.modules.sub_module
import vistrails.core.modules.output_modules
_modules.extend(vistrails.core.modules.sub_module._modules)
_modules.extend(vistrails.core.modules.output_modules._modules)
def handle_module_upgrade_request(controller, module_id, pipeline):
from vistrails.core.upgradeworkflow import UpgradeWorkflowHandler
reg = get_module_registry()
def outputName_remap(old_conn, new_module):
ops = []
old_src_module = pipeline.modules[old_conn.source.moduleId]
op_desc = reg.get_descriptor(OutputPath)
new_x = (old_src_module.location.x + new_module.location.x) / 2.0
new_y = (old_src_module.location.y + new_module.location.y) / 2.0
op_module = \
controller.create_module_from_descriptor(op_desc, new_x, new_y)
ops.append(('add', op_module))
create_new_connection = UpgradeWorkflowHandler.create_new_connection
new_conn_1 = create_new_connection(controller,
old_src_module,
old_conn.source,
op_module,
"name")
ops.append(('add', new_conn_1))
new_conn_2 = create_new_connection(controller,
op_module,
"value",
new_module,
"outputPath")
ops.append(('add', new_conn_2))
return ops
module_remap = {'FileSink':
[(None, '1.6', None,
{'dst_port_remap':
{'overrideFile': 'overwrite',
'outputName': outputName_remap},
'function_remap':
{'overrideFile': 'overwrite',
'outputName': 'outputPath'}})],
'GetItemsFromDirectory':
[(None, '1.6', 'Directory',
{'dst_port_remap':
{'dir': 'value'},
'src_port_remap':
{'itemlist': 'itemList'},
})],
'InputPort':
[(None, '1.6', None,
{'dst_port_remap': {'old_name': None}})],
'OutputPort':
[(None, '1.6', None,
{'dst_port_remap': {'old_name': None}})],
'PythonSource':
[(None, '1.6', None, {})],
'Tuple':
[(None, '2.1.1', None, {})],
'StandardOutput':
[(None, '2.1.1', None, {})],
'List':
[(None, '2.1.1', None, {})],
'AssertEqual':
[(None, '2.1.1', None, {})],
'Converter':
[(None, '2.1.1', None, {})],
}
return UpgradeWorkflowHandler.remap_module(controller, module_id, pipeline,
module_remap)
class NewConstant(Constant):
"""
A new Constant module to be used inside the FoldWithModule module.
"""
def setValue(self, v):
self.set_output("value", v)
self.upToDate = True
def create_constant(value):
"""
Creates a NewConstant module, to be used for the ModuleConnector.
"""
constant = NewConstant()
constant.setValue(value)
return constant
def get_module(value, signature=None):
"""
Creates a module for value, in order to do the type checking.
"""
if isinstance(value, Constant):
return type(value)
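    # bool is tested before int because bool is a subclass of int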
elif isinstance(value, bool):
return Boolean
elif isinstance(value, str):
return String
elif isinstance(value, int):
return Integer
elif isinstance(value, float):
return Float
if isinstance(value, list):
return List
elif isinstance(value, tuple):
# Variant supports signatures of any length
if signature is None or \
(len(signature) == 1 and signature[0][0] == Variant):
return (Variant,)*len(value)
v_modules = ()
for element in xrange(len(value)):
v_modules += (get_module(value[element], signature[element]),)
if None in v_modules: # Identification failed
return None
return v_modules
else: # pragma: no cover
debug.warning("Could not identify the type of the list element.")
debug.warning("Type checking is not going to be done inside "
"iterated module.")
return None
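# For example, get_module(3.5) returns Float, and get_module(('a', 1))
# returns (Variant, Variant) when no signature (or a single Variant
# signature) is given.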
import sys
import unittest
class TestConcatenateString(unittest.TestCase):
@staticmethod
def concatenate(**kwargs):
from vistrails.tests.utils import execute, intercept_result
with intercept_result(ConcatenateString, 'value') as results:
errors = execute([
('ConcatenateString', 'org.vistrails.vistrails.basic', [
(name, [('String', value)])
for name, value in kwargs.iteritems()
]),
])
if errors:
return None
return results
def test_concatenate(self):
"""Concatenates strings"""
self.assertEqual(self.concatenate(
str1="hello ", str2="world"),
["hello world"])
self.assertEqual(self.concatenate(
str3="hello world"),
["hello world"])
self.assertEqual(self.concatenate(
str2="hello ", str4="world"),
["hello world"])
self.assertEqual(self.concatenate(
str1="hello", str3=" ", str4="world"),
["hello world"])
def test_empty(self):
"""Runs ConcatenateString with no input"""
self.assertEqual(self.concatenate(), [""])
class TestNot(unittest.TestCase):
def run_pipeline(self, functions):
from vistrails.tests.utils import execute, intercept_result
with intercept_result(Not, 'value') as results:
errors = execute([
('Not', 'org.vistrails.vistrails.basic',
functions),
])
return errors, results
def test_true(self):
errors, results = self.run_pipeline([
('input', [('Boolean', 'True')])])
self.assertFalse(errors)
self.assertEqual(len(results), 1)
self.assertIs(results[0], False)
def test_false(self):
errors, results = self.run_pipeline([
('input', [('Boolean', 'False')])])
self.assertFalse(errors)
self.assertEqual(len(results), 1)
self.assertIs(results[0], True)
def test_notset(self):
errors, results = self.run_pipeline([])
self.assertTrue(errors)
class TestList(unittest.TestCase):
@staticmethod
def build_list(value=None, head=None, tail=None):
from vistrails.tests.utils import execute, intercept_result
with intercept_result(List, 'value') as results:
functions = []
def add(n, v, t):
if v is not None:
for e in v:
functions.append(
(n, [(t, e)])
)
add('value', value, 'List')
add('head', head, 'String')
add('tail', tail, 'List')
errors = execute([
('List', 'org.vistrails.vistrails.basic', functions),
])
if errors:
return None
# List is a Constant, so the interpreter will set the result 'value'
# from the 'value' input port automatically
# Ignore these first results
return results[-1]
def test_simple(self):
"""Tests the default ports of the List module"""
self.assertEqual(self.build_list(
value=['["a", "b", "c"]']),
["a", "b", "c"])
self.assertEqual(self.build_list(
head=["d"],
value=['["a", "b", "c"]']),
["d", "a", "b", "c"])
self.assertEqual(self.build_list(
head=["d"],
value=['["a", "b", "c"]'],
tail=['["e", "f"]']),
["d", "a", "b", "c", "e", "f"])
self.assertEqual(self.build_list(
value=['[]'],
tail=['[]']),
[])
def test_multiple(self):
"""Tests setting multiple values on a port"""
# Multiple values on 'head'
self.assertEqual(self.build_list(
head=["a", "b"]),
["a", "b"])
self.assertEqual(self.build_list(
head=["a", "b"],
value=['["c", "d"]']),
["a", "b", "c", "d"])
# Multiple values on 'value'
res = self.build_list(value=['["a", "b"]', '["c", "d"]'])
# Connections of List type are merged
self.assertEqual(res, ["a", "b", "c", "d"])
def test_items(self):
"""Tests the multiple 'itemN' ports"""
from vistrails.tests.utils import execute, intercept_result
def list_with_items(nb_items, **kwargs):
with intercept_result(List, 'value') as results:
errors = execute([
('List', 'org.vistrails.vistrails.basic', [
(k, [('String', v)])
for k, v in kwargs.iteritems()
]),
],
add_port_specs=[
(0, 'input', 'item%d' % i,
'(org.vistrails.vistrails.basic:Module)')
for i in xrange(nb_items)
])
if errors:
return None
return results[-1]
self.assertEqual(
list_with_items(2, head="one", item0="two", item1="three"),
["one", "two", "three"])
# All 'itemN' ports have to be set
self.assertIsNone(
list_with_items(3, head="one", item0="two", item2="three"))
class TestPythonSource(unittest.TestCase):
def test_simple(self):
"""A simple PythonSource returning a string"""
import urllib2
from vistrails.tests.utils import execute, intercept_result
source = 'customout = "nb is %d" % customin'
source = urllib2.quote(source)
with intercept_result(PythonSource, 'customout') as results:
self.assertFalse(execute([
('PythonSource', 'org.vistrails.vistrails.basic', [
('source', [('String', source)]),
('customin', [('Integer', '42')])
]),
('String', 'org.vistrails.vistrails.basic', []),
],
[
(0, 'customout', 1, 'value'),
],
add_port_specs=[
(0, 'input', 'customin',
'org.vistrails.vistrails.basic:Integer'),
(0, 'output', 'customout',
'org.vistrails.vistrails.basic:String'),
]))
self.assertEqual(results[-1], "nb is 42")
class TestNumericConversions(unittest.TestCase):
def test_full(self):
from vistrails.tests.utils import execute, intercept_result
with intercept_result(Round, 'out_value') as results:
self.assertFalse(execute([
('Integer', 'org.vistrails.vistrails.basic', [
('value', [('Integer', '5')])
]),
('Float', 'org.vistrails.vistrails.basic', []),
('PythonCalc', 'org.vistrails.vistrails.pythoncalc', [
('value2', [('Float', '2.7')]),
('op', [('String', '+')]),
]),
('Round', 'org.vistrails.vistrails.basic', [
('floor', [('Boolean', 'True')]),
]),
],
[
(0, 'value', 1, 'value'),
(1, 'value', 2, 'value1'),
(2, 'value', 3, 'in_value'),
]))
self.assertEqual(results, [7])
class TestUnzip(unittest.TestCase):
def test_unzip_file(self):
from vistrails.tests.utils import execute, intercept_result
from vistrails.core.system import vistrails_root_directory
zipfile = os.path.join(vistrails_root_directory(),
'tests', 'resources',
'test_archive.zip')
with intercept_result(Unzip, 'file') as outfiles:
self.assertFalse(execute([
('Unzip', 'org.vistrails.vistrails.basic', [
('archive_file', [('File', zipfile)]),
('filename_in_archive', [('String', 'file1.txt')]),
]),
]))
self.assertEqual(len(outfiles), 1)
with open(outfiles[0].name, 'rb') as outfile:
self.assertEqual(outfile.read(), "some random\ncontent")
def test_unzip_all(self):
from vistrails.tests.utils import execute, intercept_result
from vistrails.core.system import vistrails_root_directory
zipfile = os.path.join(vistrails_root_directory(),
'tests', 'resources',
'test_archive.zip')
with intercept_result(UnzipDirectory, 'directory') as outdir:
self.assertFalse(execute([
('UnzipDirectory', 'org.vistrails.vistrails.basic', [
('archive_file', [('File', zipfile)]),
]),
]))
self.assertEqual(len(outdir), 1)
self.assertEqual(
[(d, f) for p, d, f in os.walk(outdir[0].name)],
[(['subdir'], ['file1.txt']),
([], ['file2.txt'])])
from vistrails.core.configuration import get_vistrails_configuration
class TestTypechecking(unittest.TestCase):
@classmethod
def setUpClass(cls):
conf = get_vistrails_configuration()
cls.error_all = conf.showConnectionErrors
cls.error_variant = conf.showVariantErrors
@classmethod
def tearDownClass(cls):
conf = get_vistrails_configuration()
conf.showConnectionErrors = cls.error_all
conf.showVariantErrors = cls.error_variant
@staticmethod
def set_settings(error_all, error_variant):
conf = get_vistrails_configuration()
conf.showConnectionErrors = error_all
conf.showVariantErrors = error_variant
def run_test_pipeline(self, result, expected_results, *args, **kwargs):
from vistrails.tests.utils import execute, intercept_result
for error_all, error_variant, expected in expected_results:
self.set_settings(error_all, error_variant)
with intercept_result(*result) as results:
error = execute(*args, **kwargs)
if not expected:
self.assertTrue(error)
else:
self.assertFalse(error)
self.assertEqual(results, expected)
def test_basic(self):
import urllib2
# Base case: no typing error
# This should succeed in every case
self.run_test_pipeline(
(PythonSource, 'r'),
[(False, False, ["test"]),
(True, True, ["test"])],
[
('PythonSource', 'org.vistrails.vistrails.basic', [
('source', [('String', urllib2.quote('o = "test"'))]),
]),
('PythonSource', 'org.vistrails.vistrails.basic', [
('source', [('String', urllib2.quote('r = i'))])
]),
],
[
(0, 'o', 1, 'i'),
],
add_port_specs=[
(0, 'output', 'o',
'org.vistrails.vistrails.basic:String'),
(1, 'input', 'i',
'org.vistrails.vistrails.basic:String'),
(1, 'output', 'r',
'org.vistrails.vistrails.basic:String')
])
def test_fake(self):
import urllib2
# A module is lying, declaring a String but returning an int
# This should fail with showConnectionErrors=True (not the
# default)
self.run_test_pipeline(
(PythonSource, 'r'),
[(False, False, [42]),
(False, True, [42]),
(True, True, False)],
[
('PythonSource', 'org.vistrails.vistrails.basic', [
('source', [('String', urllib2.quote('o = 42'))]),
]),
('PythonSource', 'org.vistrails.vistrails.basic', [
('source', [('String', urllib2.quote('r = i'))])
]),
],
[
(0, 'o', 1, 'i'),
],
add_port_specs=[
(0, 'output', 'o',
'org.vistrails.vistrails.basic:String'),
(1, 'input', 'i',
'org.vistrails.vistrails.basic:String'),
(1, 'output', 'r',
'org.vistrails.vistrails.basic:String')
])
def test_inputport(self):
import urllib2
# This test uses an InputPort module, whose output port should not be
# considered a Variant port (although it is)
self.run_test_pipeline(
(PythonSource, 'r'),
[(False, False, [42]),
(False, True, [42]),
(True, True, [42])],
[
('InputPort', 'org.vistrails.vistrails.basic', [
('ExternalPipe', [('Integer', '42')]),
]),
('PythonSource', 'org.vistrails.vistrails.basic', [
('source', [('String', urllib2.quote('r = i'))])
]),
],
[
(0, 'InternalPipe', 1, 'i'),
],
add_port_specs=[
(1, 'input', 'i',
'org.vistrails.vistrails.basic:String'),
(1, 'output', 'r',
'org.vistrails.vistrails.basic:String'),
])
class TestStringFormat(unittest.TestCase):
def test_list_placeholders(self):
fmt = 'a {} b}} {c!s} {{d e}} {}f'
self.assertEqual(StringFormat.list_placeholders(fmt),
(2, set(['c'])))
def run_format(self, fmt, expected, **kwargs):
from vistrails.tests.utils import execute, intercept_result
functions = [('format', [('String', fmt)])]
functions.extend((n, [(t, v)])
for n, (t, v) in kwargs.iteritems())
with intercept_result(StringFormat, 'value') as results:
self.assertFalse(execute([
('StringFormat', 'org.vistrails.vistrails.basic',
functions),
],
add_port_specs=[
(0, 'input', n, t)
for n, (t, v) in kwargs.iteritems()
]))
self.assertEqual(results, [expected])
def test_format(self):
self.run_format('{{ {a} }} b {c!s}', '{ 42 } b 12',
a=('Integer', '42'),
c=('Integer', '12'))
# Python 2.6 doesn't support {}
@unittest.skipIf(sys.version_info < (2, 7), "No {} support on 2.6")
def test_format_27(self):
self.run_format('{} {}', 'a b',
_0=('String', 'a'), _1=('String', 'b'))
self.run_format('{{ {a} {} {b!s}', '{ 42 b 12',
a=('Integer', '42'), _0=('String', 'b'),
b=('Integer', '12'))
self.run_format('{} {} {!r}{ponc} {:.2f}', "hello dear 'world'! 1.33",
_0=('String', 'hello'), _1=('String', 'dear'),
_2=('String', 'world'), _3=('Float', '1.333333333'),
ponc=('String', '!'))
class TestConstantMetaclass(unittest.TestCase):
def test_meta(self):
"""Tests the __metaclass__ for Constant.
"""
mod1_in = [('value', 'basic:String'), IPort('other', 'basic:Float')]
mod1_out = [('someport', 'basic:Integer')]
class Mod1(Constant):
_input_ports = mod1_in
_output_ports = mod1_out
self.assertEqual(Mod1._input_ports, mod1_in)
self.assertEqual(Mod1._output_ports, [('value', Mod1)] + mod1_out)
mod2_in = [('another', 'basic:String')]
class Mod2(Mod1):
_input_ports = mod2_in
self.assertEqual(Mod2._input_ports, [('value', Mod2)] + mod2_in)
self.assertEqual(Mod2._output_ports, [('value', Mod2)])
class Mod3(Mod1):
_output_ports = []
self.assertEqual(Mod3._input_ports, [('value', Mod3)])
self.assertEqual(Mod3._output_ports, [('value', Mod3)])
"""Base class for all the objects in SymPy"""
from __future__ import print_function, division
from .assumptions import BasicMeta, ManagedProperties
from .cache import cacheit
from .sympify import _sympify, sympify, SympifyError
from .compatibility import (iterable, Iterator, ordered,
string_types, with_metaclass, zip_longest, range)
from .singleton import S
from inspect import getmro
class Basic(with_metaclass(ManagedProperties)):
"""
Base class for all objects in SymPy.
Conventions:
    1) Always use ``.args`` when accessing parameters of some instance:
>>> from sympy import cot
>>> from sympy.abc import x, y
>>> cot(x).args
(x,)
>>> cot(x).args[0]
x
>>> (x*y).args
(x, y)
>>> (x*y).args[1]
y
2) Never use internal methods or variables (the ones prefixed with ``_``):
>>> cot(x)._args # do not use this, use cot(x).args instead
(x,)
"""
__slots__ = ['_mhash', # hash value
'_args', # arguments
'_assumptions'
]
# To be overridden with True in the appropriate subclasses
is_number = False
is_Atom = False
is_Symbol = False
is_Dummy = False
is_Wild = False
is_Function = False
is_Add = False
is_Mul = False
is_Pow = False
is_Number = False
is_Float = False
is_Rational = False
is_Integer = False
is_NumberSymbol = False
is_Order = False
is_Derivative = False
is_Piecewise = False
is_Poly = False
is_AlgebraicNumber = False
is_Relational = False
is_Equality = False
is_Boolean = False
is_Not = False
is_Matrix = False
is_Vector = False
is_Point = False
def __new__(cls, *args):
obj = object.__new__(cls)
obj._assumptions = cls.default_assumptions
obj._mhash = None # will be set by __hash__ method.
obj._args = args # all items in args must be Basic objects
return obj
def copy(self):
return self.func(*self.args)
def __reduce_ex__(self, proto):
""" Pickling support."""
return type(self), self.__getnewargs__(), self.__getstate__()
def __getnewargs__(self):
return self.args
def __getstate__(self):
return {}
def __setstate__(self, state):
for k, v in state.items():
setattr(self, k, v)
def __hash__(self):
        # hash cannot be cached using cache_it because infinite recursion
        # occurs, as hash is needed for setting cache dictionary keys
h = self._mhash
if h is None:
h = hash((type(self).__name__,) + self._hashable_content())
self._mhash = h
return h
def _hashable_content(self):
"""Return a tuple of information about self that can be used to
compute the hash. If a class defines additional attributes,
like ``name`` in Symbol, then this method should be updated
accordingly to return such relevant attributes.
Defining more than _hashable_content is necessary if __eq__ has
been defined by a class. See note about this in Basic.__eq__."""
return self._args
@property
def assumptions0(self):
"""
Return object `type` assumptions.
For example:
Symbol('x', real=True)
Symbol('x', integer=True)
are different objects. In other words, besides Python type (Symbol in
        this case), the initial assumptions also form part of their typeinfo.
Examples
========
>>> from sympy import Symbol
>>> from sympy.abc import x
>>> x.assumptions0
{'commutative': True}
>>> x = Symbol("x", positive=True)
>>> x.assumptions0
{'commutative': True, 'complex': True, 'hermitian': True,
'imaginary': False, 'negative': False, 'nonnegative': True,
'nonpositive': False, 'nonzero': True, 'positive': True, 'real': True,
'zero': False}
"""
return {}
def compare(self, other):
"""
Return -1, 0, 1 if the object is smaller, equal, or greater than other.
Not in the mathematical sense. If the object is of a different type
from the "other" then their classes are ordered according to
the sorted_classes list.
Examples
========
>>> from sympy.abc import x, y
>>> x.compare(y)
-1
>>> x.compare(x)
0
>>> y.compare(x)
1
"""
# all redefinitions of __cmp__ method should start with the
# following lines:
if self is other:
return 0
n1 = self.__class__
n2 = other.__class__
c = (n1 > n2) - (n1 < n2)
if c:
return c
#
st = self._hashable_content()
ot = other._hashable_content()
c = (len(st) > len(ot)) - (len(st) < len(ot))
if c:
return c
for l, r in zip(st, ot):
l = Basic(*l) if isinstance(l, frozenset) else l
r = Basic(*r) if isinstance(r, frozenset) else r
if isinstance(l, Basic):
c = l.compare(r)
else:
c = (l > r) - (l < r)
if c:
return c
return 0
@staticmethod
def _compare_pretty(a, b):
from sympy.series.order import Order
if isinstance(a, Order) and not isinstance(b, Order):
return 1
if not isinstance(a, Order) and isinstance(b, Order):
return -1
if a.is_Rational and b.is_Rational:
l = a.p * b.q
r = b.p * a.q
return (l > r) - (l < r)
else:
from sympy.core.symbol import Wild
p1, p2, p3 = Wild("p1"), Wild("p2"), Wild("p3")
r_a = a.match(p1 * p2**p3)
if r_a and p3 in r_a:
a3 = r_a[p3]
r_b = b.match(p1 * p2**p3)
if r_b and p3 in r_b:
b3 = r_b[p3]
c = Basic.compare(a3, b3)
if c != 0:
return c
return Basic.compare(a, b)
@classmethod
def fromiter(cls, args, **assumptions):
"""
Create a new object from an iterable.
This is a convenience function that allows one to create objects from
any iterable, without having to convert to a list or tuple first.
Examples
========
>>> from sympy import Tuple
>>> Tuple.fromiter(i for i in range(5))
(0, 1, 2, 3, 4)
"""
return cls(*tuple(args), **assumptions)
@classmethod
def class_key(cls):
"""Nice order of classes. """
return 5, 0, cls.__name__
@cacheit
def sort_key(self, order=None):
"""
Return a sort key.
Examples
========
>>> from sympy.core import S, I
>>> sorted([S(1)/2, I, -I], key=lambda x: x.sort_key())
[1/2, -I, I]
>>> S("[x, 1/x, 1/x**2, x**2, x**(1/2), x**(1/4), x**(3/2)]")
[x, 1/x, x**(-2), x**2, sqrt(x), x**(1/4), x**(3/2)]
>>> sorted(_, key=lambda x: x.sort_key())
[x**(-2), 1/x, x**(1/4), sqrt(x), x, x**(3/2), x**2]
"""
# XXX: remove this when issue 5169 is fixed
def inner_key(arg):
if isinstance(arg, Basic):
return arg.sort_key(order)
else:
return arg
args = self._sorted_args
args = len(args), tuple([inner_key(arg) for arg in args])
return self.class_key(), args, S.One.sort_key(), S.One
def __eq__(self, other):
"""Return a boolean indicating whether a == b on the basis of
their symbolic trees.
This is the same as a.compare(b) == 0 but faster.
Notes
=====
If a class that overrides __eq__() needs to retain the
implementation of __hash__() from a parent class, the
interpreter must be told this explicitly by setting __hash__ =
<ParentClass>.__hash__. Otherwise the inheritance of __hash__()
will be blocked, just as if __hash__ had been explicitly set to
None.
References
==========
from http://docs.python.org/dev/reference/datamodel.html#object.__hash__
"""
from sympy import Pow
if self is other:
return True
from .function import AppliedUndef, UndefinedFunction as UndefFunc
if isinstance(self, UndefFunc) and isinstance(other, UndefFunc):
if self.class_key() == other.class_key():
return True
else:
return False
if type(self) is not type(other):
# issue 6100 a**1.0 == a like a**2.0 == a**2
if isinstance(self, Pow) and self.exp == 1:
return self.base == other
if isinstance(other, Pow) and other.exp == 1:
return self == other.base
try:
other = _sympify(other)
except SympifyError:
return False # sympy != other
if isinstance(self, AppliedUndef) and isinstance(other,
AppliedUndef):
if self.class_key() != other.class_key():
return False
elif type(self) is not type(other):
return False
return self._hashable_content() == other._hashable_content()
def __ne__(self, other):
"""a != b -> Compare two symbolic trees and see whether they are different
this is the same as:
a.compare(b) != 0
but faster
"""
return not self.__eq__(other)
def dummy_eq(self, other, symbol=None):
"""
Compare two expressions and handle dummy symbols.
Examples
========
>>> from sympy import Dummy
>>> from sympy.abc import x, y
>>> u = Dummy('u')
>>> (u**2 + 1).dummy_eq(x**2 + 1)
True
>>> (u**2 + 1) == (x**2 + 1)
False
>>> (u**2 + y).dummy_eq(x**2 + y, x)
True
>>> (u**2 + y).dummy_eq(x**2 + y, y)
False
"""
dummy_symbols = [s for s in self.free_symbols if s.is_Dummy]
if not dummy_symbols:
return self == other
elif len(dummy_symbols) == 1:
dummy = dummy_symbols.pop()
else:
raise ValueError(
"only one dummy symbol allowed on the left-hand side")
if symbol is None:
symbols = other.free_symbols
if not symbols:
return self == other
elif len(symbols) == 1:
symbol = symbols.pop()
else:
raise ValueError("specify a symbol in which expressions should be compared")
tmp = dummy.__class__()
return self.subs(dummy, tmp) == other.subs(symbol, tmp)
# Note, we always use the default ordering (lex) in __str__ and __repr__,
# regardless of the global setting. See issue 5487.
def __repr__(self):
from sympy.printing import sstr
return sstr(self, order=None)
def __str__(self):
from sympy.printing import sstr
return sstr(self, order=None)
def atoms(self, *types):
"""Returns the atoms that form the current object.
By default, only objects that are truly atomic and can't
be divided into smaller pieces are returned: symbols, numbers,
and number symbols like I and pi. It is possible to request
atoms of any type, however, as demonstrated below.
Examples
========
>>> from sympy import I, pi, sin
>>> from sympy.abc import x, y
>>> (1 + x + 2*sin(y + I*pi)).atoms()
set([1, 2, I, pi, x, y])
If one or more types are given, the results will contain only
those types of atoms.
Examples
========
>>> from sympy import Number, NumberSymbol, Symbol
>>> (1 + x + 2*sin(y + I*pi)).atoms(Symbol)
set([x, y])
>>> (1 + x + 2*sin(y + I*pi)).atoms(Number)
set([1, 2])
>>> (1 + x + 2*sin(y + I*pi)).atoms(Number, NumberSymbol)
set([1, 2, pi])
>>> (1 + x + 2*sin(y + I*pi)).atoms(Number, NumberSymbol, I)
set([1, 2, I, pi])
Note that I (imaginary unit) and zoo (complex infinity) are special
types of number symbols and are not part of the NumberSymbol class.
The type can be given implicitly, too:
>>> (1 + x + 2*sin(y + I*pi)).atoms(x) # x is a Symbol
set([x, y])
Be careful to check your assumptions when using the implicit option
since ``S(1).is_Integer = True`` but ``type(S(1))`` is ``One``, a special type
of sympy atom, while ``type(S(2))`` is type ``Integer`` and will find all
integers in an expression:
>>> from sympy import S
>>> (1 + x + 2*sin(y + I*pi)).atoms(S(1))
set([1])
>>> (1 + x + 2*sin(y + I*pi)).atoms(S(2))
set([1, 2])
Finally, arguments to atoms() can select more than atomic atoms: any
sympy type (loaded in core/__init__.py) can be listed as an argument
and those types of "atoms" as found in scanning the arguments of the
expression recursively:
>>> from sympy import Function, Mul
>>> from sympy.core.function import AppliedUndef
>>> f = Function('f')
>>> (1 + f(x) + 2*sin(y + I*pi)).atoms(Function)
set([f(x), sin(y + I*pi)])
>>> (1 + f(x) + 2*sin(y + I*pi)).atoms(AppliedUndef)
set([f(x)])
>>> (1 + x + 2*sin(y + I*pi)).atoms(Mul)
set([I*pi, 2*sin(y + I*pi)])
"""
if types:
types = tuple(
[t if isinstance(t, type) else type(t) for t in types])
else:
types = (Atom,)
result = set()
for expr in preorder_traversal(self):
if isinstance(expr, types):
result.add(expr)
return result
@property
def free_symbols(self):
"""Return from the atoms of self those which are free symbols.
For most expressions, all symbols are free symbols. For some classes
        this is not true; e.g., Integrals use Symbols for the dummy variables
which are bound variables, so Integral has a method to return all
symbols except those. Derivative keeps track of symbols with respect
to which it will perform a derivative; those are
bound variables, too, so it has its own free_symbols method.
Any other method that uses bound variables should implement a
free_symbols method."""
return set().union(*[a.free_symbols for a in self.args])
@property
def canonical_variables(self):
"""Return a dictionary mapping any variable defined in
``self.variables`` as underscore-suffixed numbers
corresponding to their position in ``self.variables``. Enough
underscores are added to ensure that there will be no clash with
existing free symbols.
Examples
========
>>> from sympy import Lambda
>>> from sympy.abc import x
>>> Lambda(x, 2*x).canonical_variables
{x: 0_}
"""
from sympy import Symbol
if not hasattr(self, 'variables'):
return {}
u = "_"
while any(s.name.endswith(u) for s in self.free_symbols):
u += "_"
name = '%%i%s' % u
V = self.variables
return dict(list(zip(V, [Symbol(name % i, **v.assumptions0)
for i, v in enumerate(V)])))
def rcall(self, *args):
"""Apply on the argument recursively through the expression tree.
This method is used to simulate a common abuse of notation for
        operators. For instance, in SymPy the following will not work:
``(x+Lambda(y, 2*y))(z) == x+2*z``,
however you can use
>>> from sympy import Lambda
>>> from sympy.abc import x, y, z
>>> (x + Lambda(y, 2*y)).rcall(z)
x + 2*z
"""
return Basic._recursive_call(self, args)
@staticmethod
def _recursive_call(expr_to_call, on_args):
from sympy import Symbol
def the_call_method_is_overridden(expr):
for cls in getmro(type(expr)):
if '__call__' in cls.__dict__:
return cls != Basic
if callable(expr_to_call) and the_call_method_is_overridden(expr_to_call):
if isinstance(expr_to_call, Symbol): # XXX When you call a Symbol it is
return expr_to_call # transformed into an UndefFunction
else:
return expr_to_call(*on_args)
elif expr_to_call.args:
args = [Basic._recursive_call(
sub, on_args) for sub in expr_to_call.args]
return type(expr_to_call)(*args)
else:
return expr_to_call
def is_hypergeometric(self, k):
from sympy.simplify import hypersimp
return hypersimp(self, k) is not None
@property
def is_comparable(self):
"""Return True if self can be computed to a real number
(or already is a real number) with precision, else False.
Examples
========
>>> from sympy import exp_polar, pi, I
>>> (I*exp_polar(I*pi/2)).is_comparable
True
>>> (I*exp_polar(I*pi*2)).is_comparable
False
A False result does not mean that `self` cannot be rewritten
into a form that would be comparable. For example, the
difference computed below is zero but without simplification
it does not evaluate to a zero with precision:
>>> e = 2**pi*(1 + 2**pi)
>>> dif = e - e.expand()
>>> dif.is_comparable
False
>>> dif.n(2)._prec
1
"""
is_real = self.is_real
if is_real is False:
return False
is_number = self.is_number
if is_number is False:
return False
n, i = [p.evalf(2) if not p.is_Number else p
for p in self.as_real_imag()]
if not i.is_Number or not n.is_Number:
return False
if i:
# if _prec = 1 we can't decide and if not,
# the answer is False because numbers with
# imaginary parts can't be compared
# so return False
return False
else:
return n._prec != 1
@property
def func(self):
"""
The top-level function in an expression.
The following should hold for all objects::
>> x == x.func(*x.args)
Examples
========
>>> from sympy.abc import x
>>> a = 2*x
>>> a.func
<class 'sympy.core.mul.Mul'>
>>> a.args
(2, x)
>>> a.func(*a.args)
2*x
>>> a == a.func(*a.args)
True
"""
return self.__class__
@property
def args(self):
"""Returns a tuple of arguments of 'self'.
Examples
========
>>> from sympy import cot
>>> from sympy.abc import x, y
>>> cot(x).args
(x,)
>>> cot(x).args[0]
x
>>> (x*y).args
(x, y)
>>> (x*y).args[1]
y
Notes
=====
Never use self._args, always use self.args.
Only use _args in __new__ when creating a new function.
Don't override .args() from Basic (so that it's easy to
change the interface in the future if needed).
"""
return self._args
@property
def _sorted_args(self):
"""
The same as ``args``. Derived classes which don't fix an
order on their arguments should override this method to
produce the sorted representation.
"""
return self.args
def as_poly(self, *gens, **args):
"""Converts ``self`` to a polynomial or returns ``None``.
>>> from sympy import sin
>>> from sympy.abc import x, y
>>> print((x**2 + x*y).as_poly())
Poly(x**2 + x*y, x, y, domain='ZZ')
>>> print((x**2 + x*y).as_poly(x, y))
Poly(x**2 + x*y, x, y, domain='ZZ')
>>> print((x**2 + sin(y)).as_poly(x, y))
None
"""
from sympy.polys import Poly, PolynomialError
try:
poly = Poly(self, *gens, **args)
if not poly.is_Poly:
return None
else:
return poly
except PolynomialError:
return None
def as_content_primitive(self, radical=False, clear=True):
"""A stub to allow Basic args (like Tuple) to be skipped when computing
the content and primitive components of an expression.
See docstring of Expr.as_content_primitive
"""
return S.One, self
def subs(self, *args, **kwargs):
"""
Substitutes old for new in an expression after sympifying args.
`args` is either:
- two arguments, e.g. foo.subs(old, new)
- one iterable argument, e.g. foo.subs(iterable). The iterable may be
o an iterable container with (old, new) pairs. In this case the
replacements are processed in the order given with successive
patterns possibly affecting replacements already made.
o a dict or set whose key/value items correspond to old/new pairs.
In this case the old/new pairs will be sorted by op count and in
case of a tie, by number of args and the default_sort_key. The
resulting sorted list is then processed as an iterable container
(see previous).
If the keyword ``simultaneous`` is True, the subexpressions will not be
evaluated until all the substitutions have been made.
Examples
========
>>> from sympy import pi, exp, limit, oo
>>> from sympy.abc import x, y
>>> (1 + x*y).subs(x, pi)
pi*y + 1
>>> (1 + x*y).subs({x:pi, y:2})
1 + 2*pi
>>> (1 + x*y).subs([(x, pi), (y, 2)])
1 + 2*pi
>>> reps = [(y, x**2), (x, 2)]
>>> (x + y).subs(reps)
6
>>> (x + y).subs(reversed(reps))
x**2 + 2
>>> (x**2 + x**4).subs(x**2, y)
y**2 + y
To replace only the x**2 but not the x**4, use xreplace:
>>> (x**2 + x**4).xreplace({x**2: y})
x**4 + y
To delay evaluation until all substitutions have been made,
set the keyword ``simultaneous`` to True:
>>> (x/y).subs([(x, 0), (y, 0)])
0
>>> (x/y).subs([(x, 0), (y, 0)], simultaneous=True)
nan
This has the added feature of not allowing subsequent substitutions
to affect those already made:
>>> ((x + y)/y).subs({x + y: y, y: x + y})
1
>>> ((x + y)/y).subs({x + y: y, y: x + y}, simultaneous=True)
y/(x + y)
In order to obtain a canonical result, unordered iterables are
sorted by count_op length, number of arguments and by the
default_sort_key to break any ties. All other iterables are left
unsorted.
>>> from sympy import sqrt, sin, cos
>>> from sympy.abc import a, b, c, d, e
>>> A = (sqrt(sin(2*x)), a)
>>> B = (sin(2*x), b)
>>> C = (cos(2*x), c)
>>> D = (x, d)
>>> E = (exp(x), e)
>>> expr = sqrt(sin(2*x))*sin(exp(x)*x)*cos(2*x) + sin(2*x)
>>> expr.subs(dict([A, B, C, D, E]))
a*c*sin(d*e) + b
The resulting expression represents a literal replacement of the
old arguments with the new arguments. This may not reflect the
limiting behavior of the expression:
>>> (x**3 - 3*x).subs({x: oo})
nan
>>> limit(x**3 - 3*x, x, oo)
oo
If the substitution will be followed by numerical
evaluation, it is better to pass the substitution to
evalf as
>>> (1/x).evalf(subs={x: 3.0}, n=21)
0.333333333333333333333
rather than
>>> (1/x).subs({x: 3.0}).evalf(21)
0.333333333333333314830
as the former will ensure that the desired level of precision is
obtained.
See Also
========
replace: replacement capable of doing wildcard-like matching,
parsing of match, and conditional replacements
xreplace: exact node replacement in expr tree; also capable of
using matching rules
evalf: calculates the given formula to a desired level of precision
"""
from sympy.core.containers import Dict
from sympy.utilities import default_sort_key
from sympy import Dummy, Symbol
unordered = False
if len(args) == 1:
sequence = args[0]
if isinstance(sequence, set):
unordered = True
elif isinstance(sequence, (Dict, dict)):
unordered = True
sequence = sequence.items()
elif not iterable(sequence):
from sympy.utilities.misc import filldedent
raise ValueError(filldedent("""
When a single argument is passed to subs
it should be a dictionary of old: new pairs or an iterable
of (old, new) tuples."""))
elif len(args) == 2:
sequence = [args]
else:
raise ValueError("subs accepts either 1 or 2 arguments")
sequence = list(sequence)
for i in range(len(sequence)):
s = list(sequence[i])
for j, si in enumerate(s):
try:
si = sympify(si, strict=True)
except SympifyError:
if type(si) is str:
si = Symbol(si)
else:
# if it can't be sympified, skip it
sequence[i] = None
break
s[j] = si
else:
sequence[i] = None if _aresame(*s) else tuple(s)
sequence = list(filter(None, sequence))
if unordered:
sequence = dict(sequence)
if not all(k.is_Atom for k in sequence):
d = {}
for o, n in sequence.items():
try:
ops = o.count_ops(), len(o.args)
except TypeError:
ops = (0, 0)
d.setdefault(ops, []).append((o, n))
newseq = []
for k in sorted(d.keys(), reverse=True):
newseq.extend(
sorted([v[0] for v in d[k]], key=default_sort_key))
sequence = [(k, sequence[k]) for k in newseq]
del newseq, d
else:
sequence = sorted([(k, v) for (k, v) in sequence.items()],
key=default_sort_key)
if kwargs.pop('simultaneous', False): # XXX should this be the default for dict subs?
reps = {}
rv = self
kwargs['hack2'] = True
m = Dummy()
for old, new in sequence:
d = Dummy(commutative=new.is_commutative)
# using d*m so Subs will be used on dummy variables
# in things like Derivative(f(x, y), x) in which x
# is both free and bound
rv = rv._subs(old, d*m, **kwargs)
if not isinstance(rv, Basic):
break
reps[d] = new
reps[m] = S.One # get rid of m
return rv.xreplace(reps)
else:
rv = self
for old, new in sequence:
rv = rv._subs(old, new, **kwargs)
if not isinstance(rv, Basic):
break
return rv
@cacheit
def _subs(self, old, new, **hints):
"""Substitutes an expression old -> new.
If self is not equal to old then _eval_subs is called.
If _eval_subs doesn't want to make any special replacement
then a None is received which indicates that the fallback
should be applied wherein a search for replacements is made
amongst the arguments of self.
>>> from sympy import Add
>>> from sympy.abc import x, y, z
Examples
========
Add's _eval_subs knows how to target x + y in the following
so it makes the change:
>>> (x + y + z).subs(x + y, 1)
z + 1
Add's _eval_subs doesn't need to know how to find x + y in
the following:
>>> Add._eval_subs(z*(x + y) + 3, x + y, 1) is None
True
The returned None will cause the fallback routine to traverse the args and
pass the z*(x + y) arg to Mul where the change will take place and the
substitution will succeed:
>>> (z*(x + y) + 3).subs(x + y, 1)
z + 3
** Developers Notes **
An _eval_subs routine for a class should be written if:
1) any arguments are not instances of Basic (e.g. bool, tuple);
2) some arguments should not be targeted (as in integration
variables);
3) if there is something other than a literal replacement
that should be attempted (as in Piecewise where the condition
may be updated without doing a replacement).
If it is overridden, here are some special cases that might arise:
1) If it turns out that no special change was made and all
the original sub-arguments should be checked for
replacements then None should be returned.
2) If it is necessary to do substitutions on a portion of
the expression then _subs should be called. _subs will
handle the case of any sub-expression being equal to old
(which usually would not be the case) while its fallback
will handle the recursion into the sub-arguments. For
example, after Add's _eval_subs removes some matching terms
it must process the remaining terms so it calls _subs
on each of the un-matched terms and then adds them
onto the terms previously obtained.
3) If the initial expression should remain unchanged then
the original expression should be returned. (Whenever an
expression is returned, modified or not, no further
substitution of old -> new is attempted.) Sum's _eval_subs
routine uses this strategy when a substitution is attempted
on any of its summation variables.
"""
def fallback(self, old, new):
"""
Try to replace old with new in any of self's arguments.
"""
hit = False
args = list(self.args)
for i, arg in enumerate(args):
if not hasattr(arg, '_eval_subs'):
continue
arg = arg._subs(old, new, **hints)
if not _aresame(arg, args[i]):
hit = True
args[i] = arg
if hit:
rv = self.func(*args)
hack2 = hints.get('hack2', False)
if hack2 and self.is_Mul and not rv.is_Mul: # 2-arg hack
coeff = S.One
nonnumber = []
for i in args:
if i.is_Number:
coeff *= i
else:
nonnumber.append(i)
nonnumber = self.func(*nonnumber)
if coeff is S.One:
return nonnumber
else:
return self.func(coeff, nonnumber, evaluate=False)
return rv
return self
if _aresame(self, old):
return new
rv = self._eval_subs(old, new)
if rv is None:
rv = fallback(self, old, new)
return rv
def _eval_subs(self, old, new):
"""Override this stub if you want to do anything more than
attempt a replacement of old with new in the arguments of self.
See also: _subs
"""
return None
def xreplace(self, rule):
"""
Replace occurrences of objects within the expression.
Parameters
==========
rule : dict-like
Expresses a replacement rule
Returns
=======
xreplace : the result of the replacement
Examples
========
>>> from sympy import symbols, pi, exp
>>> x, y, z = symbols('x y z')
>>> (1 + x*y).xreplace({x: pi})
pi*y + 1
>>> (1 + x*y).xreplace({x: pi, y: 2})
1 + 2*pi
Replacements occur only if an entire node in the expression tree is
matched:
>>> (x*y + z).xreplace({x*y: pi})
z + pi
>>> (x*y*z).xreplace({x*y: pi})
x*y*z
>>> (2*x).xreplace({2*x: y, x: z})
y
>>> (2*2*x).xreplace({2*x: y, x: z})
4*z
>>> (x + y + 2).xreplace({x + y: 2})
x + y + 2
>>> (x + 2 + exp(x + 2)).xreplace({x + 2: y})
x + exp(y) + 2
xreplace doesn't differentiate between free and bound symbols. In the
following, subs(x, y) would not change x since it is a bound symbol,
but xreplace does:
>>> from sympy import Integral
>>> Integral(x, (x, 1, 2*x)).xreplace({x: y})
Integral(y, (y, 1, 2*y))
Trying to replace x with an expression raises an error:
>>> Integral(x, (x, 1, 2*x)).xreplace({x: 2*y}) # doctest: +SKIP
ValueError: Invalid limits given: ((2*y, 1, 4*y),)
See Also
========
replace: replacement capable of doing wildcard-like matching,
parsing of match, and conditional replacements
subs: substitution of subexpressions as defined by the objects
themselves.
"""
value, _ = self._xreplace(rule)
return value
def _xreplace(self, rule):
"""
Helper for xreplace. Tracks whether a replacement actually occurred.
"""
if self in rule:
return rule[self], True
elif rule:
args = []
changed = False
for a in self.args:
try:
a_xr = a._xreplace(rule)
args.append(a_xr[0])
changed |= a_xr[1]
except AttributeError:
args.append(a)
args = tuple(args)
if changed:
return self.func(*args), True
return self, False
@cacheit
def has(self, *patterns):
"""
Test whether any subexpression matches any of the patterns.
Examples
========
>>> from sympy import sin
>>> from sympy.abc import x, y, z
>>> (x**2 + sin(x*y)).has(z)
False
>>> (x**2 + sin(x*y)).has(x, y, z)
True
>>> x.has(x)
True
Note that ``expr.has(*patterns)`` is exactly equivalent to
``any(expr.has(p) for p in patterns)``. In particular, ``False`` is
returned when the list of patterns is empty.
>>> x.has()
False
"""
return any(self._has(pattern) for pattern in patterns)
def _has(self, pattern):
"""Helper for .has()"""
from sympy.core.function import UndefinedFunction, Function
if isinstance(pattern, UndefinedFunction):
return any(f.func == pattern or f == pattern
for f in self.atoms(Function, UndefinedFunction))
pattern = sympify(pattern)
if isinstance(pattern, BasicMeta):
return any(isinstance(arg, pattern)
for arg in preorder_traversal(self))
try:
match = pattern._has_matcher()
return any(match(arg) for arg in preorder_traversal(self))
except AttributeError:
return any(arg == pattern for arg in preorder_traversal(self))
def _has_matcher(self):
"""Helper for .has()"""
return self.__eq__
def replace(self, query, value, map=False, simultaneous=True, exact=False):
"""
Replace matching subexpressions of ``self`` with ``value``.
If ``map = True`` then also return the mapping {old: new} where ``old``
was a sub-expression found with query and ``new`` is the replacement
value for it. If the expression itself doesn't match the query, then
the returned value will be ``self.xreplace(map)`` otherwise it should
be ``self.subs(ordered(map.items()))``.
Traverses an expression tree and performs replacement of matching
subexpressions from the bottom to the top of the tree. The default
approach is to do the replacement in a simultaneous fashion so
changes made are targeted only once. If this is not desired or causes
problems, ``simultaneous`` can be set to False. In addition, if an
expression containing more than one Wild symbol is being used to match
subexpressions and the ``exact`` flag is True, then the match will only
succeed if non-zero values are received for each Wild that appears in
the match pattern.
The list of possible combinations of queries and replacement values
is listed below:
Examples
========
Initial setup
>>> from sympy import log, sin, cos, tan, Wild, Mul, Add
>>> from sympy.abc import x, y
>>> f = log(sin(x)) + tan(sin(x**2))
1.1. type -> type
obj.replace(type, newtype)
When object of type ``type`` is found, replace it with the
result of passing its argument(s) to ``newtype``.
>>> f.replace(sin, cos)
log(cos(x)) + tan(cos(x**2))
>>> sin(x).replace(sin, cos, map=True)
(cos(x), {sin(x): cos(x)})
>>> (x*y).replace(Mul, Add)
x + y
1.2. type -> func
obj.replace(type, func)
When object of type ``type`` is found, apply ``func`` to its
argument(s). ``func`` must be written to handle the number
of arguments of ``type``.
>>> f.replace(sin, lambda arg: sin(2*arg))
log(sin(2*x)) + tan(sin(2*x**2))
>>> (x*y).replace(Mul, lambda *args: sin(2*Mul(*args)))
sin(2*x*y)
2.1. pattern -> expr
obj.replace(pattern(wild), expr(wild))
Replace subexpressions matching ``pattern`` with the expression
written in terms of the Wild symbols in ``pattern``.
>>> a = Wild('a')
>>> f.replace(sin(a), tan(a))
log(tan(x)) + tan(tan(x**2))
>>> f.replace(sin(a), tan(a/2))
log(tan(x/2)) + tan(tan(x**2/2))
>>> f.replace(sin(a), a)
log(x) + tan(x**2)
>>> (x*y).replace(a*x, a)
y
When the default value of False is used with patterns that have
more than one Wild symbol, non-intuitive results may be obtained:
>>> b = Wild('b')
>>> (2*x).replace(a*x + b, b - a)
2/x
For this reason, the ``exact`` option can be used to make the
replacement only when the match gives non-zero values for all
Wild symbols:
>>> (2*x + y).replace(a*x + b, b - a, exact=True)
y - 2
>>> (2*x).replace(a*x + b, b - a, exact=True)
2*x
2.2. pattern -> func
obj.replace(pattern(wild), lambda wild: expr(wild))
All behavior is the same as in 2.1 but now a function in terms of
pattern variables is used rather than an expression:
>>> f.replace(sin(a), lambda a: sin(2*a))
log(sin(2*x)) + tan(sin(2*x**2))
3.1. func -> func
obj.replace(filter, func)
Replace subexpression ``e`` with ``func(e)`` if ``filter(e)``
is True.
>>> g = 2*sin(x**3)
>>> g.replace(lambda expr: expr.is_Number, lambda expr: expr**2)
4*sin(x**9)
The expression itself is also targeted by the query but is done in
such a fashion that changes are not made twice.
>>> e = x*(x*y + 1)
>>> e.replace(lambda x: x.is_Mul, lambda x: 2*x)
2*x*(2*x*y + 1)
See Also
========
subs: substitution of subexpressions as defined by the objects
themselves.
xreplace: exact node replacement in expr tree; also capable of
using matching rules
"""
from sympy.core.symbol import Dummy
from sympy.simplify.simplify import bottom_up
try:
query = sympify(query)
except SympifyError:
pass
try:
value = sympify(value)
except SympifyError:
pass
if isinstance(query, type):
_query = lambda expr: isinstance(expr, query)
if isinstance(value, type):
_value = lambda expr, result: value(*expr.args)
elif callable(value):
_value = lambda expr, result: value(*expr.args)
else:
raise TypeError(
"given a type, replace() expects another "
"type or a callable")
elif isinstance(query, Basic):
_query = lambda expr: expr.match(query)
# XXX remove the exact flag and make multi-symbol
# patterns use exact=True semantics; to do this the query must
# be tested to find out how many Wild symbols are present.
# See https://groups.google.com/forum/
# ?fromgroups=#!topic/sympy/zPzo5FtRiqI
# for a method of inspecting a function to know how many
# parameters it has.
if isinstance(value, Basic):
if exact:
_value = lambda expr, result: (value.subs(result)
if all(val for val in result.values()) else expr)
else:
_value = lambda expr, result: value.subs(result)
elif callable(value):
# match dictionary keys get the trailing underscore stripped
# from them and are then passed as keywords to the callable;
# if ``exact`` is True, only accept match if there are no null
# values amongst those matched.
if exact:
_value = lambda expr, result: (value(**dict([(
str(key)[:-1], val) for key, val in result.items()]))
if all(val for val in result.values()) else expr)
else:
_value = lambda expr, result: value(**dict([(
str(key)[:-1], val) for key, val in result.items()]))
else:
raise TypeError(
"given an expression, replace() expects "
"another expression or a callable")
elif callable(query):
_query = query
if callable(value):
_value = lambda expr, result: value(expr)
else:
raise TypeError(
"given a callable, replace() expects "
"another callable")
else:
raise TypeError(
"first argument to replace() must be a "
"type, an expression or a callable")
mapping = {} # changes that took place
mask = [] # the dummies that were used as change placeholders
def rec_replace(expr):
result = _query(expr)
if result or result == {}:
new = _value(expr, result)
if new is not None and new != expr:
mapping[expr] = new
if simultaneous:
# don't let this expression be changed during rebuilding
com = getattr(new, 'is_commutative', True)
if com is None:
com = True
d = Dummy(commutative=com)
mask.append((d, new))
expr = d
else:
expr = new
return expr
rv = bottom_up(self, rec_replace, atoms=True)
# restore original expressions for Dummy symbols
if simultaneous:
mask = list(reversed(mask))
for o, n in mask:
r = {o: n}
rv = rv.xreplace(r)
if not map:
return rv
else:
if simultaneous:
# restore subexpressions in mapping
for o, n in mask:
r = {o: n}
mapping = dict([(k.xreplace(r), v.xreplace(r))
for k, v in mapping.items()])
return rv, mapping
def find(self, query, group=False):
"""Find all subexpressions matching a query. """
query = _make_find_query(query)
results = list(filter(query, preorder_traversal(self)))
if not group:
return set(results)
else:
groups = {}
for result in results:
if result in groups:
groups[result] += 1
else:
groups[result] = 1
return groups
def count(self, query):
"""Count the number of matching subexpressions. """
query = _make_find_query(query)
return sum(bool(query(sub)) for sub in preorder_traversal(self))
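# For example, (x + sin(x) + sin(y)).count(sin) == 2: the sin class is
# turned into an isinstance query that is applied to every node visited
# by the preorder traversal.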
def matches(self, expr, repl_dict={}, old=False):
"""
Helper method for match() that looks for a match between Wild symbols
in self and expressions in expr.
Examples
========
>>> from sympy import symbols, Wild, Basic
>>> a, b, c = symbols('a b c')
>>> x = Wild('x')
>>> Basic(a + x, x).matches(Basic(a + b, c)) is None
True
>>> Basic(a + x, x).matches(Basic(a + b + c, b + c))
{x_: b + c}
"""
expr = sympify(expr)
if not isinstance(expr, self.__class__):
return None
if self == expr:
return repl_dict
if len(self.args) != len(expr.args):
return None
d = repl_dict.copy()
for arg, other_arg in zip(self.args, expr.args):
if arg == other_arg:
continue
d = arg.xreplace(d).matches(other_arg, d, old=old)
if d is None:
return None
return d
def match(self, pattern, old=False):
"""
Pattern matching.
Wild symbols match all.
Return ``None`` when expression (self) does not match
with pattern. Otherwise return a dictionary such that::
pattern.xreplace(self.match(pattern)) == self
Examples
========
>>> from sympy import Wild
>>> from sympy.abc import x, y
>>> p = Wild("p")
>>> q = Wild("q")
>>> r = Wild("r")
>>> e = (x+y)**(x+y)
>>> e.match(p**p)
{p_: x + y}
>>> e.match(p**q)
{p_: x + y, q_: x + y}
>>> e = (2*x)**2
>>> e.match(p*q**r)
{p_: 4, q_: x, r_: 2}
>>> (p*q**r).xreplace(e.match(p*q**r))
4*x**2
The ``old`` flag will give the old-style pattern matching where
expressions and patterns are essentially solved to give the
match. Both of the following give None unless ``old=True``:
>>> (x - 2).match(p - x, old=True)
{p_: 2*x - 2}
>>> (2/x).match(p*x, old=True)
{p_: 2/x**2}
"""
pattern = sympify(pattern)
return pattern.matches(self, old=old)
def count_ops(self, visual=None):
"""wrapper for count_ops that returns the operation count."""
from sympy import count_ops
return count_ops(self, visual)
def doit(self, **hints):
"""Evaluate objects that are not evaluated by default like limits,
integrals, sums and products. All objects of this kind will be
evaluated recursively, unless some species were excluded via 'hints'
or unless the 'deep' hint was set to 'False'.
>>> from sympy import Integral
>>> from sympy.abc import x
>>> 2*Integral(x, x)
2*Integral(x, x)
>>> (2*Integral(x, x)).doit()
x**2
>>> (2*Integral(x, x)).doit(deep=False)
2*Integral(x, x)
"""
if hints.get('deep', True):
terms = [term.doit(**hints) if isinstance(term, Basic) else term
for term in self.args]
return self.func(*terms)
else:
return self
def _eval_rewrite(self, pattern, rule, **hints):
if self.is_Atom:
if hasattr(self, rule):
return getattr(self, rule)()
return self
if hints.get('deep', True):
args = [a._eval_rewrite(pattern, rule, **hints)
if isinstance(a, Basic) else a
for a in self.args]
else:
args = self.args
if pattern is None or isinstance(self.func, pattern):
if hasattr(self, rule):
rewritten = getattr(self, rule)(*args)
if rewritten is not None:
return rewritten
return self.func(*args)
def rewrite(self, *args, **hints):
""" Rewrite functions in terms of other functions.
Rewrites expression containing applications of functions
of one kind in terms of functions of different kind. For
example you can rewrite trigonometric functions as complex
exponentials or combinatorial functions as gamma function.
As a pattern this function accepts a list of functions to
rewrite (instances of DefinedFunction class). As rule
you can use string or a destination function instance (in
this case rewrite() will use the str() function).
There is also the possibility to pass hints on how to rewrite
the given expressions. For now there is only one such hint
defined called 'deep'. When 'deep' is set to False it will
forbid functions to rewrite their contents.
Examples
========
>>> from sympy import sin, exp
>>> from sympy.abc import x
Unspecified pattern:
>>> sin(x).rewrite(exp)
-I*(exp(I*x) - exp(-I*x))/2
Pattern as a single function:
>>> sin(x).rewrite(sin, exp)
-I*(exp(I*x) - exp(-I*x))/2
Pattern as a list of functions:
>>> sin(x).rewrite([sin, ], exp)
-I*(exp(I*x) - exp(-I*x))/2
"""
if not args:
return self
else:
pattern = args[:-1]
if isinstance(args[-1], string_types):
rule = '_eval_rewrite_as_' + args[-1]
else:
rule = '_eval_rewrite_as_' + args[-1].__name__
if not pattern:
return self._eval_rewrite(None, rule, **hints)
else:
if iterable(pattern[0]):
pattern = pattern[0]
pattern = [p.__class__ for p in pattern if self.has(p)]
if pattern:
return self._eval_rewrite(tuple(pattern), rule, **hints)
else:
return self
class Atom(Basic):
"""
A parent class for atomic things. An atom is an expression with no subexpressions.
Examples
========
Symbol, Number, Rational, Integer, ...
But not: Add, Mul, Pow, ...
"""
is_Atom = True
__slots__ = []
def matches(self, expr, repl_dict={}, old=False):
if self == expr:
return repl_dict
def xreplace(self, rule, hack2=False):
return rule.get(self, self)
def doit(self, **hints):
return self
@classmethod
def class_key(cls):
return 2, 0, cls.__name__
@cacheit
def sort_key(self, order=None):
return self.class_key(), (1, (str(self),)), S.One.sort_key(), S.One
def _eval_simplify(self, ratio, measure):
return self
@property
def _sorted_args(self):
# this is here as a safeguard against accidentally using _sorted_args
# on Atoms -- they cannot be rebuilt as atom.func(*atom._sorted_args)
# since there are no args. So the calling routine should be checking
# to see that this property is not called for Atoms.
raise AttributeError('Atoms have no args. It might be necessary'
' to make a check for Atoms in the calling code.')
def _aresame(a, b):
"""Return True if a and b are structurally the same, else False.
Examples
========
To SymPy, 2.0 == 2:
>>> from sympy import S
>>> 2.0 == S(2)
True
Since a simple 'same or not' result is sometimes useful, this routine was
written to provide that query:
>>> from sympy.core.basic import _aresame
>>> _aresame(S(2.0), S(2))
False
"""
from .function import AppliedUndef, UndefinedFunction as UndefFunc
for i, j in zip_longest(preorder_traversal(a), preorder_traversal(b)):
if i != j or type(i) != type(j):
if ((isinstance(i, UndefFunc) and isinstance(j, UndefFunc)) or
(isinstance(i, AppliedUndef) and isinstance(j, AppliedUndef))):
if i.class_key() != j.class_key():
return False
else:
return False
else:
return True
def _atomic(e):
"""Return atom-like quantities as far as substitution is
concerned: Derivatives, Functions and Symbols. Don't
return any 'atoms' that are inside such quantities unless
they also appear outside, too.
Examples
========
>>> from sympy import Derivative, Function, cos
>>> from sympy.abc import x, y
>>> from sympy.core.basic import _atomic
>>> f = Function('f')
>>> _atomic(x + y)
set([x, y])
>>> _atomic(x + f(y))
set([x, f(y)])
>>> _atomic(Derivative(f(x), x) + cos(x) + y)
set([y, cos(x), Derivative(f(x), x)])
"""
from sympy import Derivative, Function, Symbol
pot = preorder_traversal(e)
seen = set()
try:
free = e.free_symbols
except AttributeError:
return set([e])
atoms = set()
for p in pot:
if p in seen:
pot.skip()
continue
seen.add(p)
if isinstance(p, Symbol) and p in free:
atoms.add(p)
elif isinstance(p, (Derivative, Function)):
pot.skip()
atoms.add(p)
return atoms
class preorder_traversal(Iterator):
"""
Do a pre-order traversal of a tree.
This iterator recursively yields nodes that it has visited in a pre-order
fashion. That is, it yields the current node then descends through the
tree depth-first to yield all of a node's children's pre-order
traversal.
For an expression, the order of the traversal depends on the order of
.args, which in many cases can be arbitrary.
Parameters
==========
node : sympy expression
The expression to traverse.
keys : (default None) sort key(s)
The key(s) used to sort args of Basic objects. When None, args of Basic
objects are processed in arbitrary order. If key is defined, it will
be passed along to ordered() as the only key(s) to use to sort the
arguments; if ``key`` is simply True then the default keys of ordered
will be used.
Yields
======
subtree : sympy expression
All of the subtrees in the tree.
Examples
========
>>> from sympy import symbols
>>> from sympy.core.basic import preorder_traversal
>>> x, y, z = symbols('x y z')
The nodes are returned in the order that they are encountered unless key
is given; simply passing key=True will guarantee that the traversal is
unique.
>>> list(preorder_traversal((x + y)*z, keys=None)) # doctest: +SKIP
[z*(x + y), z, x + y, y, x]
>>> list(preorder_traversal((x + y)*z, keys=True))
[z*(x + y), z, x + y, x, y]
"""
def __init__(self, node, keys=None):
self._skip_flag = False
self._pt = self._preorder_traversal(node, keys)
def _preorder_traversal(self, node, keys):
yield node
if self._skip_flag:
self._skip_flag = False
return
if isinstance(node, Basic):
if not keys and hasattr(node, '_argset'):
# LatticeOp keeps args as a set. We should use this if we
# don't care about the order, to prevent unnecessary sorting.
args = node._argset
else:
args = node.args
if keys:
if keys != True:
args = ordered(args, keys, default=False)
else:
args = ordered(args)
for arg in args:
for subtree in self._preorder_traversal(arg, keys):
yield subtree
elif iterable(node):
for item in node:
for subtree in self._preorder_traversal(item, keys):
yield subtree
def skip(self):
"""
Skip yielding current node's (last yielded node's) subtrees.
Examples
========
>>> from sympy.core import symbols
>>> from sympy.core.basic import preorder_traversal
>>> x, y, z = symbols('x y z')
>>> pt = preorder_traversal((x+y*z)*z)
>>> for i in pt:
... print(i)
... if i == x+y*z:
... pt.skip()
z*(x + y*z)
z
x + y*z
"""
self._skip_flag = True
def __next__(self):
return next(self._pt)
def __iter__(self):
return self
def _make_find_query(query):
"""Convert the argument of Basic.find() into a callable"""
try:
query = sympify(query)
except SympifyError:
pass
if isinstance(query, type):
return lambda expr: isinstance(expr, query)
elif isinstance(query, Basic):
return lambda expr: expr.match(query) is not None
return query
|
"""
Picarto.TV API Documentation
The Picarto.TV API documentation. Note: for fixed access tokens, the header that needs to be sent is of the format `Authorization: Bearer yourTokenHere`; it can be generated at https://oauth.picarto.tv/. For the chat API, see https://docs.picarto.tv/chat/chat.proto - contact via the email below for implementation details.
OpenAPI spec version: 1.2.5
Contact: api@picarto.tv
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class VideoSearchResult(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'channel': 'BasicChannelInfo',
'video': 'ChannelVideo'
}
attribute_map = {
'channel': 'channel',
'video': 'video'
}
def __init__(self, channel=None, video=None):
"""
VideoSearchResult - a model defined in Swagger
"""
self._channel = None
self._video = None
if channel is not None:
self.channel = channel
if video is not None:
self.video = video
@property
def channel(self):
"""
Gets the channel of this VideoSearchResult.
:return: The channel of this VideoSearchResult.
:rtype: BasicChannelInfo
"""
return self._channel
@channel.setter
def channel(self, channel):
"""
Sets the channel of this VideoSearchResult.
:param channel: The channel of this VideoSearchResult.
:type: BasicChannelInfo
"""
self._channel = channel
@property
def video(self):
"""
Gets the video of this VideoSearchResult.
:return: The video of this VideoSearchResult.
:rtype: ChannelVideo
"""
return self._video
@video.setter
def video(self, video):
"""
Sets the video of this VideoSearchResult.
:param video: The video of this VideoSearchResult.
:type: ChannelVideo
"""
self._video = video
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, VideoSearchResult):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
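# Minimal usage sketch (the channel/video values would normally be
# BasicChannelInfo and ChannelVideo instances from the same generated
# package; None is used here purely for illustration):
# result = VideoSearchResult(channel=None, video=None)
# result.to_dict() # -> {'channel': None, 'video': None}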
|
import six
from decimal import Decimal as D
from oscar.core.loading import get_model
from django.test import TestCase
from oscar.test import factories, decorators
from oscar.apps.partner import abstract_models
Partner = get_model('partner', 'Partner')
PartnerAddress = get_model('partner', 'PartnerAddress')
Country = get_model('address', 'Country')
class DummyWrapper(object):
def availability(self, stockrecord):
return 'Dummy response'
def dispatch_date(self, stockrecord):
return "Another dummy response"
class TestStockRecord(TestCase):
def setUp(self):
self.product = factories.create_product()
self.stockrecord = factories.create_stockrecord(
self.product, price_excl_tax=D('10.00'), num_in_stock=10)
@decorators.ignore_deprecation_warnings
def test_get_price_incl_tax_defaults_to_no_tax(self):
self.assertEqual(D('10.00'), self.stockrecord.price_incl_tax)
def test_get_price_excl_tax_returns_correct_value(self):
self.assertEqual(D('10.00'), self.stockrecord.price_excl_tax)
def test_net_stock_level_with_no_allocation(self):
self.assertEqual(10, self.stockrecord.net_stock_level)
def test_net_stock_level_with_allocation(self):
self.stockrecord.allocate(5)
self.assertEqual(10-5, self.stockrecord.net_stock_level)
def test_allocated_does_not_alter_num_in_stock(self):
self.stockrecord.allocate(5)
self.assertEqual(10, self.stockrecord.num_in_stock)
self.assertEqual(5, self.stockrecord.num_allocated)
def test_allocation_handles_null_value(self):
self.stockrecord.num_allocated = None
self.stockrecord.allocate(5)
def test_consuming_allocation(self):
self.stockrecord.allocate(5)
self.stockrecord.consume_allocation(3)
self.assertEqual(2, self.stockrecord.num_allocated)
self.assertEqual(7, self.stockrecord.num_in_stock)
def test_cancelling_allocation(self):
self.stockrecord.allocate(5)
self.stockrecord.cancel_allocation(4)
self.assertEqual(1, self.stockrecord.num_allocated)
self.assertEqual(10, self.stockrecord.num_in_stock)
def test_cancelling_allocation_ignores_too_big_allocations(self):
self.stockrecord.allocate(5)
self.stockrecord.cancel_allocation(6)
self.assertEqual(0, self.stockrecord.num_allocated)
self.assertEqual(10, self.stockrecord.num_in_stock)
@decorators.ignore_deprecation_warnings
def test_max_purchase_quantity(self):
self.assertEqual(10, self.stockrecord.max_purchase_quantity())
@decorators.ignore_deprecation_warnings
class CustomWrapperTests(TestCase):
"""
Partner wrappers are deprecated. This testcase will be removed/rewritten
in Oscar 0.7.
"""
def setUp(self):
abstract_models.partner_wrappers = {1: DummyWrapper()}
def tearDown(self):
abstract_models.partner_wrappers = None
def test_wrapper_availability_gets_called(self):
product = factories.create_product(
price=D('10.00'), partner_name="Acme", num_in_stock=10)
stockrecord = product.stockrecords.all()[0]
self.assertEqual(u"Dummy response",
six.text_type(stockrecord.availability))
def test_wrapper_dispatch_date_gets_called(self):
product = factories.create_product(
price=D('10.00'), partner_name="Acme", num_in_stock=10)
stockrecord = product.stockrecords.all()[0]
self.assertEqual("Another dummy response",
stockrecord.dispatch_date)
class TestPartnerAddress(TestCase):
def setUp(self):
self.partner = Partner._default_manager.create(
name="Dummy partner")
self.country = Country._default_manager.create(
iso_3166_1_a2='GB', name="UNITED KINGDOM")
self.address = PartnerAddress._default_manager.create(
title="Dr",
first_name="Barry",
last_name="Barrington",
country=self.country,
postcode="LS1 2HA",
partner=self.partner)
def test_can_get_primary_address(self):
self.assertEqual(self.partner.primary_address, self.address)
def test_fails_on_two_addresses(self):
self.address = PartnerAddress._default_manager.create(
title="Mrs",
first_name="Jane",
last_name="Barrington",
postcode="LS1 2HA",
country=self.country,
partner=self.partner)
self.assertRaises(
NotImplementedError, getattr, self.partner, 'primary_address')
|
from libmozdata import utils as lmdutils
from auto_nag import utils
from auto_nag.bzcleaner import BzCleaner
from auto_nag.escalation import Escalation, NoActivityDays
from auto_nag.nag_me import Nag
from auto_nag.round_robin import RoundRobin
class P1NoAssignee(BzCleaner, Nag):
def __init__(self):
super(P1NoAssignee, self).__init__()
self.escalation = Escalation(
self.people,
data=utils.get_config(self.name(), "escalation"),
skiplist=utils.get_config("workflow", "supervisor_skiplist", []),
)
self.round_robin = RoundRobin.get_instance()
self.components_skiplist = utils.get_config("workflow", "components_skiplist")
def description(self):
return "P1 Bugs, no assignee and no activity for few days"
def nag_template(self):
return self.template()
def get_extra_for_template(self):
return {"ndays": self.ndays}
def get_extra_for_nag_template(self):
return self.get_extra_for_template()
def get_extra_for_needinfo_template(self):
return self.get_extra_for_template()
def ignore_meta(self):
return True
def has_last_comment_time(self):
return True
def has_product_component(self):
return True
def columns(self):
return ["component", "id", "summary", "last_comment"]
def handle_bug(self, bug, data):
# check if the product::component is in the list
if utils.check_product_component(self.components_skiplist, bug):
return None
return bug
def get_mail_to_auto_ni(self, bug):
# For now, disable the needinfo
return None
# The code below is intentionally unreachable for now; it avoids
# needinfo'ing the same people every day once the early return is removed.
if self.has_bot_set_ni(bug):
return None
mail, nick = self.round_robin.get(bug, self.date)
if mail and nick:
return {"mail": mail, "nickname": nick}
return None
def set_people_to_nag(self, bug, buginfo):
priority = "high"
if not self.filter_bug(priority):
return None
owners = self.round_robin.get(bug, self.date, only_one=False, has_nick=False)
real_owner = bug["triage_owner"]
self.add_triage_owner(owners, real_owner=real_owner)
if not self.add(owners, buginfo, priority=priority):
self.add_no_manager(buginfo["id"])
return bug
def get_bz_params(self, date):
self.ndays = NoActivityDays(self.name()).get(
(utils.get_next_release_date() - self.nag_date).days
)
self.date = lmdutils.get_date_ymd(date)
fields = ["triage_owner", "flags"]
params = {
"bug_type": "defect",
"include_fields": fields,
"resolution": "---",
"f1": "priority",
"o1": "equals",
"v1": "P1",
"f2": "days_elapsed",
"o2": "greaterthaneq",
"v2": self.ndays,
}
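# get_empty_assignees is assumed to extend `params` in place with clauses
# matching bugs whose assignee field is unset (inferred from its name and
# from the fact that its return value is ignored).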
utils.get_empty_assignees(params)
return params
if __name__ == "__main__":
P1NoAssignee().run()
|
L = [0, 1, 2, 3]
print('-------- Part A --------')
try:
L[4] # part (a) of question
except IndexError as err:
print('IndexError Exception', err)
print('-------- Part B --------')
sliced = L[-10:10]
print(sliced)
print('slicing out of bounds yields a new list equal in value to the original')
print('when the slice covers all indices of the original')
print('-------- Part C --------')
sliced = L[3:1]
print(sliced)
sliced = L[3:1:-1]
print(sliced)
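# L[3:1] is empty because the default step is +1 and start >= stop;
# L[3:1:-1] walks backwards from index 3 down to (but excluding) index 1,
# giving [3, 2].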
|
from django_nose.tools import assert_equal
from pontoon.base.tests import TestCase
from pontoon.base.utils import NewlineEscapePlaceable, mark_placeables
class PlaceablesTests(TestCase):
def test_newline_escape_placeable(self):
"""Test detecting newline escape sequences"""
placeable = NewlineEscapePlaceable
assert_equal(placeable.parse(u'A string\\n')[1], placeable([u'\\n']))
assert_equal(placeable.parse(u'\\nA string')[0], placeable([u'\\n']))
assert_equal(placeable.parse(u'A\\nstring')[1], placeable([u'\\n']))
assert_equal(placeable.parse(u'A string'), None)
assert_equal(placeable.parse(u'A\nstring'), None)
def test_mark_newline_escape_placeables(self):
"""Test detecting newline escape sequences"""
assert_equal(
mark_placeables(u'A string\\n'),
u'A string<mark class="placeable" title="Escaped newline">\\n</mark>'
)
assert_equal(
mark_placeables(u'\\nA string'),
u'<mark class="placeable" title="Escaped newline">\\n</mark>A string'
)
assert_equal(
mark_placeables(u'A\\nstring'),
u'A<mark class="placeable" title="Escaped newline">\\n</mark>string'
)
assert_equal(
mark_placeables(u'A string'),
u'A string'
)
assert_equal(
mark_placeables(u'A\nstring'),
u'A\nstring'
)
def test_python_new_format_placeables(self):
"""Test detection of the new format string in python strings."""
assert_equal(
mark_placeables(u'Hello {name}'),
u'Hello <mark class="placeable" title="Python format string">{name}</mark>'
)
assert_equal(
mark_placeables(u'Hello {name!s}'),
u'Hello <mark class="placeable" title="Python format string">{name!s}</mark>'
)
assert_equal(
mark_placeables(u'Hello {someone.name}'),
u'Hello <mark class="placeable" title="Python format string">{someone.name}</mark>'
)
assert_equal(
mark_placeables(u'Hello {name[0]}'),
u'Hello <mark class="placeable" title="Python format string">{name[0]}</mark>'
)
assert_equal(
mark_placeables(u'Hello {someone.name[0]}'),
u'Hello <mark class="placeable" title="Python format string">{someone.name[0]}</mark>'
)
def test_python_format_named_placeables(self):
"""Test detection of format string with named placeables."""
assert_equal(
mark_placeables(u'Hello %(name)s'),
u'Hello <mark class="placeable" title="Python format string">%(name)s</mark>'
)
assert_equal(
mark_placeables(u'Rolling %(number)d dices'),
u'Rolling <mark class="placeable" title="Python format string">%(number)d</mark> dices'
)
assert_equal(
mark_placeables(u'Hello %(name)S'),
u'Hello <mark class="placeable" title="Python format string">%(name)S</mark>'
)
assert_equal(
mark_placeables(u'Rolling %(number)D dices'),
u'Rolling <mark class="placeable" title="Python format string">%(number)D</mark> dices'
)
|
"""
"""
from __future__ import print_function
import numpy
from rdkit.ML.DecTree import SigTree
from rdkit.ML import InfoTheory
try:
from rdkit.ML.FeatureSelect import CMIM
except ImportError:
CMIM=None
from rdkit.DataStructs.VectCollection import VectCollection
import copy
import random
def _GenerateRandomEnsemble(nToInclude,nBits):
""" Generates a random subset of a group of indices
**Arguments**
- nToInclude: the size of the desired set
- nBits: the maximum index to be included in the set
**Returns**
a list of indices
"""
# Before Python 2.3 added the random.sample() function, this was
# way more complicated:
res = random.sample(range(nBits),nToInclude)
return res
def BuildSigTree(examples,nPossibleRes,ensemble=None,random=0,
metric=InfoTheory.InfoType.BIASENTROPY,
biasList=[1],
depth=0,maxDepth=-1,
useCMIM=0,allowCollections=False,
verbose=0,**kwargs):
"""
**Arguments**
- examples: the examples to be classified. Each example
should be a sequence at least three entries long, with
entry 0 being a label, entry 1 a BitVector and entry -1
an activity value
- nPossibleRes: the number of result codes possible
- ensemble: (optional) if this argument is provided, it
should be a sequence which is used to limit the bits
which are actually considered as potential descriptors.
The default is None (use all bits).
- random: (optional) If this argument is nonzero, it
specifies the number of bits to be randomly selected
for consideration at this node (i.e. this toggles the
growth of Random Trees).
The default is 0 (no random descriptor selection)
- metric: (optional) This is an _InfoTheory.InfoType_ and
sets the metric used to rank the bits.
The default is _InfoTheory.InfoType.BIASENTROPY_
- biasList: (optional) If provided, this provides a bias
list for the bit ranker.
See the _InfoTheory.InfoBitRanker_ docs for an explanation
of bias.
The default value is [1], which biases towards actives.
- maxDepth: (optional) the maximum depth to which the tree
will be grown
The default is -1 (no depth limit).
- useCMIM: (optional) if this is >0, the CMIM algorithm
(conditional mutual information maximization) will be
used to select the descriptors used to build the trees.
The value of the variable should be set to the number
of descriptors to be used. This option and the
ensemble option are mutually exclusive (CMIM will not be
used if the ensemble is set), but it happily coexists
with the random argument (to only consider random subsets
of the top N CMIM bits)
The default is 0 (do not use CMIM)
- depth: (optional) the current depth in the tree
This is used in the recursion and should not be set
by the client.
**Returns**
a SigTree.SigTreeNode with the root of the decision tree
"""
if verbose: print(' '*depth,'Build')
tree=SigTree.SigTreeNode(None,'node',level=depth)
tree.SetData(-666)
#tree.SetExamples(examples)
# counts of each result code:
#resCodes = map(lambda x:int(x[-1]),examples)
resCodes = [int(x[-1]) for x in examples]
#print('resCodes:',resCodes)
counts = [0]*nPossibleRes
for res in resCodes:
counts[res] += 1
#print(' '*depth,'counts:',counts)
nzCounts = numpy.nonzero(counts)[0]
if verbose: print(' '*depth,'\tcounts:',counts)
if len(nzCounts) == 1:
# bottomed out because there is only one result code left
# with any counts (i.e. there's only one type of example
# left... this is GOOD!).
res = nzCounts[0]
tree.SetLabel(res)
tree.SetName(str(res))
tree.SetTerminal(1)
elif maxDepth>=0 and depth>maxDepth:
# Bottomed out: max depth hit
# We don't really know what to do here, so
# use the heuristic of picking the most prevalent
# result
v = numpy.argmax(counts)
tree.SetLabel(v)
tree.SetName('%d?'%v)
tree.SetTerminal(1)
else:
# find the variable which gives us the best improvement
# We do this with an InfoBitRanker:
fp = examples[0][1]
nBits = fp.GetNumBits()
ranker = InfoTheory.InfoBitRanker(nBits,nPossibleRes,metric)
if biasList: ranker.SetBiasList(biasList)
if CMIM is not None and useCMIM > 0 and not ensemble:
ensemble = CMIM.SelectFeatures(examples,useCMIM,bvCol=1)
if random:
if ensemble:
if len(ensemble)>random:
picks = _GenerateRandomEnsemble(random,len(ensemble))
availBits = [ensemble[p] for p in picks] # 'take' was undefined here; index directly
else:
availBits = list(ensemble) # mask with the ensemble's bit indices; range(len(ensemble)) would mask positions instead
else:
availBits = _GenerateRandomEnsemble(random,nBits)
else:
availBits=None
if availBits:
ranker.SetMaskBits(availBits)
#print(' 2:'*depth,availBits)
useCollections=isinstance(examples[0][1],VectCollection)
for example in examples:
#print(' '*depth,example[1].ToBitString(),example[-1])
if not useCollections:
ranker.AccumulateVotes(example[1],example[-1])
else:
example[1].Reset()
ranker.AccumulateVotes(example[1].orVect,example[-1])
try:
bitInfo = ranker.GetTopN(1)[0]
best = int(bitInfo[0])
gain = bitInfo[1]
except Exception:
import traceback
traceback.print_exc()
print('get top n failed')
gain = -1.0
if gain <= 0.0:
v = numpy.argmax(counts)
tree.SetLabel(v)
tree.SetName('?%d?'%v)
tree.SetTerminal(1)
return tree
#print(' '*depth,'\tbest:',bitInfo)
if verbose: print(' '*depth,'\tbest:',bitInfo)
# set some info at this node
tree.SetName('Bit-%d'%(best))
tree.SetLabel(best)
#tree.SetExamples(examples)
tree.SetTerminal(0)
# loop over possible values of the new variable and
# build a subtree for each one
onExamples = []
offExamples = []
for example in examples:
if example[1][best]:
if allowCollections and useCollections:
sig = copy.copy(example[1])
sig.DetachVectsNotMatchingBit(best)
ex = [example[0],sig]
if len(example)>2:
ex.extend(example[2:])
example = ex
onExamples.append(example)
else:
offExamples.append(example)
#print(' '*depth,len(offExamples),len(onExamples))
for ex in (offExamples,onExamples):
if len(ex) == 0:
v = numpy.argmax(counts)
tree.AddChild('%d??'%v,label=v,data=0.0,isTerminal=1)
else:
child = BuildSigTree(ex,nPossibleRes,random=random,
ensemble=ensemble,
metric=metric,biasList=biasList,
depth=depth+1,maxDepth=maxDepth,
verbose=verbose)
if child is None:
v = numpy.argmax(counts)
tree.AddChild('%d???'%v,label=v,data=0.0,isTerminal=1)
else:
tree.AddChildNode(child)
return tree
def SigTreeBuilder(examples,attrs,nPossibleVals,initialVar=None,ensemble=None,
randomDescriptors=0,
**kwargs):
nRes = nPossibleVals[-1]
return BuildSigTree(examples,nRes,random=randomDescriptors,**kwargs)
|
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
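# Builds and processes a 128-point daily artificial series with a
# moving-average trend, a 12-step cycle, a Logit transform, zero noise
# (sigma=0.0) and 100 exogenous variables (parameter meanings inferred
# from the argument names).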
art.process_dataset(N = 128 , FREQ = 'D', seed = 0, trendtype = "MovingAverage", cycle_length = 12, transform = "Logit", sigma = 0.0, exog_count = 100, ar_order = 12);
|
from django.core.urlresolvers import reverse, resolve
from django.utils.html import escape
from .base import Widget
from ..libs import List as TogaList, SimpleListElement as TogaSimpleListElement
class SimpleListElement(Widget):
def __init__(self, content, detail=None, **style):
super(SimpleListElement, self).__init__(**style)
self.content = content
self.detail = detail
self.startup()
def startup(self):
pass
def materialize(self):
return TogaSimpleListElement(
widget_id=self.widget_id,
content=escape(self.content),
delete_url=reverse(self.detail, kwargs={'pk': self.content.id})
)
def _set_window(self, window):
super()._set_window(window)
if getattr(self, 'on_press', None): # on_press is never set in __init__; guard the lookup
self.window.callbacks[(self.widget_id, 'on_press')] = self.on_press
class List(Widget):
IMPL_CLASS = TogaList
def __init__(self, source=None, detail=None, item_class=None, on_item_press=None, **style):
super(List, self).__init__(**style)
self.source = source
self.detail = detail
self.item_class = item_class
self.on_item_press = on_item_press
self.children = []
self.startup()
def startup(self):
pass
def materialize(self):
children = []
if self.source:
api_view = resolve(reverse(self.source)).func
for child in api_view.view_class().get_queryset():
children.append(self.item_class(child, self.detail).materialize())
else:
for child in self.children:
children.append(child.materialize()) # list has no .add()
return TogaList(
widget_id=self.widget_id,
children=children,
create_url=reverse(self.source),
on_item_press=self.handler(self.on_item_press, 'on_item_press') if self.on_item_press else None
)
def add(self, content):
if self.source:
raise Exception("Can't manually add to an API-sourced list")
self.children.append(self.item_class(content, self.detail))
def _set_app(self, app):
for child in self.children:
child.app = app
def _set_window(self, window):
for child in self.children:
child.window = window
if self.on_item_press:
self.window.callbacks[(self.widget_id, 'on_item_press')] = self.on_item_press
# def _hint_size(self, width, height, min_width=None, min_height=None):
# if width is not None:
# self.width = width
# else:
# del(self.width)
# if min_width is not None:
# self.min_width = min_width
# else:
# del(self.min_width)
# if height is not None:
# self.height = height
# else:
# del(self.height)
# if min_height is not None:
# self.min_height = min_height
# else:
# del(self.min_height)
# def _update_child_layout(self, **style):
# """Force a layout update on children of this container.
# The update request can be accompanied by additional style information
# (probably min_width, min_height, width or height) to control the
# layout.
# """
# for child in self.children:
# if child.is_container:
# child._update_layout()
# def _set_frame(self, frame):
# print("SET FRAME", self, frame.origin.x, frame.origin.y, frame.size.width, frame.size.height)
# self._impl.setFrame_(frame)
# self._impl.setNeedsDisplay_(True)
# for child in self.children:
# layout = child.layout
# child._set_frame(NSRect(NSPoint(layout.left, layout.top), NSSize(layout.width, layout.height)))
|
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('accounting', '0027_more_prbac_bootstrap'),
('accounting', '0030_remove_softwareplan_visibility_trial_internal'),
]
operations = [
]
|
import msgpackrpc
import time
class SumServer(object):
def sum(self, x, y):
return x + y
def sleepy_sum(self, x, y):
time.sleep(1)
return x + y
server = msgpackrpc.Server(SumServer())
server.listen(msgpackrpc.Address("localhost", 18800))
server.start()
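# A client for this server would run in another process, since start()
# blocks; using the standard msgpackrpc client API:
# client = msgpackrpc.Client(msgpackrpc.Address("localhost", 18800))
# client.call("sum", 1, 2) # -> 3
# client.call("sleepy_sum", 1, 2) # blocks ~1s before returning 3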
|
import logging
import pytest
from traitlets.config import Config
from dockerspawner import DockerSpawner
def test_deprecated_config(caplog):
cfg = Config()
cfg.DockerSpawner.image_whitelist = {"1.0": "jupyterhub/singleuser:1.0"}
log = logging.getLogger("testlog")
spawner = DockerSpawner(config=cfg, log=log)
assert caplog.record_tuples == [
(
log.name,
logging.WARNING,
'DockerSpawner.image_whitelist is deprecated in DockerSpawner 12.0, use '
'DockerSpawner.allowed_images instead',
)
]
assert spawner.allowed_images == {"1.0": "jupyterhub/singleuser:1.0"}
async def test_deprecated_methods():
cfg = Config()
cfg.DockerSpawner.image_whitelist = {"1.0": "jupyterhub/singleuser:1.0"}
spawner = DockerSpawner(config=cfg)
assert await spawner.check_allowed("1.0")
with pytest.deprecated_call():
assert await spawner.check_image_whitelist("1.0")
|
from django.conf.urls import url
from .views import GetAuthToken, GetAuthTokenFacebook
urlpatterns = [
url(r'^$', GetAuthToken.as_view()),
url(r'^facebook/$', GetAuthTokenFacebook.as_view()),
]
|
import os
def get(var, default, type_=None):
"""Return a function to recover latest env variable contents."""
def _env_getter():
"""Recover the latest setting from the environment."""
val = os.environ.get(var, default)
if type_:
val = type_(val)
return val
return _env_getter
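# Each setting below is a zero-argument callable, so the value is re-read
# from the environment on every call rather than frozen at import time:
# os.environ['MONGO_PORT'] = '27018'
# MONGO_PORT() # -> 27018 (coerced to int via type_)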
MONGO_DBNAME = get('MONGO_DBNAME', 'nozama-cloudsearch')
MONGO_HOST = get('MONGO_HOST', 'localhost')
MONGO_PORT = get('MONGO_PORT', 27017, int)
ELASTICSEARCH_HOST = get('ELASTICSEARCH_HOST', 'localhost')
ELASTICSEARCH_PORT = get('ELASTICSEARCH_PORT', 9200, int)
|
"""Helper module for parsing AWS ini config files."""
import os
try:
import configparser
except ImportError:
import ConfigParser as configparser
AWS_CLI_CREDENTIALS_PATH = "~/.aws/credentials"
AWS_CLI_CONFIG_PATH = "~/.aws/config"
DEFAULT_PROFILE_NAME = os.getenv("AWS_DEFAULT_PROFILE", "default")
class NoConfigFoundException(Exception):
"""Config file not present."""
pass
def _get_config_parser(path):
"""Open and parse given config.
:type path: basestring
:rtype: ConfigParser.ConfigParser
"""
config_parser = configparser.ConfigParser()
try:
with open(os.path.expanduser(path)) as f: # text mode: readfp() expects str lines
config_parser.readfp(f)
except IOError:
raise NoConfigFoundException("Can't find the config file: %s" % path)
else:
return config_parser
def _get_credentials_from_environment():
key = os.environ.get("AWS_ACCESS_KEY_ID")
secret = os.environ.get("AWS_SECRET_ACCESS_KEY")
return key, secret
def get_credentials(profile=None):
"""Returns AWS credentials.
Reads ~/.aws/credentials if the profile name is given or tries
to get them from environment otherwise. Returns a (key, secret)
tuple.
:type profile: basestring
:rtype: tuple
"""
if profile is None:
key, secret = _get_credentials_from_environment()
if key is not None and secret is not None:
return key, secret
raise NoConfigFoundException("AWS credentials not found.")
config = _get_config_parser(path=AWS_CLI_CREDENTIALS_PATH)
key = config.get(profile, "aws_access_key_id")
secret = config.get(profile, "aws_secret_access_key")
return key, secret
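# Usage sketch: get_credentials("default") reads ~/.aws/credentials, while
# get_credentials() falls back to the AWS_ACCESS_KEY_ID /
# AWS_SECRET_ACCESS_KEY environment variables.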
def get_credentials_dict(profile):
"""Returns credentials as a dict (for use as kwargs).
:type profile: basestring
:rtype: dict
"""
key, secret = get_credentials(profile)
return {"aws_access_key_id": key,
"aws_secret_access_key": secret}
def get_profile_names():
"""Get available profile names.
:rtype: list
:returns: list of profile names (strings)
"""
try:
return _get_config_parser(path=AWS_CLI_CREDENTIALS_PATH).sections()
except NoConfigFoundException:
return []
def has_default_profile():
"""Is default profile present?
:rtype: bool
"""
return DEFAULT_PROFILE_NAME in get_profile_names()
def get_default_region(profile):
"""Get the default region for given profile from AWS CLI tool's config.
:type profile: basestring
:rtype: basestring
:returns: name of default region if defined in config, None otherwise
"""
try:
config = _get_config_parser(path=AWS_CLI_CONFIG_PATH)
except NoConfigFoundException:
return None
try:
return config.get("profile %s" % profile, "region")
except (configparser.NoSectionError, configparser.NoOptionError):
pass
try:
return config.get("default", "region")
except (configparser.NoSectionError, configparser.NoOptionError):
pass
return None
|
from django.test import TestCase
from trix.trix_core import trix_markdown
class TestTrixMarkdown(TestCase):
def test_simple(self):
self.assertEqual(
trix_markdown.assignment_markdown('# Hello world\n'),
'<h1>Hello world</h1>')
def test_nl2br(self):
self.assertEqual(
trix_markdown.assignment_markdown('Hello\nworld'),
'<p>Hello<br>\nworld</p>')
|
import os
import mimetypes
from django.conf import settings as django_settings
from django.db import models
from django.template.defaultfilters import slugify
from django.core.files.images import get_image_dimensions
from django.utils.translation import ugettext_lazy as _
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes import generic
from adminfiles import settings
if 'tagging' in django_settings.INSTALLED_APPS:
from tagging.fields import TagField
else:
TagField = None
class FileUpload(models.Model):
upload_date = models.DateTimeField(_('upload date'), auto_now_add=True)
upload = models.FileField(_('file'), upload_to=settings.ADMINFILES_UPLOAD_TO)
title = models.CharField(_('title'), max_length=100)
slug = models.SlugField(_('slug'), max_length=100, unique=True)
description = models.CharField(_('description'), blank=True, max_length=200)
content_type = models.CharField(editable=False, max_length=100)
sub_type = models.CharField(editable=False, max_length=100)
if TagField:
tags = TagField(_('tags'))
class Meta:
ordering = ['upload_date', 'title']
verbose_name = _('file upload')
verbose_name_plural = _('file uploads')
def __unicode__(self):
return self.title
def mime_type(self):
return '%s/%s' % (self.content_type, self.sub_type)
mime_type.short_description = _('mime type')
def type_slug(self):
return slugify(self.sub_type)
def is_image(self):
return self.content_type == 'image'
def _get_dimensions(self):
try:
return self._dimensions_cache
except AttributeError:
if self.is_image():
self._dimensions_cache = get_image_dimensions(self.upload.path)
else:
self._dimensions_cache = (None, None)
return self._dimensions_cache
def width(self):
return self._get_dimensions()[0]
def height(self):
return self._get_dimensions()[1]
def save(self, *args, **kwargs):
try:
uri = self.upload.path
except NotImplementedError:
uri = self.upload.url
(mime_type, encoding) = mimetypes.guess_type(uri)
        try:
            self.content_type, self.sub_type = mime_type.split('/')
        except (AttributeError, ValueError):
            # guess_type returned None or something that isn't "type/subtype"
            self.content_type = 'text'
            self.sub_type = 'plain'
        super(FileUpload, self).save(*args, **kwargs)
def insert_links(self):
links = []
for key in [self.mime_type(), self.content_type, '']:
if key in settings.ADMINFILES_INSERT_LINKS:
links = settings.ADMINFILES_INSERT_LINKS[key]
break
for link in links:
ref = self.slug
opts = ':'.join(['%s=%s' % (k,v) for k,v in link[1].items()])
if opts:
ref += ':' + opts
yield {'desc': link[0],
'ref': ref}
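    # Example ADMINFILES_INSERT_LINKS structure this method expects, keyed by
    # full mime type, bare content type, or '' as a fallback (illustrative):
    #   {'image/jpeg': [('Image', {'class': 'left'})],
    #    'image': [('Figure', {})],
    #    '': [('Link', {})]}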
def mime_image(self):
if not settings.ADMINFILES_STDICON_SET:
return None
return ('http://www.stdicon.com/%s/%s?size=64'
% (settings.ADMINFILES_STDICON_SET, self.mime_type()))
class FileUploadReference(models.Model):
"""
Tracks which ``FileUpload``s are referenced by which content models.
"""
upload = models.ForeignKey(FileUpload)
content_type = models.ForeignKey(ContentType)
object_id = models.PositiveIntegerField()
content_object = generic.GenericForeignKey('content_type', 'object_id')
class Meta:
unique_together = ('upload', 'content_type', 'object_id')
|
import sys
import os
sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, os.path.abspath('..'))
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.napoleon',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
]
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
project = 'Cabu'
copyright = '2016, Théotime Leveque'
author = 'Théotime Leveque'
version = '0.0.1'
release = '0.0.1'
exclude_patterns = ['_build']
pygments_style = 'sphinx'
todo_include_todos = True
html_theme = 'alabaster'
html_theme_options = {
'logo': 'logo.jpeg',
'github_user': 'thylong',
'github_repo': 'cabu',
'logo_name': True,
'github_banner': True,
'travis_button': False,
'show_powered_by': False
}
html_static_path = ['_static']
html_sidebars = {
'**': [
'about.html',
'navigation.html',
'relations.html',
'searchbox.html',
'donate.html',
]
}
htmlhelp_basename = 'Cabudoc'
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
# Latex figure (float) alignment
# 'figure_align': 'htbp',
}
latex_documents = [
(master_doc, 'Cabu.tex', 'Cabu Documentation',
'Théotime Leveque', 'manual'),
]
man_pages = [
(master_doc, 'cabu', 'Cabu Documentation',
[author], 1)
]
texinfo_documents = [
(master_doc, 'Cabu', 'Cabu Documentation',
author, 'Cabu', 'One line description of project.',
'Miscellaneous'),
]
intersphinx_mapping = {'https://docs.python.org/': None}
|
import factory
import factory.django
from faker import Faker
from machina.core.db.models import get_model
from machina.test.factories.auth import UserFactory
from machina.test.factories.conversation import TopicFactory
faker = Faker()
TopicPoll = get_model('forum_polls', 'TopicPoll')
TopicPollOption = get_model('forum_polls', 'TopicPollOption')
TopicPollVote = get_model('forum_polls', 'TopicPollVote')
class TopicPollFactory(factory.django.DjangoModelFactory):
topic = factory.SubFactory(TopicFactory)
question = faker.text(max_nb_chars=200)
class Meta:
model = TopicPoll
class TopicPollOptionFactory(factory.django.DjangoModelFactory):
poll = factory.SubFactory(TopicPollFactory)
text = faker.text(max_nb_chars=100)
class Meta:
model = TopicPollOption
class TopicPollVoteFactory(factory.django.DjangoModelFactory):
poll_option = factory.SubFactory(TopicPollOptionFactory)
voter = factory.SubFactory(UserFactory)
class Meta:
model = TopicPollVote
|
def _types_gen(T):
yield T
if hasattr(T, 't'):
for l in T.t:
yield l
if hasattr(l, 't'):
for ll in _types_gen(l):
yield ll
class Type(type):
""" A rudimentary extension to `type` that provides polymorphic
    types for run-time type checking of JSON data types, e.g.:
assert type(u'') == String
assert type('') == String
assert type('') == Any
assert Any.kind('') == String
assert Any.decode('str') == String
assert Any.kind({}) == Object
"""
def __init__(self, *args, **kwargs):
type.__init__(self, *args, **kwargs)
def __eq__(self, other):
for T in _types_gen(self):
if isinstance(other, Type):
if T in other.t:
return True
if type.__eq__(T, other):
return True
return False
def __str__(self):
return getattr(self, '_name', 'unknown')
def N(self, n):
self._name = n
return self
def I(self, *args):
self.t = list(args)
return self
def kind(self, t):
if type(t) is Type:
return t
ty = lambda t: type(t)
if type(t) is type:
ty = lambda t: t
return reduce(
lambda L, R: R if (hasattr(R, 't') and ty(t) == R) else L,
filter(lambda T: T is not Any,
_types_gen(self)))
def decode(self, n):
return reduce(
lambda L, R: R if (str(R) == n) else L,
_types_gen(self))
Object = Type('Object', (object,), {}).I(dict).N('obj')
Number = Type('Number', (object,), {}).I(int, long).N('num')
Boolean = Type('Boolean', (object,), {}).I(bool).N('bit')
String = Type('String', (object,), {}).I(str, unicode).N('str')
Array = Type('Array', (object,), {}).I(list, set, tuple).N('arr')
Nil = Type('Nil', (object,), {}).I(type(None)).N('nil')
Any = Type('Any', (object,), {}).I(
Object, Number, Boolean, String, Array, Nil).N('any')
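# Usage sketch (Python 2), mirroring the docstring examples above:
#   assert Any.kind('') == String
#   assert Any.kind({}) == Object
#   assert Any.decode('str') == String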
|
'''
Main entry to worch from a waf wscript file.
Use the following in the options(), configure() and build() waf wscript methods:
ctx.load('orch.tools', tooldir='.')
'''
def options(opt):
opt.add_option('--orch-config', action = 'store', default = 'orch.cfg',
help='Give an orchestration configuration file.')
opt.add_option('--orch-start', action = 'store', default = 'start',
help='Set the section to start the orchestration')
def configure(cfg):
import orch.configure
orch.configure.configure(cfg)
def build(bld):
import orch.build
orch.build.build(bld)
import time
from orch.wafutil import exec_command
from orch.util import string2list
default_step_cwd = dict(
download = '{download_dir}',
unpack = '{source_dir}',
patch = '{source_dir}',
prepare = '{build_dir}',
build = '{build_dir}',
install = '{build_dir}',
)
class WorchConfig(object):
def __init__(self, **pkgcfg):
self._config = pkgcfg
def __getattr__(self, name):
return self._config[name]
def get(self, name, default = None):
return self._config.get(name,default)
def format(self, string, **kwds):
'''
Return a string formatted with kwds and configuration items
'''
d = dict(self._config, **kwds)
return string.format(**d)
def depends_step(self, step):
'''
Return a list of steps that this step depends on
'''
d = self._config.get('depends')
if not d: return list()
ds = [x[1] for x in [s.split(':') for s in string2list(d)] if x[0] == step]
return ds
def dependencies(self):
'''
Return all dependencies set via "depends" configuration items
return list of tuples: (mystep, package, package_step)
eg: ('prepare', 'gcc', 'install')
'''
ret = list()
try:
deps = getattr(self, 'depends', None)
except KeyError:
return list()
for dep in string2list(deps):
mystep, other = dep.split(':')
pkg,pkg_step = other.split('_',1)
ret.append((mystep, pkg, pkg_step))
return ret
def exports(self):
'''
Return all environment settings via export_* configuration items
return list of tuples: (variable, value, operator) for exports
eg: ('PATH', '/blah/blah', 'prepend')
'''
ret = list()
for key,val in self._config.items():
if not key.startswith('export_'):
continue
var = key[len('export_'):]
oper = 'set'
for maybe in ['prepend', 'append', 'set']:
if val.startswith(maybe+':'):
oper = maybe
val = val[len(maybe)+1:]
ret.append((var, val, oper))
return ret
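    # Example configuration items this method parses (illustrative values):
    #   export_PATH = prepend:/opt/tool/bin  ->  ('PATH', '/opt/tool/bin', 'prepend')
    #   export_FOO = bar                     ->  ('FOO', 'bar', 'set')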
from waflib.TaskGen import taskgen_method
@taskgen_method
def worch_hello(self):
'Just testing'
print ("%s" % self.worch.format('Hi from worch, my name is "{package}/{version}" and I am using "{dumpenv_cmd}" with extra {extra}', extra='spice'))
print ('My bld.env: %s' % (self.bld.env.keys(),))
print ('My all_envs: %s' % (sorted(self.bld.all_envs.keys()),))
print ('My env: %s' % (self.env.keys(),))
print ('My groups: %s' % (self.env['orch_group_dict'].keys(),))
print ('My packages: %s' % (self.env['orch_package_list'],))
@taskgen_method
def step(self, name, rule, **kwds):
'''
Make a worch installation step.
This invokes the build context on the rule with the following augmentations:
- the given step name is prefixed with the package name
- if the rule is a string (scriptlet) then the worch exec_command is used
- successful execution of the rule leads to a worch control file being produced.
'''
step_name = '%s_%s' % (self.worch.package, name)
# append control file as an additional output
target = string2list(kwds.get('target', ''))
if not isinstance(target, list):
target = [target]
cn = self.control_node(name)
if not cn in target:
target.append(cn)
kwds['target'] = target
kwds.setdefault('env', self.env)
cwd = kwds.get('cwd')
if not cwd:
cwd = default_step_cwd.get(name)
if cwd:
cwd = self.worch.format(cwd)
cwd = self.make_node(cwd)
msg.debug('orch: using cwd for step "%s": %s' % (step_name, cwd.abspath()))
kwds['cwd'] = cwd.abspath()
depends = self.worch.depends_step(name)
after = string2list(kwds.get('after',[])) + depends
if after:
kwds['after'] = after
msg.debug('orch: run %s AFTER: %s' % (step_name, after))
# functionalize scriptlet
rulefun = rule
if isinstance(rule, type('')):
rulefun = lambda t: exec_command(t, rule)
# curry the real rule function in order to write control file if successful
def runit(t):
rc = rulefun(t)
if not rc:
msg.debug('orch: successfully ran %s' % step_name)
cn.write(time.asctime(time.localtime()) + '\n')
return rc
# msg.debug('orch: step "%s" with %s in %s\nsource=%s\ntarget=%s' % \
# (step_name, rulefun, cwd, kwds.get('source'), kwds.get('target')))
# have to switch group each time as steps are called already asynchronously
self.bld.set_group(self.worch.group)
return self.bld(name=step_name, rule = runit, **kwds)
@taskgen_method
def control_node(self, step, package = None):
'''
Return a node for the control file given step of this package or optionally another package.
'''
if not package:
package = self.worch.package
filename = '%s_%s' % (package, step)
path = self.worch.format('{control_dir}/{filename}', filename=filename)
return self.path.find_or_declare(path)
@taskgen_method
def make_node(self, path, parent_node=None):
if not parent_node:
if path.startswith('/'):
parent_node = self.bld.root
else:
parent_node = self.bld.bldnode
return parent_node.make_node(path)
import waflib.Logs as msg
from waflib.Build import BuildContext
def worch_package(ctx, worch_config, *args, **kw):
# transfer waf-specific keywords explicitly
kw['name'] = worch_config['package']
kw['features'] = ' '.join(string2list(worch_config['features']))
kw['use'] = worch_config.get('use')
# make the TaskGen object for the package
worch=WorchConfig(**worch_config)
tgen = ctx(*args, worch=worch, **kw)
tgen.env = ctx.all_envs[worch.package]
tgen.env.env = tgen.env.munged_env
msg.debug('orch: package "%s" with features: %s' % \
(kw['name'], ', '.join(kw['features'].split())))
return tgen
BuildContext.worch_package = worch_package
del worch_package
|
from djpcms import sites
from djpcms.http import get_http
from djpcms.template import RequestContext, loader
from djpcms.views.baseview import djpcmsview
class badview(djpcmsview):
def __init__(self, template, httphandler):
self.template = template
self.httphandler = httphandler
super(badview,self).__init__()
def response(self, request):
t = loader.get_template(self.template)
c = {'request_path': request.path,
'grid': self.grid960()}
return self.httphandler(t.render(RequestContext(request, c)))
def http404view(request, *args, **kwargs):
http = get_http(sites.settings.HTTP_LIBRARY)
return badview('404.html',
http.HttpResponseNotFound).response(request)
def http500view(request, *args, **kwargs):
http = get_http(sites.settings.HTTP_LIBRARY)
return badview('500.html',
http.HttpResponseServerError).response(request)
|
""" Models for controlling the text and visual formatting of tick
labels on Bokeh plot axes.
"""
from __future__ import absolute_import
from .tickers import Ticker
from ..model import Model
from ..core.properties import abstract
from ..core.properties import Bool, Int, String, Enum, Auto, List, Dict, Either, Instance
from ..core.enums import DatetimeUnits, RoundingFunction, NumeralLanguage
@abstract
class TickFormatter(Model):
""" A base class for all tick formatter types. ``TickFormatter`` is
not generally useful to instantiate on its own.
"""
pass
class BasicTickFormatter(TickFormatter):
""" Display tick values from continuous ranges as "basic numbers",
using scientific notation when appropriate by default.
"""
precision = Either(Auto, Int, help="""
How many digits of precision to display in tick labels.
""")
use_scientific = Bool(True, help="""
Whether to ever display scientific notation. If ``True``, then
when to use scientific notation is controlled by ``power_limit_low``
and ``power_limit_high``.
""")
power_limit_high = Int(5, help="""
Limit the use of scientific notation to when::
log(x) >= power_limit_high
""")
power_limit_low = Int(-3, help="""
Limit the use of scientific notation to when::
log(x) <= power_limit_low
""")
class NumeralTickFormatter(TickFormatter):
""" Tick formatter based on a human-readable format string. """
format = String("0,0", help="""
The number format, as defined in the following tables:
**NUMBERS**:
============ ============== ===============
Number Format String
============ ============== ===============
10000 '0,0.0000' 10,000.0000
10000.23 '0,0' 10,000
10000.23 '+0,0' +10,000
-10000 '0,0.0' -10,000.0
10000.1234 '0.000' 10000.123
10000.1234 '0[.]00000' 10000.12340
-10000 '(0,0.0000)' (10,000.0000)
-0.23 '.00' -.23
-0.23 '(.00)' (.23)
0.23 '0.00000' 0.23000
0.23 '0.0[0000]' 0.23
1230974 '0.0a' 1.2m
1460 '0 a' 1 k
-104000 '0a' -104k
1 '0o' 1st
52 '0o' 52nd
23 '0o' 23rd
100 '0o' 100th
============ ============== ===============
**CURRENCY**:
=========== =============== =============
Number Format String
=========== =============== =============
1000.234 '$0,0.00' $1,000.23
1000.2 '0,0[.]00 $' 1,000.20 $
1001 '$ 0,0[.]00' $ 1,001
-1000.234 '($0,0)' ($1,000)
-1000.234 '$0.00' -$1000.23
1230974 '($ 0.00 a)' $ 1.23 m
=========== =============== =============
**BYTES**:
=============== =========== ============
Number Format String
=============== =========== ============
100 '0b' 100B
2048 '0 b' 2 KB
7884486213 '0.0b' 7.3GB
3467479682787 '0.000 b' 3.154 TB
=============== =========== ============
**PERCENTAGES**:
============= ============= ===========
Number Format String
============= ============= ===========
1 '0%' 100%
0.974878234 '0.000%' 97.488%
-0.43 '0 %' -43 %
0.43 '(0.000 %)' 43.000 %
============= ============= ===========
**TIME**:
============ ============== ============
Number Format String
============ ============== ============
25 '00:00:00' 0:00:25
238 '00:00:00' 0:03:58
63846 '00:00:00' 17:44:06
============ ============== ============
For the complete specification, see http://numbrojs.com/format.html
""")
language = Enum(NumeralLanguage, default="en", help="""
The language to use for formatting language-specific features (e.g. thousands separator).
""")
rounding = Enum(RoundingFunction, help="""
Rounding functions (round, floor, ceil) and their synonyms (nearest, rounddown, roundup).
""")
class PrintfTickFormatter(TickFormatter):
""" Tick formatter based on a printf-style format string. """
format = String("%s", help="""
The number format, as defined as follows: the placeholder in the format
string is marked by % and is followed by one or more of these elements,
in this order:
* An optional ``+`` sign
Causes the result to be preceded with a plus or minus sign on numeric
values. By default, only the ``-`` sign is used on negative numbers.
* An optional padding specifier
Specifies what (if any) character to use for padding. Possible values
are 0 or any other character preceded by a ``'`` (single quote). The
default is to pad with spaces.
* An optional ``-`` sign
Causes sprintf to left-align the result of this placeholder. The default
is to right-align the result.
* An optional number
Specifies how many characters the result should have. If the value to be
returned is shorter than this number, the result will be padded.
* An optional precision modifier
Consists of a ``.`` (dot) followed by a number, specifies how many digits
should be displayed for floating point numbers. When used on a string, it
causes the result to be truncated.
* A type specifier
Can be any of:
- ``%`` --- yields a literal ``%`` character
- ``b`` --- yields an integer as a binary number
- ``c`` --- yields an integer as the character with that ASCII value
- ``d`` or ``i`` --- yields an integer as a signed decimal number
- ``e`` --- yields a float using scientific notation
- ``u`` --- yields an integer as an unsigned decimal number
- ``f`` --- yields a float as is
- ``o`` --- yields an integer as an octal number
- ``s`` --- yields a string as is
- ``x`` --- yields an integer as a hexadecimal number (lower-case)
- ``X`` --- yields an integer as a hexadecimal number (upper-case)
""")
class LogTickFormatter(TickFormatter):
""" Display tick values from continuous ranges as powers
of some base.
Most often useful in conjunction with a ``LogTicker``.
"""
ticker = Instance(Ticker, help="""
The corresponding ``LogTicker``, used to determine the correct
base to use. If unset, the formatter will use base 10 as a default.
""")
class CategoricalTickFormatter(TickFormatter):
""" Display tick values from categorical ranges as string
values.
"""
pass
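# Usage sketch: formatters are assigned to an axis of a bokeh.plotting
# figure `p` (assumed to exist), e.g.:
#   p.xaxis.formatter = NumeralTickFormatter(format="0.0%")
#   p.yaxis[0].formatter = PrintfTickFormatter(format="%4.1e")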
DEFAULT_DATETIME_FORMATS = lambda : {
'microseconds': ['%fus'],
'milliseconds': ['%3Nms', '%S.%3Ns'],
'seconds': ['%Ss'],
'minsec': [':%M:%S'],
'minutes': [':%M', '%Mm'],
'hourmin': ['%H:%M'],
'hours': ['%Hh', '%H:%M'],
'days': ['%m/%d', '%a%d'],
'months': ['%m/%Y', '%b%y'],
'years': ['%Y'],
}
class DatetimeTickFormatter(TickFormatter):
""" Display tick values from a continuous range as formatted
datetimes.
"""
formats = Dict(Enum(DatetimeUnits), List(String), default=DEFAULT_DATETIME_FORMATS, help="""
User defined formats for displaying datetime values.
The enum values correspond roughly to different "time scales". The
corresponding value is a list of `strftime`_ formats to use for
    formatting datetime tick values that fall in that "time scale".
By default, only the first format string passed for each time scale
will be used. By default, all leading zeros are stripped away from
the formatted labels. These behaviors cannot be changed as of now.
An example of specifying the same date format over a range of time scales::
DatetimeTickFormatter(
formats=dict(
hours=["%B %Y"],
days=["%B %Y"],
months=["%B %Y"],
years=["%B %Y"],
)
)
This list of supported `strftime`_ formats is reproduced below.
.. warning::
The client library BokehJS uses the `timezone`_ library to
format datetimes. The inclusion of the list below is based on the
    claim that `timezone`_ makes to support "the full complement
of GNU date format specifiers." However, this claim has not
been tested exhaustively against this list. If you find formats
that do not function as expected, please submit a `github issue`_,
so that the documentation can be updated appropriately.
%a
The abbreviated name of the day of the week according to the
current locale.
%A
The full name of the day of the week according to the current
locale.
%b
The abbreviated month name according to the current locale.
%B
The full month name according to the current locale.
%c
The preferred date and time representation for the current
locale.
%C
The century number (year/100) as a 2-digit integer.
%d
The day of the month as a decimal number (range 01 to 31).
%D
Equivalent to %m/%d/%y. (Americans should note that in many
other countries %d/%m/%y is rather common. This means that in
international context this format is ambiguous and should not
be used.)
%e
Like %d, the day of the month as a decimal number, but a
leading zero is replaced by a space.
%f
Microsecond as a decimal number, zero-padded on the left (range
000000-999999). This is an extension to the set of directives
available to `timezone`_.
%F
Equivalent to %Y-%m-%d (the ISO 8601 date format).
%G
The ISO 8601 week-based year with century as a decimal number.
The 4-digit year corresponding to the ISO week number (see %V).
This has the same format and value as %Y, except that if the
ISO week number belongs to the previous or next year, that year
is used instead.
%g
Like %G, but without century, that is, with a 2-digit year (00-99).
%h
Equivalent to %b.
%H
The hour as a decimal number using a 24-hour clock (range 00
to 23).
%I
The hour as a decimal number using a 12-hour clock (range 01
to 12).
%j
The day of the year as a decimal number (range 001 to 366).
%k
The hour (24-hour clock) as a decimal number (range 0 to 23).
Single digits are preceded by a blank. (See also %H.)
%l
The hour (12-hour clock) as a decimal number (range 1 to 12).
Single digits are preceded by a blank. (See also %I.) (TZ)
%m
The month as a decimal number (range 01 to 12).
%M
The minute as a decimal number (range 00 to 59).
%n
A newline character. Bokeh text does not currently support
newline characters.
%N
Nanosecond as a decimal number, zero-padded on the left (range
000000000-999999999). Supports a padding width specifier, i.e.
%3N displays 3 leftmost digits. However, this is only accurate
to the millisecond level of precision due to limitations of
`timezone`_.
%p
Either "AM" or "PM" according to the given time value, or the
corresponding strings for the current locale. Noon is treated
as "PM" and midnight as "AM".
%P
Like %p but in lowercase: "am" or "pm" or a corresponding
string for the current locale.
%r
The time in a.m. or p.m. notation. In the POSIX locale this
is equivalent to %I:%M:%S %p.
%R
The time in 24-hour notation (%H:%M). For a version including
the seconds, see %T below.
%s
The number of seconds since the Epoch, 1970-01-01 00:00:00
+0000 (UTC).
%S
The second as a decimal number (range 00 to 60). (The range
is up to 60 to allow for occasional leap seconds.)
%t
A tab character. Bokeh text does not currently support tab
characters.
%T
The time in 24-hour notation (%H:%M:%S).
%u
The day of the week as a decimal, range 1 to 7, Monday being 1.
See also %w.
%U
The week number of the current year as a decimal number, range
00 to 53, starting with the first Sunday as the first day of
week 01. See also %V and %W.
%V
The ISO 8601 week number (see NOTES) of the current year as a
decimal number, range 01 to 53, where week 1 is the first week
that has at least 4 days in the new year. See also %U and %W.
%w
The day of the week as a decimal, range 0 to 6, Sunday being 0.
See also %u.
%W
The week number of the current year as a decimal number, range
00 to 53, starting with the first Monday as the first day of
week 01.
%x
The preferred date representation for the current locale
without the time.
%X
The preferred time representation for the current locale
without the date.
%y
The year as a decimal number without a century (range 00 to 99).
%Y
The year as a decimal number including the century.
%z
The +hhmm or -hhmm numeric timezone (that is, the hour and
minute offset from UTC).
%Z
The timezone name or abbreviation.
%%
A literal '%' character.
.. _strftime: http://man7.org/linux/man-pages/man3/strftime.3.html
.. _timezone: http://bigeasy.github.io/timezone/
.. _github issue: https://github.com/bokeh/bokeh/issues
""")
|
"""
Base/mixin classes for the spatial backend database operations and the
`SpatialRefSys` model for the backend.
"""
import re
from django.contrib.gis import gdal
from django.utils import six
from django.utils.encoding import python_2_unicode_compatible
class BaseSpatialOperations(object):
"""
This module holds the base `BaseSpatialBackend` object, which is
instantiated by each spatial database backend with the features
it has.
"""
distance_functions = {}
geometry_functions = {}
geometry_operators = {}
geography_operators = {}
geography_functions = {}
gis_terms = set()
truncate_params = {}
# Quick booleans for the type of this spatial backend, and
# an attribute for the spatial database version tuple (if applicable)
postgis = False
spatialite = False
mysql = False
oracle = False
spatial_version = None
# How the geometry column should be selected.
select = None
# Does the spatial database have a geometry or geography type?
geography = False
geometry = False
area = False
centroid = False
difference = False
distance = False
distance_sphere = False
distance_spheroid = False
envelope = False
force_rhr = False
mem_size = False
bounding_circle = False
num_geom = False
num_points = False
perimeter = False
perimeter3d = False
point_on_surface = False
polygonize = False
reverse = False
scale = False
snap_to_grid = False
sym_difference = False
transform = False
translate = False
union = False
# Aggregates
collect = False
extent = False
extent3d = False
make_line = False
unionagg = False
# Serialization
geohash = False
geojson = False
gml = False
kml = False
svg = False
# Constructors
from_text = False
from_wkb = False
# Default conversion functions for aggregates; will be overridden if implemented
# for the spatial backend.
def convert_extent(self, box):
raise NotImplementedError('Aggregate extent not implemented for this spatial backend.')
def convert_extent3d(self, box):
raise NotImplementedError('Aggregate 3D extent not implemented for this spatial backend.')
def convert_geom(self, geom_val, geom_field):
raise NotImplementedError('Aggregate method not implemented for this spatial backend.')
# For quoting column values, rather than columns.
def geo_quote_name(self, name):
return "'%s'" % name
# GeometryField operations
def geo_db_type(self, f):
"""
Returns the database column type for the geometry field on
the spatial backend.
"""
raise NotImplementedError('subclasses of BaseSpatialOperations must provide a geo_db_type() method')
def get_distance(self, f, value, lookup_type):
"""
Returns the distance parameters for the given geometry field,
lookup value, and lookup type.
"""
raise NotImplementedError('Distance operations not available on this spatial backend.')
def get_geom_placeholder(self, f, value):
"""
Returns the placeholder for the given geometry field with the given
value. Depending on the spatial backend, the placeholder may contain a
stored procedure call to the transformation function of the spatial
backend.
"""
        raise NotImplementedError('subclasses of BaseSpatialOperations must provide a get_geom_placeholder() method')
def get_expression_column(self, evaluator):
"""
Helper method to return the quoted column string from the evaluator
for its expression.
"""
for expr, col_tup in evaluator.cols:
if expr is evaluator.expression:
return '%s.%s' % tuple(map(self.quote_name, col_tup))
raise Exception("Could not find the column for the expression.")
# Spatial SQL Construction
def spatial_aggregate_sql(self, agg):
raise NotImplementedError('Aggregate support not implemented for this spatial backend.')
    def spatial_lookup_sql(self, lvalue, lookup_type, value, field):
        raise NotImplementedError('subclasses of BaseSpatialOperations must provide a spatial_lookup_sql() method')
    # Routines for getting the OGC-compliant models.
    def geometry_columns(self):
        raise NotImplementedError('subclasses of BaseSpatialOperations must provide a geometry_columns() method')
    def spatial_ref_sys(self):
        raise NotImplementedError('subclasses of BaseSpatialOperations must provide a spatial_ref_sys() method')
@python_2_unicode_compatible
class SpatialRefSysMixin(object):
"""
The SpatialRefSysMixin is a class used by the database-dependent
    SpatialRefSys objects to reduce redundant code.
"""
# For pulling out the spheroid from the spatial reference string. This
# regular expression is used only if the user does not have GDAL installed.
# TODO: Flattening not used in all ellipsoids, could also be a minor axis,
# or 'b' parameter.
spheroid_regex = re.compile(r'.+SPHEROID\[\"(?P<name>.+)\",(?P<major>\d+(\.\d+)?),(?P<flattening>\d{3}\.\d+),')
# For pulling out the units on platforms w/o GDAL installed.
# TODO: Figure out how to pull out angular units of projected coordinate system and
# fix for LOCAL_CS types. GDAL should be highly recommended for performing
# distance queries.
units_regex = re.compile(r'.+UNIT ?\["(?P<unit_name>[\w \'\(\)]+)", ?(?P<unit>[\d\.]+)(,AUTHORITY\["(?P<unit_auth_name>[\w \'\(\)]+)","(?P<unit_auth_val>\d+)"\])?\]([\w ]+)?(,AUTHORITY\["(?P<auth_name>[\w \'\(\)]+)","(?P<auth_val>\d+)"\])?\]$')
@property
def srs(self):
"""
Returns a GDAL SpatialReference object, if GDAL is installed.
"""
if gdal.HAS_GDAL:
# TODO: Is caching really necessary here? Is complexity worth it?
if hasattr(self, '_srs'):
# Returning a clone of the cached SpatialReference object.
return self._srs.clone()
else:
# Attempting to cache a SpatialReference object.
# Trying to get from WKT first.
                msg = None
                try:
                    self._srs = gdal.SpatialReference(self.wkt)
                    return self.srs
                except Exception as e:
                    msg = e
                try:
                    self._srs = gdal.SpatialReference(self.proj4text)
                    return self.srs
                except Exception as e:
                    # bind to a name that survives the except block (Python 3
                    # clears "as" targets when the block exits)
                    msg = e
                raise Exception('Could not get OSR SpatialReference from WKT: %s\nError:\n%s' % (self.wkt, msg))
else:
raise Exception('GDAL is not installed.')
@property
def ellipsoid(self):
"""
Returns a tuple of the ellipsoid parameters:
(semimajor axis, semiminor axis, and inverse flattening).
"""
if gdal.HAS_GDAL:
return self.srs.ellipsoid
else:
m = self.spheroid_regex.match(self.wkt)
if m:
return (float(m.group('major')), float(m.group('flattening')))
else:
return None
@property
def name(self):
"Returns the projection name."
return self.srs.name
@property
def spheroid(self):
"Returns the spheroid name for this spatial reference."
return self.srs['spheroid']
@property
def datum(self):
"Returns the datum for this spatial reference."
return self.srs['datum']
@property
def projected(self):
"Is this Spatial Reference projected?"
if gdal.HAS_GDAL:
return self.srs.projected
else:
return self.wkt.startswith('PROJCS')
@property
def local(self):
"Is this Spatial Reference local?"
if gdal.HAS_GDAL:
return self.srs.local
else:
return self.wkt.startswith('LOCAL_CS')
@property
def geographic(self):
"Is this Spatial Reference geographic?"
if gdal.HAS_GDAL:
return self.srs.geographic
else:
return self.wkt.startswith('GEOGCS')
@property
def linear_name(self):
"Returns the linear units name."
if gdal.HAS_GDAL:
return self.srs.linear_name
elif self.geographic:
return None
else:
m = self.units_regex.match(self.wkt)
return m.group('unit_name')
@property
def linear_units(self):
"Returns the linear units."
if gdal.HAS_GDAL:
return self.srs.linear_units
elif self.geographic:
return None
else:
m = self.units_regex.match(self.wkt)
return m.group('unit')
@property
def angular_name(self):
"Returns the name of the angular units."
if gdal.HAS_GDAL:
return self.srs.angular_name
elif self.projected:
return None
else:
m = self.units_regex.match(self.wkt)
return m.group('unit_name')
@property
def angular_units(self):
"Returns the angular units."
if gdal.HAS_GDAL:
return self.srs.angular_units
elif self.projected:
return None
else:
m = self.units_regex.match(self.wkt)
return m.group('unit')
@property
def units(self):
"Returns a tuple of the units and the name."
if self.projected or self.local:
return (self.linear_units, self.linear_name)
elif self.geographic:
return (self.angular_units, self.angular_name)
else:
return (None, None)
@classmethod
def get_units(cls, wkt):
"""
Class method used by GeometryField on initialization to
        retrieve the units on the given WKT, without having to use
any of the database fields.
"""
if gdal.HAS_GDAL:
return gdal.SpatialReference(wkt).units
else:
m = cls.units_regex.match(wkt)
return m.group('unit'), m.group('unit_name')
@classmethod
def get_spheroid(cls, wkt, string=True):
"""
Class method used by GeometryField on initialization to
retrieve the `SPHEROID[..]` parameters from the given WKT.
"""
if gdal.HAS_GDAL:
srs = gdal.SpatialReference(wkt)
sphere_params = srs.ellipsoid
sphere_name = srs['spheroid']
else:
m = cls.spheroid_regex.match(wkt)
if m:
sphere_params = (float(m.group('major')), float(m.group('flattening')))
sphere_name = m.group('name')
else:
return None
if not string:
return sphere_name, sphere_params
else:
# `string` parameter used to place in format acceptable by PostGIS
if len(sphere_params) == 3:
radius, flattening = sphere_params[0], sphere_params[2]
else:
radius, flattening = sphere_params
return 'SPHEROID["%s",%s,%s]' % (sphere_name, radius, flattening)
def __str__(self):
"""
Returns the string representation. If GDAL is installed,
it will be 'pretty' OGC WKT.
"""
try:
return six.text_type(self.srs)
except Exception:
return six.text_type(self.wkt)
|
__author__ = 'oglebrandon'
import logging as logger
import types
from ib.ext.EWrapper import EWrapper
def showmessage(message, mapping):
try:
del(mapping['self'])
except (KeyError, ):
pass
    items = sorted(mapping.items())
print '### %s' % (message, )
for k, v in items:
print ' %s:%s' % (k, v)
class Observable(object):
"""
Sender -> dispatches messages to interested callables
"""
def __init__(self):
self.listeners = {}
self.logger = logger.getLogger()
def register(self,listener,events=None):
"""
register a listener function
Parameters
-----------
listener : external listener function
events : tuple or list of relevant events (default=None)
"""
if events is not None and type(events) not in \
(types.TupleType,types.ListType):
events = (events,)
self.listeners[listener] = events
def dispatch(self,event=None, msg=None):
"""notify listeners """
for listener,events in self.listeners.items():
if events is None or event is None or event in events:
try:
listener(self,event,msg)
except (Exception,):
self.unregister(listener)
errmsg = "Exception in message dispatch: Handler '{0}' " \
"unregistered for event " \
"'{1}' ".format(listener.func_name,event)
self.logger.exception(errmsg)
def unregister(self,listener):
""" unregister listener function """
del self.listeners[listener]
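# Usage sketch (illustrative listener): only 'execution' events are delivered.
#   def on_execution(sender, event, msg):
#       print 'got', event, msg
#   obs = Observable()
#   obs.register(on_execution, events=('execution',))
#   obs.dispatch(event='execution', msg=[1, 2, 3])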
class ReferenceWrapper(EWrapper,Observable):
# contract = None
# tickerId
# field
# price
    def __init__(self, subs=None):
        super(ReferenceWrapper, self).__init__()
        self.orderID = None
        # avoid the shared-mutable-default-argument pitfall
        self.subscriptions = subs if subs is not None else {}
def setSubscriptions (self,subs):
self.subscriptions = subs
def tickGeneric(self, tickerId, field, price):
pass
def tickPrice(self, tickerId, field, price, canAutoExecute):
showmessage('tickPrice', vars())
def tickSize(self, tickerId, field, size):
showmessage('tickSize', vars())
def tickString(self, tickerId, tickType, value):
#showmessage('tickString', vars())
pass
def tickOptionComputation(self, tickerId, field,
impliedVolatility, delta,
x, c, q, w, e, r):
#showmessage('tickOptionComputation', vars())
pass
def openOrderEnd(self):
pass
def orderStatus(self, orderId, status, filled, remaining,
avgFillPrice, permId, parentId, lastFillPrice,
                    clientId, whyHeld):
if filled:
self.dispatch(event='execution',msg=[1,2,3])
showmessage('orderStatus', vars())
def openOrder(self, orderId, contract, order, state):
showmessage('openOrder', vars())
def connectionClosed(self):
showmessage('connectionClosed', {})
def updateAccountValue(self, key, value, currency, accountName):
showmessage('updateAccountValue', vars())
def updatePortfolio(self, contract, position, marketPrice,
marketValue, averageCost, unrealizedPNL,
realizedPNL, accountName):
showmessage('updatePortfolio', vars())
def updateAccountTime(self, timeStamp):
showmessage('updateAccountTime', vars())
def nextValidId(self, orderId):
self.orderID = orderId
showmessage('nextValidId', vars())
def contractDetails(self, reqId, contractDetails):
showmessage('contractDetails', vars())
print contractDetails.__dict__
def bondContractDetails(self, reqId, contractDetails):
showmessage('bondContractDetails', vars())
def execDetails(self, orderId, contract, execution):
showmessage('execDetails', vars())
def error(self, id=None, errorCode=None, errorMsg=None):
showmessage('error', vars())
def updateMktDepth(self, tickerId, position, operation, side, price, size):
showmessage('updateMktDepth', vars())
def updateMktDepthL2(self, tickerId, position,
marketMaker, operation,
side, price, size):
showmessage('updateMktDepthL2', vars())
def updateNewsBulletin(self, msgId, msgType, message, origExchange):
showmessage('updateNewsBulletin', vars())
def managedAccounts(self, accountsList):
showmessage('managedAccounts', vars())
def receiveFA(self, faDataType, xml):
showmessage('receiveFA', vars())
def historicalData(self, reqId, date,
open, high, low, close,
volume, count, WAP, hasGaps):
showmessage('historicalData', vars())
def scannerParameters(self, xml):
showmessage('scannerParameters', vars())
def scannerData(self, reqId, rank, contractDetails,
distance, benchmark, projection, legsStr):
showmessage('scannerData', vars())
def accountDownloadEnd(self, accountName):
showmessage('accountDownloadEnd', vars())
def contractDetailsEnd(self, reqId):
showmessage('contractDetailsEnd', vars())
def currentTime(self):
showmessage('currentTime', vars())
def deltaNeutralValidation(self):
showmessage('deltaNeutralValidation', vars())
def error_0(self):
showmessage('error_0', vars())
def error_1(self):
showmessage('error_1', vars())
def execDetailsEnd(self):
showmessage('execDetailsEnd', vars())
def fundamentalData(self):
showmessage('fundamentalData', vars())
def realtimeBar(self):
showmessage('realtimeBar', vars())
def scannerDataEnd(self):
showmessage('scannerDataEnd', vars())
def tickEFP(self):
showmessage('tickEFP', vars())
def tickSnapshotEnd(self):
showmessage('tickSnapshotEnd', vars())
def marketDataType(self):
showmessage('marketDataType', vars())
def commissionReport(self, commissionReport):
showmessage('commissionReport', vars())
|
import sys
import os
import subprocess
file_list, tmp_dir, out_dir, fastq_dump = sys.argv[1:5]
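# Expected file_list format: whitespace-separated fields per line ('#' starts
# a comment), where field 2 is an SRX accession and field 3 is a
# comma-separated list of SRR run accessions, e.g.:
#   sample_A  SRX000001  SRR000001,SRR000002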
files = []
for line in open(file_list, 'r'):
line = line.strip()
if not line or line.startswith('#'):
continue
fields = line.split()
srx = fields[1]
for srr in fields[2].split(','):
files.append([srr, srx])
for file in files:
srr, srx = file
if (not os.path.exists("%s/%s_1.fastq" % (out_dir, srr)) or
not os.path.exists("%s/%s_2.fastq" % (out_dir, srr))):
if not os.path.exists("%s/%s.sra" % (tmp_dir, srr)):
subprocess.call('wget ftp://ftp-trace.ncbi.nlm.nih.gov/sra/sra-instant/reads/ByExp/sra/%s/%s/%s/%s/%s.sra -O %s' % (srx[:3], srx[:6], srx, srr, srr, "%s/%s.sra" % (tmp_dir, srr)), shell=True)
for file in files:
srr, srx = file
if (not os.path.exists("%s/%s_1.fastq" % (out_dir, srr)) or
not os.path.exists("%s/%s_2.fastq" % (out_dir, srr))):
subprocess.call('cd %s; %s %s.sra --split-3' % (tmp_dir, fastq_dump, srr), shell=True)
subprocess.call('mv %s/%s_1.fastq %s/' % (tmp_dir, srr, out_dir), shell=True)
subprocess.call('mv %s/%s_2.fastq %s/' % (tmp_dir, srr, out_dir), shell=True)
subprocess.call('rm %s/%s.sra' % (tmp_dir, srr), shell=True)
|
'''
Various vertical coordinates
Presently, only ocean s-coordinates are supported. Future plans will be to
include all of the vertical coordinate systems defined by the CF conventions.
'''
__docformat__ = "restructuredtext en"
import numpy as np
import warnings
class s_coordinate(object):
"""
Song and Haidvogel (1994) vertical coordinate transformation (Vtransform=1) and
stretching functions (Vstretching=1).
return an object that can be indexed to return depths
s = s_coordinate(h, theta_b, theta_s, Tcline, N)
"""
def __init__(self, h, theta_b, theta_s, Tcline, N, hraw=None, zeta=None):
self.hraw = hraw
self.h = np.asarray(h)
        self.hmin = self.h.min()
self.theta_b = theta_b
self.theta_s = theta_s
self.Tcline = Tcline
self.N = int(N)
self.Np = self.N+1
self.hc = min(self.hmin, self.Tcline)
self.Vtrans = 1
if (self.Tcline > self.hmin):
warnings.warn('Vertical transformation parameters are not defined correctly in either gridid.txt or in the history files: \n Tcline = %d and hmin = %d. \n You need to make sure that Tcline <= hmin when using transformation 1.' %(self.Tcline,self.hmin))
self.c1 = 1.0
self.c2 = 2.0
self.p5 = 0.5
if zeta is None:
            self.zeta = np.zeros(self.h.shape)
else:
self.zeta = zeta
self._get_s_rho()
self._get_s_w()
self._get_Cs_r()
self._get_Cs_w()
self.z_r = z_r(self.h, self.hc, self.N, self.s_rho, self.Cs_r, self.zeta, self.Vtrans)
self.z_w = z_w(self.h, self.hc, self.Np, self.s_w, self.Cs_w, self.zeta, self.Vtrans)
def _get_s_rho(self):
lev = np.arange(1,self.N+1,1)
ds = 1.0 / self.N
self.s_rho = -self.c1 + (lev - self.p5) * ds
def _get_s_w(self):
lev = np.arange(0,self.Np,1)
ds = 1.0 / (self.Np-1)
self.s_w = -self.c1 + lev * ds
def _get_Cs_r(self):
if (self.theta_s >= 0):
Ptheta = np.sinh(self.theta_s * self.s_rho) / np.sinh(self.theta_s)
Rtheta = np.tanh(self.theta_s * (self.s_rho + self.p5)) / \
(self.c2 * np.tanh(self.p5 * self.theta_s)) - self.p5
self.Cs_r = (self.c1 - self.theta_b) * Ptheta + self.theta_b * Rtheta
else:
self.Cs_r = self.s_rho
def _get_Cs_w(self):
if (self.theta_s >= 0):
Ptheta = np.sinh(self.theta_s * self.s_w) / np.sinh(self.theta_s)
Rtheta = np.tanh(self.theta_s * (self.s_w + self.p5)) / \
(self.c2 * np.tanh(self.p5 * self.theta_s)) - self.p5
self.Cs_w = (self.c1 - self.theta_b) * Ptheta + self.theta_b * Rtheta
else:
self.Cs_w = self.s_w
class s_coordinate_2(s_coordinate):
"""
A. Shchepetkin (2005) UCLA-ROMS vertical coordinate transformation (Vtransform=2) and
stretching functions (Vstretching=2).
return an object that can be indexed to return depths
s = s_coordinate_2(h, theta_b, theta_s, Tcline, N)
"""
def __init__(self, h, theta_b, theta_s, Tcline, N, hraw=None, zeta=None):
self.hraw = hraw
self.h = np.asarray(h)
        self.hmin = self.h.min()
self.theta_b = theta_b
self.theta_s = theta_s
self.Tcline = Tcline
self.N = int(N)
self.Np = self.N+1
self.hc = self.Tcline
self.Vtrans = 2
self.Aweight = 1.0
self.Bweight = 1.0
self.c1 = 1.0
self.c2 = 2.0
self.p5 = 0.5
if zeta is None:
            self.zeta = np.zeros(self.h.shape)
else:
self.zeta = zeta
self._get_s_rho()
self._get_s_w()
self._get_Cs_r()
self._get_Cs_w()
self.z_r = z_r(self.h, self.hc, self.N, self.s_rho, self.Cs_r, self.zeta, self.Vtrans)
self.z_w = z_w(self.h, self.hc, self.Np, self.s_w, self.Cs_w, self.zeta, self.Vtrans)
def _get_s_rho(self):
super(s_coordinate_2, self)._get_s_rho()
def _get_s_w(self):
super(s_coordinate_2, self)._get_s_w()
def _get_Cs_r(self):
if (self.theta_s >= 0):
Csur = (self.c1 - np.cosh(self.theta_s * self.s_rho)) / \
(np.cosh(self.theta_s) - self.c1)
if (self.theta_b >= 0):
Cbot = np.sinh(self.theta_b * (self.s_rho + self.c1)) / \
np.sinh(self.theta_b) - self.c1
Cweight = (self.s_rho + self.c1)**self.Aweight * \
(self.c1 + (self.Aweight / self.Bweight) * \
(self.c1 - (self.s_rho + self.c1)**self.Bweight))
self.Cs_r = Cweight * Csur + (self.c1 - Cweight) * Cbot
else:
self.Cs_r = Csur
else:
self.Cs_r = self.s_rho
def _get_Cs_w(self):
if (self.theta_s >= 0):
Csur = (self.c1 - np.cosh(self.theta_s * self.s_w)) / \
(np.cosh(self.theta_s) - self.c1)
if (self.theta_b >= 0):
Cbot = np.sinh(self.theta_b * (self.s_w + self.c1)) / \
np.sinh(self.theta_b) - self.c1
Cweight = (self.s_w + self.c1)**self.Aweight * \
(self.c1 + (self.Aweight / self.Bweight) * \
(self.c1 - (self.s_w + self.c1)**self.Bweight))
self.Cs_w = Cweight * Csur + (self.c1 - Cweight) * Cbot
else:
self.Cs_w = Csur
else:
self.Cs_w = self.s_w
class s_coordinate_4(s_coordinate):
"""
A. Shchepetkin (2005) UCLA-ROMS vertical coordinate transformation (Vtransform=2) and
stretching functions (Vstretching=4).
return an object that can be indexed to return depths
s = s_coordinate_4(h, theta_b, theta_s, Tcline, N)
"""
def __init__(self, h, theta_b, theta_s, Tcline, N, hraw=None, zeta=None):
self.hraw = hraw
self.h = np.asarray(h)
        self.hmin = self.h.min()
self.theta_b = theta_b
self.theta_s = theta_s
self.Tcline = Tcline
self.N = int(N)
self.Np = self.N+1
self.hc = self.Tcline
self.Vtrans = 4
self.c1 = 1.0
self.c2 = 2.0
self.p5 = 0.5
if zeta is None:
            self.zeta = np.zeros(self.h.shape)
else:
self.zeta = zeta
self._get_s_rho()
self._get_s_w()
self._get_Cs_r()
self._get_Cs_w()
self.z_r = z_r(self.h, self.hc, self.N, self.s_rho, self.Cs_r, self.zeta, self.Vtrans)
self.z_w = z_w(self.h, self.hc, self.Np, self.s_w, self.Cs_w, self.zeta, self.Vtrans)
def _get_s_rho(self):
super(s_coordinate_4, self)._get_s_rho()
def _get_s_w(self):
super(s_coordinate_4, self)._get_s_w()
def _get_Cs_r(self):
if (self.theta_s > 0):
Csur = (self.c1 - np.cosh(self.theta_s * self.s_rho)) / \
(np.cosh(self.theta_s) - self.c1)
else:
Csur = -self.s_rho**2
if (self.theta_b > 0):
Cbot = (np.exp(self.theta_b * Csur) - self.c1 ) / \
(self.c1 - np.exp(-self.theta_b))
self.Cs_r = Cbot
else:
self.Cs_r = Csur
def _get_Cs_w(self):
if (self.theta_s > 0):
Csur = (self.c1 - np.cosh(self.theta_s * self.s_w)) / \
(np.cosh(self.theta_s) - self.c1)
else:
Csur = -self.s_w**2
if (self.theta_b > 0):
Cbot = (np.exp(self.theta_b * Csur) - self.c1 ) / \
( self.c1 - np.exp(-self.theta_b) )
self.Cs_w = Cbot
else:
self.Cs_w = Csur
class s_coordinate_5(s_coordinate):
"""
A. Shchepetkin (2005) UCLA-ROMS vertical coordinate transformation (Vtransform=2) and
stretching functions (Vstretching=5).
return an object that can be indexed to return depths
s = s_coordinate_5(h, theta_b, theta_s, Tcline, N)
Brian Powell's surface stretching.
"""
def __init__(self, h, theta_b, theta_s, Tcline, N, hraw=None, zeta=None):
self.hraw = hraw
self.h = np.asarray(h)
        self.hmin = self.h.min()
self.theta_b = theta_b
self.theta_s = theta_s
self.Tcline = Tcline
self.N = int(N)
self.Np = self.N+1
self.hc = self.Tcline
self.Vtrans = 5
self.c1 = 1.0
self.c2 = 2.0
self.p5 = 0.5
if zeta is None:
            self.zeta = np.zeros(self.h.shape)
else:
self.zeta = zeta
self._get_s_rho()
self._get_s_w()
self._get_Cs_r()
self._get_Cs_w()
self.z_r = z_r(self.h, self.hc, self.N, self.s_rho, self.Cs_r, self.zeta, self.Vtrans)
self.z_w = z_w(self.h, self.hc, self.Np, self.s_w, self.Cs_w, self.zeta, self.Vtrans)
def _get_s_rho(self):
lev = np.arange(1,self.N+1,1)
s = -(lev * lev - 2 * lev * self.N + lev + self.N * self.N - self.N) / \
(self.N * self.N - self.N) - \
0.01 * (lev * lev - lev * self.N) / (self.c1 - self.N)
self.s_rho = s
def _get_s_w(self):
lev = np.arange(0,self.Np,1)
s = -(lev * lev - 2 * lev * self.N + lev + self.N * self.N - self.N) / \
(self.N * self.N - self.N) - \
0.01 * (lev * lev - lev * self.N) / (self.c1 - self.N)
self.s_w = s
def _get_Cs_r(self):
if self.theta_s > 0:
csur = (self.c1 - np.cosh(self.theta_s * self.s_rho)) / \
(np.cosh(self.theta_s) - self.c1)
else:
csur = -(self.s_rho * self.s_rho)
if self.theta_b > 0:
self.Cs_r = (np.exp(self.theta_b * (csur + self.c1)) - self.c1) / \
(np.exp(self.theta_b) - self.c1) - self.c1
else:
self.Cs_r = csur
def _get_Cs_w(self):
if self.theta_s > 0:
csur = (self.c1 - np.cosh(self.theta_s * self.s_w)) / \
(np.cosh(self.theta_s) - self.c1)
else:
csur = -(self.s_w * self.s_w)
if self.theta_b > 0:
self.Cs_w = (np.exp(self.theta_b * (csur + self.c1)) - self.c1) / \
(np.exp(self.theta_b) - self.c1) - self.c1
else:
self.Cs_w = csur
class z_r(object):
"""
return an object that can be indexed to return depths of rho point
z_r = z_r(h, hc, N, s_rho, Cs_r, zeta, Vtrans)
"""
def __init__(self, h, hc, N, s_rho, Cs_r, zeta, Vtrans):
self.h = h
self.hc = hc
self.N = N
self.s_rho = s_rho
self.Cs_r = Cs_r
self.zeta = zeta
self.Vtrans = Vtrans
def __getitem__(self, key):
if isinstance(key, tuple) and len(self.zeta.shape) > len(self.h.shape):
zeta = self.zeta[key[0]]
res_index = (slice(None),) + key[1:]
elif len(self.zeta.shape) > len(self.h.shape):
zeta = self.zeta[key]
res_index = slice(None)
else:
zeta = self.zeta
res_index = key
if self.h.ndim == zeta.ndim: # Assure a time-dimension exists
zeta = zeta[np.newaxis, :]
ti = zeta.shape[0]
z_r = np.empty((ti, self.N) + self.h.shape, 'd')
if self.Vtrans == 1:
for n in range(ti):
for k in range(self.N):
z0 = self.hc * self.s_rho[k] + (self.h - self.hc) * self.Cs_r[k]
z_r[n,k,:] = z0 + zeta[n,:] * (1.0 + z0 / self.h)
elif self.Vtrans == 2 or self.Vtrans == 4 or self.Vtrans == 5:
for n in range(ti):
for k in range(self.N):
z0 = (self.hc * self.s_rho[k] + self.h * self.Cs_r[k]) / \
(self.hc + self.h)
z_r[n,k,:] = zeta[n,:] + (zeta[n,:] + self.h) * z0
return np.squeeze(z_r[res_index])
class z_w(object):
"""
return an object that can be indexed to return depths of w point
z_w = z_w(h, hc, Np, s_w, Cs_w, zeta, Vtrans)
"""
def __init__(self, h, hc, Np, s_w, Cs_w, zeta, Vtrans):
self.h = h
self.hc = hc
self.Np = Np
self.s_w = s_w
self.Cs_w = Cs_w
self.zeta = zeta
self.Vtrans = Vtrans
def __getitem__(self, key):
if isinstance(key, tuple) and len(self.zeta.shape) > len(self.h.shape):
zeta = self.zeta[key[0]]
res_index = (slice(None),) + key[1:]
elif len(self.zeta.shape) > len(self.h.shape):
zeta = self.zeta[key]
res_index = slice(None)
else:
zeta = self.zeta
res_index = key
if self.h.ndim == zeta.ndim: # Assure a time-dimension exists
zeta = zeta[np.newaxis, :]
ti = zeta.shape[0]
z_w = np.empty((ti, self.Np) + self.h.shape, 'd')
if self.Vtrans == 1:
for n in range(ti):
for k in range(self.Np):
z0 = self.hc * self.s_w[k] + (self.h - self.hc) * self.Cs_w[k]
z_w[n,k,:] = z0 + zeta[n,:] * (1.0 + z0 / self.h)
elif self.Vtrans == 2 or self.Vtrans == 4:
for n in range(ti):
for k in range(self.Np):
z0 = (self.hc * self.s_w[k] + self.h * self.Cs_w[k]) / \
(self.hc + self.h)
z_w[n,k,:] = zeta[n,:] + (zeta[n,:] + self.h) * z0
return np.squeeze(z_w[res_index])
class z_coordinate(object):
"""
return an object that can be indexed to return depths
z = z_coordinate(h, depth, N)
"""
def __init__(self, h, depth, N):
self.h = np.asarray(h)
self.N = int(N)
        ndim = self.h.ndim
        if ndim == 2:
            Mm, Lm = self.h.shape
            self.z = np.zeros((N, Mm, Lm))
        elif ndim == 1:
            Sm = self.h.shape[0]
            self.z = np.zeros((N, Sm))
for k in range(N):
self.z[k,:] = depth[k]
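# Usage sketch (illustrative parameter values): depths for a flat basin.
#   import numpy as np
#   h = np.full((20, 30), 100.0)            # 100 m deep everywhere
#   sc = s_coordinate(h, theta_b=0.4, theta_s=5.0, Tcline=50.0, N=30)
#   depths_rho = sc.z_r[:]                  # array of shape (N, 20, 30)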
|
import tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['BoxCox'] , ['LinearTrend'] , ['BestCycle'] , ['MLP'] );
|
from eisoil.core.exception import CoreException
class ScheduleException(CoreException):
def __init__(self, desc):
self._desc = desc
def __str__(self):
return "Schedule: %s" % (self._desc,)
class ScheduleOverbookingError(ScheduleException):
def __init__(self, schedule_subject, resource_id, start_time, end_time):
"""All parameters should be strings or be able to str(...) itself."""
super(ScheduleOverbookingError, self).__init__("There are already reservations for %s during [%s - %s] in the %s schedule." % (str(resource_id), str(start_time), str(end_time), str(schedule_subject)))
class ScheduleNoSuchReservationError(ScheduleException):
def __init__(self, reservation_id):
super(ScheduleNoSuchReservationError, self).__init__("Could not find reservation with id %d." % (reservation_id))
|
from __future__ import unicode_literals
from collections import Counter
from itertools import groupby
from operator import itemgetter
import numpy
from django.db.models import F
from tracpro.charts.formatters import format_number
from .utils import get_numeric_values
from . import rules
def get_map_data(responses, question):
answers = get_answers(responses, question)
if question.question_type == question.TYPE_NUMERIC:
map_data = numeric_map_data(answers, question)
elif question.question_type == question.TYPE_MULTIPLE_CHOICE:
map_data = multiple_choice_map_data(answers, question)
else:
map_data = None
if map_data:
return {
'map-data': map_data,
'all-categories': rules.get_all_categories(question, answers),
}
else:
return None
def get_answers(responses, question):
"""Return answers to the question from the responses, annotated with `boundary`.
Excludes answers that are not associated with a boundary.
"""
answers = question.answers.filter(response__in=responses)
answers = answers.annotate(boundary=F('response__contact__region__boundary'))
answers = answers.exclude(boundary=None)
return answers
def numeric_map_data(answers, question):
"""For each boundary, display the category of the average answer value."""
map_data = {}
answer_data = [
{
'boundary': answer.boundary,
'value_to_use': answer.value_to_use
}
for answer in answers.order_by('boundary')
]
for boundary_id, _answers in groupby(answer_data, itemgetter('boundary')):
values = get_numeric_values(a['value_to_use'] for a in _answers)
if len(values) > 0:
average = round(numpy.mean(values), 2)
map_data[boundary_id] = {
'average': format_number(average, digits=2),
'category': question.categorize(average),
}
return map_data
def multiple_choice_map_data(answers, question):
"""For each boundary, display the most common answer category."""
map_data = {}
answer_data = answers.exclude(category=None).exclude(category="")
answer_data = answer_data.order_by('boundary').values('boundary', 'category')
for boundary_id, _answers in groupby(answer_data, itemgetter('boundary')):
top_category = Counter(a['category'] for a in _answers).most_common(1)[0][0]
map_data[boundary_id] = {
'category': top_category,
}
return map_data
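# Usage sketch: get_map_data() returns None for unmappable question types,
# otherwise a dict ready for a template context, e.g.:
#   context = get_map_data(responses, question)
#   if context:
#       per_boundary = context['map-data']          # boundary id -> display data
#       categories = context['all-categories']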
|
from setuptools import setup, find_packages
setup(
name="gevent-websocket",
version="0.3.6",
description="Websocket handler for the gevent pywsgi server, a Python network library",
long_description=open("README.rst").read(),
author="Jeffrey Gelens",
author_email="jeffrey@noppo.pro",
license="BSD",
url="https://bitbucket.org/Jeffrey/gevent-websocket",
download_url="https://bitbucket.org/Jeffrey/gevent-websocket",
install_requires=("gevent", "greenlet"),
packages=find_packages(exclude=["examples","tests"]),
classifiers=[
"Development Status :: 4 - Beta",
"License :: OSI Approved :: BSD License",
"Programming Language :: Python",
"Operating System :: MacOS :: MacOS X",
"Operating System :: POSIX",
"Topic :: Internet",
"Topic :: Software Development :: Libraries :: Python Modules",
"Intended Audience :: Developers",
],
)
|
from __future__ import print_function
__doc__ = """
Bambou provides a set of objects that make manipulating ReST entities very easy. It deals with all possible CRUD operations.
All of these low level operations are defined in a single place in the `Bambou` library.
`Bambou` is composed of the following important classes:
* `bambou.NURESTSession`
Class representing an authenticated session.
* `bambou.NURESTObject`
Parent class of all ReST entities. All ReST exposed object objects inherit from this class.
* `bambou.NURESTFetcher`
Class used to get children of a `bambou.NURESTObject`.
* `bambou.NURESTPushCenter`
Class that deals with intercepting and rerouting ReST Push Notifications.
> There are more objects in `Bambou`, but you don't need to know all of them for now.
The `bambou.NURESTSession` represents some user credentials coupled with an API URL. All ReST calls are done using
the current active session. `bambou.NURESTSession` is an abstract class that must be reimplemented by anything using `Bambou`.
In a `MySDK` built on bambou, you would use a class named `mysdk.v3_2.MySession`, which is used in the following examples.
#!python
session = MySession(username="user", password="secret", enterprise="organization", api_url="https://server")
session.start()
# your script
When you start the session, a ReST call will be sent to the API endpoint in order to get the API key.
If the credentials are valid, the attribute `mysdk.v3_2.MySession.root` will be populated with information such as your name,
your phone number, your avatar, your enterprise name and ID etc. This `user` is the root object of everything as all subsequent
calls need to be done in the context of your account (for instance, your `/enterprises` are different from another account's `/enterprises`)
It is also possible to create sub sessions with the python statement `with`:
#!python
cspsession = MySession(username="user", password="secret", enterprise="organization", api_url="https://server")
adminsession = MySession(username="admin", password="secret", enterprise="enterprise", api_url="https://server")
cspsession.start()
# this part of the code will use the CSP root user
with adminsession.start():
# this code block will be executed as admin of `enterprise`
# back to csp root session
> You **must** use `start()` when using the `with` statement, even if the session has already been started in the main context.
`bambou.NURESTObject` is the parent class of all `MySDK` entities.
All `bambou.NURESTObject` subclasses implement a method that returns the actual ReST name of the object. For instance, the ReST name of a Unicorn object is `unicorn`.
These names are used to forge the correct URI when doing CRUD operations on them.
> ReST names can be used as unique resource identifier for a given object.
> ReST names are auto generated. You never need to manually define them.
`bambou.NURESTObject` is able to forge all the URI needed to interact with the server through the ReST API.
For instance, if an object with a ReST name set to `object` needs to get the list of children with ReST name set to `subobject`, `Bambou` will use the following endpoint URL:
`GET {api_base_url}/objects/{id}/subobjects`
If an object with a ReST name set to `entity` needs to fetch itself, the generated URL will be
`GET {api_base_url}/entities/{id}`
> `Bambou` automagically deals with plurals.
> The ReST base URL is pulled from the current active `bambou.NURESTSession`.
> URI are auto generated. You never need to deal with them manually.
Exposed attributes are converted and sent to the server when you perform CRUD operations. For instance, if an object has an attribute `name`, it can be marked as a ReST attribute.
When saving the object, the value of `name` will be put into the generated JSON structure that is sent to the server, or automatically populated from a JSON structure coming from the server.
Not only the attribute itself can be exposed, but also its type and other information such as whether it is read-only, its allowed values, its format, its default value and so on.
> Exposing ReST attributes is auto generated. You never need to manually expose new attributes.
`bambou.NURESTObject` allows to perform all sorts of CRUD operations.
* `bambou.NURESTObject.fetch`
* `bambou.NURESTObject.save`
* `bambou.NURESTObject.delete`
* `bambou.NURESTObject.create_child`
* `bambou.NURESTObject.assign`
* `bambou.NURESTObject.instantiate_child`
> All these methods require the current `bambou.NURESTObject` to have a valid `bambou.NURESTObject.ID`.
> You may notice that there is no creation method. Creation is always happening from a parent object and is done using `create_child`.
> You may notice that an optional parameter `callback` is present. This is because `MySDK` can work completely asynchronously.
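Here is a minimal sketch of these operations (the `Unicorn` entity used here is a hypothetical illustration, not part of `Bambou` itself):
    #!python
    unicorn = Unicorn(id="xxxx-xxxx-xxxx-xxxx")
    unicorn.fetch()       # GET {api_base_url}/unicorns/{id}
    unicorn.name = "Sparkle"
    unicorn.save()        # PUT {api_base_url}/unicorns/{id}
    metadata = Metadata(name="my metadata", blob="hello world!")
    unicorn.create_child(metadata)
    unicorn.delete()      # DELETE {api_base_url}/unicorns/{id}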
`bambou.NURESTObject` allows quick and easy conversion from and to python dictionaries
* `bambou.NURESTObject.from_dict`
* `bambou.NURESTObject.to_dict`
> You never need to perform the actual JSON conversion when sending info to the server. `bambou.NURESTConnection` will do that automatically, but you can use these methods to print an object, or to copy information from one object into another.
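These methods are handy for debugging and for copying data between objects. A minimal sketch (the `unicorn` object is a hypothetical illustration):
    #!python
    data = unicorn.to_dict()    # plain python dictionary
    other = Unicorn()
    other.from_dict(data)       # populate another object from that dictionary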
`bambou.NURESTFetcher` is a class allowing a `bambou.NURESTObject` to fetch its children. All `bambou.NURESTObject` instances have one or more fetchers, unless the object is a leaf in the model hierarchy. `bambou.NURESTFetcher` provides a lot of flexibility in how you retrieve a given list of children. It can deal with simple object fetching, pagination, filtering, request headers, grouping etc.
`bambou.NURESTFetcher` has three important methods:
* `bambou.NURESTFetcher.fetch`
* `bambou.NURESTFetcher.get`
* `bambou.NURESTFetcher.get_first`
The fetcher is a powerful concept that makes the process of getting child objects completely generic and code friendly. `bambou.NURESTObject` provides methods that let you deal programmatically with its fetchers in a completely generic way.
* `bambou.NURESTObject.fetcher_for_rest_name`
* `bambou.NURESTObject.fetchers`
* `bambou.NURESTObject.children_rest_names`
This allows completely abstract, programmatic operations on any object.
For instance, the following function will add a new `MySDK.v3_2.Metadata` to the entire hierarchy of children of a given object, restricted to children created after a certain date:
#!python
    def apply_metadata_to_all_children(root_object, metadata, filter=None):
# Loop on all declared children fetchers
for fetcher in root_object.fetchers:
# Fetch the list of the children
children = fetcher.get(filter=filter)
# Loop on all fetched children
for child in children:
# Add the metadata to the current children
child.create_child(metadata)
# Start over recursively on the children of the current child
apply_metadata_to_all_children(child, metadata)
enterprise = Enterprise(id="xxxx-xxxx-xxx-xxxx")
metadata = Metadata(name="my metadata", blob="hello world!")
apply_metadata_to_all_children(enterprise, metadata, filter="creationDate > '01-01-2015'")
The API supports client-side push through a long polling connection. ReST clients can connect to that channel and will get a notification as soon as they or someone else in the system changes something. These events are filtered by permissions, which means that if someone changes a property of an object you cannot see, you won't get notified. `MySDK` provides the `bambou.NURESTPushCenter`, which encapsulates all the logic needed to deal with the event channel. It runs in its own thread and calls the registered callbacks when it receives a push.
A `bambou.NURESTPushCenter` is automatically created with each `bambou.NURESTSession` and it is available from the attribute `bambou.NURESTSession.push_center`.
#!python
session = MySession(username="user", password="secret", enterprise="organization", api_url="https://server")
session.start()
session.push_center.start()
> You need to explicitly start the push center.
Only the following methods are important:
* `bambou.NURESTPushCenter.start`
* `bambou.NURESTPushCenter.add_delegate`
* `bambou.NURESTPushCenter.remove_delegate`
Here is a really simple code sample that will print the push data on every push:
#!python
from MySDK import *
from pprint import pprint
from time import sleep
session = MySession(username="csproot", password="secret", enterprise="csp", api_url="https://server")
session.start()
def on_receive_push(data):
    pprint(data)
session.push_center.add_delegate(on_receive_push)
session.push_center.start()
# default stupid run loop. don't do that in real life :)
while True:
sleep(1000)
Now you know the basics of `Bambou` and, therefore, of `MySDK`. Remember that all objects in `MySDK` are subclasses of `bambou.NURESTObject`, so they **all** work exactly the same way.
There is a lot more to know about `Bambou`, such as the asynchronous mode, automatic model parsing, and easy controller creation thanks to introspection. We'll cover these in a separate advanced section.
"""
try:
    import requests
    # requests may be missing, or may not vendor urllib3 at this location
    requests.packages.urllib3.disable_warnings()
except Exception:
    pass
import logging
bambou_logger = logging.getLogger('bambou')
pushcenter_logger = logging.getLogger('pushcenter')
try: # Python 2.7+
from logging import NullHandler
except ImportError:
class NullHandler(logging.Handler):
def emit(self, record):
pass
bambou_logger.addHandler(NullHandler())
__all__ = ['NURESTRootObject', 'NURESTConnection', 'NURESTModelController', 'NURESTFetcher', 'NURESTLoginController', 'NURESTObject', 'NURESTPushCenter', 'NURESTRequest', 'NURESTResponse', 'NURESTSession', 'BambouConfig']
from bambou.nurest_session import NURESTSession
from bambou.nurest_root_object import NURESTRootObject
from bambou.nurest_connection import NURESTConnection
from bambou.nurest_fetcher import NURESTFetcher
from bambou.nurest_login_controller import NURESTLoginController
from bambou.nurest_object import NURESTObject
from bambou.nurest_push_center import NURESTPushCenter
from bambou.nurest_request import NURESTRequest
from bambou.nurest_response import NURESTResponse
from bambou.nurest_modelcontroller import NURESTModelController
from bambou.config import BambouConfig
|
import numpy as np
from pysal.lib.common import requires
@requires('matplotlib')
def shift_colormap(cmap, start=0, midpoint=0.5, stop=1.0, name='shiftedcmap'):
'''
    Function to offset the "center" of a colormap. Useful for
    data with a negative min and a positive max, where you want the
    middle of the colormap's dynamic range to be at zero.
Parameters
----------
cmap : The matplotlib colormap to be altered
    start : Offset from lowest point in the colormap's range.
          Defaults to 0.0 (no lower offset). Should be between
          0.0 and `midpoint`.
    midpoint : The new center of the colormap. Defaults to
          0.5 (no shift). Should be between 0.0 and 1.0. In
          general, this should be 1 - vmax/(vmax + abs(vmin)).
          For example, if your data range from -15.0 to +5.0 and
          you want the center of the colormap at 0.0, `midpoint`
          should be set to 1 - 5/(5 + 15), or 0.75.
    stop : Offset from highest point in the colormap's range.
          Defaults to 1.0 (no upper offset). Should be between
          `midpoint` and 1.0.
Returns
-------
new_cmap : A new colormap that has been shifted.
'''
import matplotlib as mpl
import matplotlib.pyplot as plt
cdict = {
'red': [],
'green': [],
'blue': [],
'alpha': []
}
# regular index to compute the colors
reg_index = np.linspace(start, stop, 257)
# shifted index to match the data
shift_index = np.hstack([
np.linspace(0.0, midpoint, 128, endpoint=False),
np.linspace(midpoint, 1.0, 129, endpoint=True)
])
for ri, si in zip(reg_index, shift_index):
r, g, b, a = cmap(ri)
cdict['red'].append((si, r, r))
cdict['green'].append((si, g, g))
cdict['blue'].append((si, b, b))
cdict['alpha'].append((si, a, a))
new_cmap = mpl.colors.LinearSegmentedColormap(name, cdict)
plt.register_cmap(cmap=new_cmap)
return new_cmap
@requires('matplotlib')
def truncate_colormap(cmap, minval=0.0, maxval=1.0, n=100):
'''
Function to truncate a colormap by selecting a subset of the original colormap's values
Parameters
----------
    cmap : Matplotlib colormap to be altered
minval : Minimum value of the original colormap to include in the truncated colormap
maxval : Maximum value of the original colormap to include in the truncated colormap
n : Number of intervals between the min and max values for the gradient of the truncated colormap
Returns
-------
    new_cmap : A new colormap that has been truncated.
'''
import matplotlib as mpl
new_cmap = mpl.colors.LinearSegmentedColormap.from_list(
'trunc({n},{a:.2f},{b:.2f})'.format(n=cmap.name, a=minval, b=maxval),
cmap(np.linspace(minval, maxval, n)))
return new_cmap
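# Illustrative usage sketch (not part of the public API): shift the 'seismic'
# colormap so that zero sits at the midpoint of a -15..5 data range, or keep
# only its positive half.
#
#   import matplotlib.pyplot as plt
#   cmap = plt.cm.seismic
#   shifted = shift_colormap(cmap, midpoint=1 - 5.0 / (5.0 + 15.0))  # 0.75
#   upper_half = truncate_colormap(cmap, minval=0.5, maxval=1.0)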
@requires('matplotlib')
@requires('geopandas')
def compare_surfaces(data, var1, var2, gwr_t, gwr_bw, mgwr_t, mgwr_bw, name,
kwargs1, kwargs2, savefig=None):
'''
Function that creates comparative visualization of GWR and MGWR surfaces.
Parameters
----------
data : pandas or geopandas Dataframe
gwr/mgwr results
var1 : string
name of gwr parameter estimate column in frame
var2 : string
name of mgwr parameter estimate column in frame
gwr_t : string
name of gwr t-values column in frame associated with var1
gwr_bw : float
bandwidth for gwr model for var1
mgwr_t : string
name of mgwr t-values column in frame associated with var2
mgwr_bw: float
bandwidth for mgwr model for var2
name : string
common variable name to use for title
kwargs1:
additional plotting arguments for gwr surface
kwargs2:
additional plotting arguments for mgwr surface
savefig: string, optional
             path to save the figure. Default is None (the figure is not saved).
'''
import matplotlib.pyplot as plt
import geopandas as gp
fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(45,20))
ax0 = axes[0]
ax0.set_title('GWR ' + name + ' Surface (BW: ' + str(gwr_bw) +')', fontsize=40)
ax1 = axes[1]
ax1.set_title('MGWR ' + name + ' Surface (BW: ' + str(mgwr_bw) +')', fontsize=40)
#Set color map
cmap = plt.cm.seismic
#Find min and max values of the two combined datasets
gwr_min = data[var1].min()
gwr_max = data[var1].max()
mgwr_min = data[var2].min()
mgwr_max = data[var2].max()
vmin = np.min([gwr_min, mgwr_min])
vmax = np.max([gwr_max, mgwr_max])
#If all values are negative use the negative half of the colormap
if (vmin < 0) & (vmax < 0):
cmap = truncate_colormap(cmap, 0.0, 0.5)
#If all values are positive use the positive half of the colormap
elif (vmin > 0) & (vmax > 0):
cmap = truncate_colormap(cmap, 0.5, 1.0)
    #Otherwise there are both positive and negative values, so shift the colormap so that zero is the midpoint
else:
cmap = shift_colormap(cmap, start=0.0, midpoint=1 - vmax/(vmax + abs(vmin)), stop=1.)
#Create scalar mappable for colorbar and stretch colormap across range of data values
sm = plt.cm.ScalarMappable(cmap=cmap, norm=plt.Normalize(vmin=vmin, vmax=vmax))
#Plot GWR parameters
data.plot(var1, cmap=sm.cmap, ax=ax0, vmin=vmin, vmax=vmax, **kwargs1)
    if (data[gwr_t] == 0).any():
        data[data[gwr_t] == 0].plot(color='lightgrey', ax=ax0, **kwargs2)
#Plot MGWR parameters
data.plot(var2, cmap=sm.cmap, ax=ax1, vmin=vmin, vmax=vmax, **kwargs1)
    if (data[mgwr_t] == 0).any():
        data[data[mgwr_t] == 0].plot(color='lightgrey', ax=ax1, **kwargs2)
#Set figure options and plot
fig.tight_layout()
fig.subplots_adjust(right=0.9)
cax = fig.add_axes([0.92, 0.14, 0.03, 0.75])
sm._A = []
cbar = fig.colorbar(sm, cax=cax)
cbar.ax.tick_params(labelsize=50)
ax0.get_xaxis().set_visible(False)
ax0.get_yaxis().set_visible(False)
ax1.get_xaxis().set_visible(False)
ax1.get_yaxis().set_visible(False)
if savefig is not None:
plt.savefig(savefig)
plt.show()
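# Illustrative call (column names and bandwidths are hypothetical; `frame` is
# assumed to be a geopandas GeoDataFrame of GWR/MGWR results):
#
#   compare_surfaces(frame, 'gwr_b', 'mgwr_b', 'gwr_t', 117.0, 'mgwr_t', 92.0,
#                    'Intercept', dict(edgecolor='black'),
#                    dict(edgecolor='black'), savefig='surfaces.png')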
|
"""Template loader for app-namespace"""
import errno
import io
import os
from collections import OrderedDict
import django
from django.apps import apps
try:
from django.template import Origin
except ImportError: # pragma: no cover
class Origin(object):
def __init__(self, **kwargs):
for k, v in kwargs.items():
setattr(self, k, v)
from django.template import TemplateDoesNotExist
from django.template.loaders.base import Loader as BaseLoader
from django.utils._os import safe_join
from django.utils._os import upath
from django.utils.functional import cached_property
class NamespaceOrigin(Origin):
def __init__(self, app_name, *args, **kwargs):
self.app_name = app_name
super(NamespaceOrigin, self).__init__(*args, **kwargs)
class Loader(BaseLoader):
"""
App namespace loader for allowing you to both extend and override
a template provided by an app at the same time.
"""
is_usable = True
def __init__(self, *args, **kwargs):
super(Loader, self).__init__(*args, **kwargs)
self._already_used = []
def reset(self, mandatory_on_django_18):
"""
Empty the cache of paths already used.
"""
if django.VERSION[1] == 8:
if not mandatory_on_django_18:
return
self._already_used = []
def get_app_template_path(self, app, template_name):
"""
Return the full path of a template name located in an app.
"""
return safe_join(self.app_templates_dirs[app], template_name)
@cached_property
def app_templates_dirs(self):
"""
Build a cached dict with settings.INSTALLED_APPS as keys
and the 'templates' directory of each application as values.
"""
app_templates_dirs = OrderedDict()
for app_config in apps.get_app_configs():
templates_dir = os.path.join(
getattr(app_config, 'path', '/'), 'templates')
if os.path.isdir(templates_dir):
templates_dir = upath(templates_dir)
app_templates_dirs[app_config.name] = templates_dir
app_templates_dirs[app_config.label] = templates_dir
return app_templates_dirs
def get_contents(self, origin):
"""
Try to load the origin.
"""
try:
path = self.get_app_template_path(
origin.app_name, origin.template_name)
with io.open(path, encoding=self.engine.file_charset) as fp:
return fp.read()
except KeyError:
raise TemplateDoesNotExist(origin)
except IOError as error:
if error.errno == errno.ENOENT:
raise TemplateDoesNotExist(origin)
raise
def get_template_sources(self, template_name):
"""
        Build a list of Origins to load 'template_name', split on ':'.
The first item is the name of the application and the last item
is the true value of 'template_name' provided by the specified
application.
"""
if ':' not in template_name:
self.reset(True)
return
app, template_path = template_name.split(':')
if app:
yield NamespaceOrigin(
app_name=app,
name='app_namespace:%s:%s' % (app, template_name),
template_name=template_path,
loader=self)
return
self.reset(False)
for app in self.app_templates_dirs:
file_path = self.get_app_template_path(app, template_path)
if file_path in self._already_used:
continue
self._already_used.append(file_path)
yield NamespaceOrigin(
app_name=app,
name='app_namespace:%s:%s' % (app, template_name),
template_name=template_path,
loader=self)
def load_template_source(self, *ka):
"""
Backward compatible method for Django < 2.0.
"""
template_name = ka[0]
for origin in self.get_template_sources(template_name):
try:
return self.get_contents(origin), origin.name
except TemplateDoesNotExist:
pass
raise TemplateDoesNotExist(template_name)
|
from sympy.core import (Basic, Expr, S, C, Symbol, Wild, Add, sympify, diff,
oo, Tuple, Dummy, Equality, Interval)
from sympy.core.symbol import Dummy
from sympy.core.compatibility import ordered_iter
from sympy.integrals.trigonometry import trigintegrate
from sympy.integrals.deltafunctions import deltaintegrate
from sympy.integrals.rationaltools import ratint
from sympy.integrals.risch import heurisch
from sympy.utilities import xthreaded, flatten, any, all
from sympy.polys import Poly, PolynomialError
from sympy.solvers import solve
from sympy.functions import Piecewise, sign
from sympy.geometry import Curve
from sympy.functions.elementary.piecewise import piecewise_fold
from sympy.series import limit
def _free_symbols(function, limits):
"""
Return the symbols that will exist when the function is evaluated as
an Integral or a Sum. This is useful if one is trying to determine
whether the result is dependent on a certain symbol or not.
This is written as a private function so it can be used from Sum as well
as from Integral.
"""
if function.is_zero:
return set()
isyms = function.free_symbols
for xab in limits:
if len(xab) == 1:
isyms.add(xab[0])
continue
# take out the target symbol
if xab[0] in isyms:
isyms.remove(xab[0])
if len(xab) == 3 and xab[1] == xab[2]:
# if two limits are the same the integral is 0
# and there are no symbols
return set()
# add in the new symbols
for i in xab[1:]:
isyms.update(i.free_symbols)
return isyms
def _process_limits(*symbols):
"""Convert the symbols-related limits into propert limits,
storing them as Tuple(symbol, lower, upper). The sign of
the function is also returned when the upper limit is missing
so (x, 1, None) becomes (x, None, 1) and the sign is changed.
"""
limits = []
sign = 1
for V in symbols:
if isinstance(V, Symbol):
limits.append(Tuple(V))
continue
elif ordered_iter(V, Tuple):
V = sympify(flatten(V))
if V[0].is_Symbol:
newsymbol = V[0]
if len(V) == 2 and isinstance(V[1], Interval):
V[1:] = [V[1].start, V[1].end]
if len(V) == 3:
if V[1] is None and V[2] is not None:
nlim = [V[2]]
elif V[1] is not None and V[2] is None:
sign *= -1
nlim = [V[1]]
elif V[1] is None and V[2] is None:
nlim = []
else:
nlim = V[1:]
limits.append(Tuple(newsymbol, *nlim ))
continue
elif len(V) == 1 or (len(V) == 2 and V[1] is None):
limits.append(Tuple(newsymbol))
continue
elif len(V) == 2:
limits.append(Tuple(newsymbol, V[1]))
continue
raise ValueError('Invalid limits given: %s' % str(symbols))
return limits, sign
class Integral(Expr):
"""Represents unevaluated integral."""
__slots__ = ['is_commutative']
def __new__(cls, function, *symbols, **assumptions):
# Any embedded piecewise functions need to be brought out to the
# top level so that integration can go into piecewise mode at the
# earliest possible moment.
function = piecewise_fold(sympify(function))
if function is S.NaN:
return S.NaN
if symbols:
limits, sign = _process_limits(*symbols)
else:
# no symbols provided -- let's compute full anti-derivative
limits, sign = [Tuple(s) for s in function.free_symbols], 1
if len(limits) != 1:
raise ValueError("specify integration variables to integrate %s" % function)
while isinstance(function, Integral):
# denest the integrand
limits = list(function.limits) + limits
function = function.function
obj = Expr.__new__(cls, **assumptions)
arglist = [sign*function]
arglist.extend(limits)
obj._args = tuple(arglist)
obj.is_commutative = all(s.is_commutative for s in obj.free_symbols)
return obj
def __getnewargs__(self):
return (self.function,) + tuple([tuple(xab) for xab in self.limits])
@property
def function(self):
return self._args[0]
@property
def limits(self):
return self._args[1:]
@property
def variables(self):
"""Return a list of the integration variables.
>>> from sympy import Integral
>>> from sympy.abc import x, i
>>> Integral(x**i, (i, 1, 3)).variables
[i]
"""
return [l[0] for l in self.limits]
@property
def free_symbols(self):
"""
This method returns the symbols that will exist when the
integral is evaluated. This is useful if one is trying to
determine whether an integral is dependent on a certain
symbol or not.
>>> from sympy import Integral
>>> from sympy.abc import x, y
>>> Integral(x, (x, y, 1)).free_symbols
set([y])
"""
return _free_symbols(self.function, self.limits)
@property
def is_zero(self):
"""Since Integral doesn't autosimplify it it useful to see if
it would simplify to zero or not in a trivial manner, i.e. when
the function is 0 or two limits of a definite integral are the same.
This is a very naive and quick test, not intended to check for special
patterns like Integral(sin(m*x)*cos(n*x), (x, 0, 2*pi)) == 0.
"""
if (self.function.is_zero or
any(len(xab) == 3 and xab[1] == xab[2] for xab in self.limits)):
return True
if not self.free_symbols and self.function.is_number:
# the integrand is a number and the limits are numerical
return False
@property
def is_number(self):
"""
Return True if the Integral will result in a number, else False.
sympy considers anything that will result in a number to have
is_number == True.
>>> from sympy import log
>>> log(2).is_number
True
Integrals are a special case since they contain symbols that can
be replaced with numbers. Whether the integral can be done or not is
another issue. But answering whether the final result is a number is
not difficult.
>>> from sympy import Integral
>>> from sympy.abc import x, y
>>> Integral(x).is_number
False
>>> Integral(x, y).is_number
False
>>> Integral(x, (y, 1, x)).is_number
False
>>> Integral(x, (y, 1, 2)).is_number
False
>>> Integral(x, (y, 1, 1)).is_number
True
>>> Integral(x, (x, 1, 2)).is_number
True
>>> Integral(x*y, (x, 1, 2), (y, 1, 3)).is_number
True
>>> Integral(1, x, (x, 1, 2)).is_number
True
"""
integrand, limits = self.function, self.limits
isyms = integrand.atoms(Symbol)
for xab in limits:
if len(xab) == 1:
isyms.add(xab[0])
continue # it may be removed later
elif len(xab) == 3 and xab[1] == xab[2]: # XXX naive equality test
return True # integral collapsed
if xab[0] in isyms:
                # take it out of the symbols since it will be replaced
                # with whatever the limits of the integral are
isyms.remove(xab[0])
# add in the new symbols
for i in xab[1:]:
isyms.update(i.free_symbols)
# if there are no surviving symbols then the result is a number
return len(isyms) == 0
def as_dummy(self):
"""
Replace instances of the integration variables with their dummy
counterparts to make clear what are dummy variables and what
are real-world symbols in an Integral. The "integral at" limit
that has a length of 1 will be explicated with its length-2
equivalent.
>>> from sympy import Integral
>>> from sympy.abc import x, y
>>> Integral(x).as_dummy()
Integral(_x, (_x, x))
>>> Integral(x, (x, x, y), (y, x, y)).as_dummy()
Integral(_x, (_x, x, _y), (_y, x, y))
If there were no dummies in the original expression, then the
output of this function will show which symbols cannot be
changed by subs(), those with an underscore prefix.
"""
reps = {}
f = self.function
limits = list(self.limits)
for i in xrange(-1, -len(limits) - 1, -1):
xab = list(limits[i])
if len(xab) == 1:
xab = xab*2
x = xab[0]
xab[0] = x.as_dummy()
for j in range(1, len(xab)):
xab[j] = xab[j].subs(reps)
reps[x] = xab[0]
limits[i] = xab
f = f.subs(reps)
return Integral(f, *limits)
def transform(self, x, mapping, inverse=False):
"""
Replace the integration variable x in the integrand with the
expression given by `mapping`, e.g. 2*x or 1/x. The integrand and
endpoints are rescaled to preserve the value of the original
integral.
In effect, this performs a variable substitution (although the
symbol remains unchanged; follow up with subs to obtain a
new symbol.)
With inverse=True, the inverse transformation is performed.
The mapping must be uniquely invertible (e.g. a linear or linear
fractional transformation).
"""
if x not in self.variables:
return self
limits = self.limits
function = self.function
y = Dummy('y')
inverse_mapping = solve(mapping.subs(x, y) - x, y)
if len(inverse_mapping) != 1 or x not in inverse_mapping[0].free_symbols:
raise ValueError("The mapping must be uniquely invertible")
inverse_mapping = inverse_mapping[0]
if inverse:
mapping, inverse_mapping = inverse_mapping, mapping
function = function.subs(x, mapping) * mapping.diff(x)
def calc_limit(a, b):
"""replace x with a, using subs if possible, otherwise limit
where sign of b is considered"""
wok = inverse_mapping.subs(x, a)
if wok is S.NaN or wok.is_bounded is False and a.is_bounded:
return limit(sign(b)*inverse_mapping, x, a)
return wok
newlimits = []
for xab in limits:
sym = xab[0]
if sym == x and len(xab) == 3:
a, b = xab[1:]
a, b = calc_limit(a, b), calc_limit(b, a)
if a == b:
raise ValueError("The mapping must transform the "
"endpoints into separate points")
if a > b:
a, b = b, a
function = -function
newlimits.append((sym, a, b))
else:
newlimits.append(xab)
return Integral(function, *newlimits)
def doit(self, **hints):
if not hints.get('integrals', True):
return self
deep = hints.get('deep', True)
# check for the trivial case of equal upper and lower limits
if self.is_zero:
return S.Zero
# now compute and check the function
function = self.function
if deep:
function = function.doit(**hints)
if function.is_zero:
return S.Zero
# There is no trivial answer, so continue
undone_limits = []
ulj = set() # free symbols of any undone limits' upper and lower limits
for xab in self.limits:
# compute uli, the free symbols in the
# Upper and Lower limits of limit I
if len(xab) == 1:
uli = set(xab[:1])
elif len(xab) == 2:
uli = xab[1].free_symbols
elif len(xab) == 3:
uli = xab[1].free_symbols.union(xab[2].free_symbols)
# this integral can be done as long as there is no blocking
# limit that has been undone. An undone limit is blocking if
# it contains an integration variable that is in this limit's
# upper or lower free symbols or vice versa
if xab[0] in ulj or any(v[0] in uli for v in undone_limits):
undone_limits.append(xab)
ulj.update(uli)
continue
antideriv = self._eval_integral(function, xab[0])
if antideriv is None:
undone_limits.append(xab)
else:
if len(xab) == 1:
function = antideriv
else:
if len(xab) == 3:
x, a, b = xab
if len(xab) == 2:
x, b = xab
a = None
if deep:
if isinstance(a, Basic):
a = a.doit(**hints)
if isinstance(b, Basic):
b = b.doit(**hints)
if antideriv.is_Poly:
gens = list(antideriv.gens)
gens.remove(x)
antideriv = antideriv.as_expr()
function = antideriv._eval_interval(x, a, b)
function = Poly(function, *gens)
else:
function = antideriv._eval_interval(x, a, b)
if undone_limits:
return self.func(*([function] + undone_limits))
return function
def _eval_expand_basic(self, deep=True, **hints):
from sympy import flatten
if not deep:
return self
else:
return Integral(self.function.expand(deep=deep, **hints),\
flatten(*self.limits))
def _eval_derivative(self, sym):
"""Evaluate the derivative of the current Integral object by
differentiating under the integral sign [1], using the Fundamental
Theorem of Calculus [2] when possible.
Whenever an Integral is encountered that is equivalent to zero or
has an integrand that is independent of the variable of integration
those integrals are performed. All others are returned as Integral
instances which can be resolved with doit() (provided they are integrable).
References:
[1] http://en.wikipedia.org/wiki/Differentiation_under_the_integral_sign
[2] http://en.wikipedia.org/wiki/Fundamental_theorem_of_calculus
>>> from sympy import Integral
>>> from sympy.abc import x, y
>>> i = Integral(x + y, y, (y, 1, x))
>>> i.diff(x)
Integral(x + y, (y, x)) + Integral(1, (y, y), (y, 1, x))
>>> i.doit().diff(x) == i.diff(x).doit()
True
>>> i.diff(y)
0
The previous must be true since there is no y in the evaluated integral:
>>> i.free_symbols
set([x])
>>> i.doit()
2*x**3/3 - x/2 - 1/6
"""
# differentiate under the integral sign; we do not
# check for regularity conditions (TODO), see issue 1116
# get limits and the function
f, limits = self.function, list(self.limits)
# the order matters if variables of integration appear in the limits
# so work our way in from the outside to the inside.
limit = limits.pop(-1)
if len(limit) == 3:
x, a, b = limit
elif len(limit) == 2:
x, b = limit
a = None
else:
a = b = None
x = limit[0]
if limits: # f is the argument to an integral
f = Integral(f, *tuple(limits))
# assemble the pieces
rv = 0
if b is not None:
rv += f.subs(x, b)*diff(b, sym)
if a is not None:
rv -= f.subs(x, a)*diff(a, sym)
if len(limit) == 1 and sym == x:
# the dummy variable *is* also the real-world variable
arg = f
rv += arg
else:
# the dummy variable might match sym but it's
# only a dummy and the actual variable is determined
# by the limits, so mask off the variable of integration
# while differentiating
u = Dummy('u')
arg = f.subs(x, u).diff(sym).subs(u, x)
rv += Integral(arg, Tuple(x, a, b))
return rv
def _eval_integral(self, f, x):
"""Calculate the anti-derivative to the function f(x).
This is a powerful function that should in theory be able to integrate
        everything that can be integrated. If you find something that it
        doesn't handle, it is easy to implement it.
(1) Simple heuristics (based on pattern matching and integral table):
- most frequently used functions (e.g. polynomials)
- functions non-integrable by any of the following algorithms (e.g.
exp(-x**2))
(2) Integration of rational functions:
(a) using apart() - apart() is full partial fraction decomposition
procedure based on Bronstein-Salvy algorithm. It gives formal
decomposition with no polynomial factorization at all (so it's fast
and gives the most general results). However it needs much better
        implementation of RootsOf class (in fact, any implementation).
(b) using Trager's algorithm - possibly faster than (a) but needs
implementation :)
(3) Whichever implementation of pmInt (Mateusz, Kirill's or a
combination of both).
- this way we can handle efficiently huge class of elementary and
special functions
(4) Recursive Risch algorithm as described in Bronstein's integration
tutorial.
- this way we can handle those integrable functions for which (3)
fails
(5) Powerful heuristics based mostly on user defined rules.
- handle complicated, rarely used cases
"""
# if it is a poly(x) then let the polynomial integrate itself (fast)
#
# It is important to make this check first, otherwise the other code
# will return a sympy expression instead of a Polynomial.
#
# see Polynomial for details.
if isinstance(f, Poly):
return f.integrate(x)
# Piecewise antiderivatives need to call special integrate.
if f.func is Piecewise:
return f._eval_integral(x)
# let's cut it short if `f` does not depend on `x`
if not f.has(x):
return f*x
# try to convert to poly(x) and then integrate if successful (fast)
poly = f.as_poly(x)
if poly is not None:
return poly.integrate().as_expr()
# since Integral(f=g1+g2+...) == Integral(g1) + Integral(g2) + ...
# we are going to handle Add terms separately,
# if `f` is not Add -- we only have one term
parts = []
args = Add.make_args(f)
for g in args:
coeff, g = g.as_independent(x)
# g(x) = const
if g is S.One:
parts.append(coeff*x)
continue
# c
# g(x) = (a*x+b)
if g.is_Pow and not g.exp.has(x):
a = Wild('a', exclude=[x])
b = Wild('b', exclude=[x])
M = g.base.match(a*x + b)
if M is not None:
if g.exp == -1:
h = C.log(g.base)
else:
h = g.base**(g.exp + 1) / (g.exp + 1)
parts.append(coeff * h / M[a])
continue
# poly(x)
# g(x) = -------
# poly(x)
if g.is_rational_function(x):
parts.append(coeff * ratint(g, x))
continue
# g(x) = Mul(trig)
h = trigintegrate(g, x)
if h is not None:
parts.append(coeff * h)
continue
# g(x) has at least a DiracDelta term
h = deltaintegrate(g, x)
if h is not None:
parts.append(coeff * h)
continue
# fall back to the more general algorithm
try:
h = heurisch(g, x, hints=[])
except PolynomialError:
# XXX: this exception means there is a bug in the
# implementation of heuristic Risch integration
# algorithm.
h = None
# if we failed maybe it was because we had
# a product that could have been expanded,
# so let's try an expansion of the whole
# thing before giving up; we don't try this
            # at the outset because there are things
# that cannot be solved unless they are
# NOT expanded e.g., x**x*(1+log(x)). There
# should probably be a checker somewhere in this
# routine to look for such cases and try to do
# collection on the expressions if they are already
# in an expanded form
if not h and len(args) == 1:
f = f.expand(mul=True, deep=False)
if f.is_Add:
return self._eval_integral(f, x)
if h is not None:
parts.append(coeff * h)
else:
return None
return Add(*parts)
def _eval_lseries(self, x):
for term in self.function.lseries(x):
yield integrate(term, *self.limits)
def _eval_nseries(self, x, n, logx):
terms, order = self.function.nseries(x, n=n, logx=logx).as_coeff_add(C.Order)
return integrate(terms, *self.limits) + Add(*order)*x
def _eval_subs(self, old, new):
"""
Substitute old with new in the integrand and the limits, but don't
change anything that is (or corresponds to) a variable of integration.
The normal substitution semantics -- traversing all arguments looking
for matching patterns -- should not be applied to the Integrals since
changing the integration variables should also entail a change in the
integration limits (which should be done with the transform method). So
this method just makes changes in the integrand and the limits.
Not all instances of a given variable are conceptually the same: the
first argument of the limit tuple and any corresponding variable in
the integrand are dummy variables while every other symbol is a symbol
that will be unchanged when the integral is evaluated. For example, in
Integral(x + a, (a, a, b))
the dummy variables are shown below with angle-brackets around them and
will not be changed by this function:
Integral(x + <a>, (<a>, a, b))
If you want to change the lower limit to 1 there is no reason to
prohibit this since it is not conceptually related to the integration
variable, <a>. Nor is there reason to disallow changing the b to 1.
If a second limit were added, however, as in:
Integral(x + a, (a, a, b), (b, 1, 2))
the dummy variables become:
Integral(x + <a>, (<a>, a, <b>), (<b>, a, b))
Note that the `b` of the first limit is now a dummy variable since `b` is a
dummy variable in the second limit.
Summary: no variable of the integrand or limit can be the target of
substitution if it appears as a variable of integration in a limit
positioned to the right of it.
>>> from sympy import Integral
>>> from sympy.abc import a, b, c, x, y
>>> i = Integral(a + x, (a, a, 3), (b, x, c))
>>> list(i.free_symbols) # only these can be changed
[x, a, c]
>>> i.subs(a, c) # note that the variable of integration is unchanged
Integral(a + x, (a, c, 3), (b, x, c))
>>> i.subs(a + x, b) == i # there is no x + a, only x + <a>
True
>>> i.subs(x, y - c)
Integral(a - c + y, (a, a, 3), (b, -c + y, c))
"""
if self == old:
return new
integrand, limits = self.function, self.limits
old_atoms = old.free_symbols
limits = list(limits)
# make limits explicit if they are to be targeted by old:
# Integral(x, x) -> Integral(x, (x, x)) if old = x
if old.is_Symbol:
for i, l in enumerate(limits):
if len(l) == 1 and l[0] == old:
limits[i] = Tuple(l[0], l[0])
dummies = set()
for i in xrange(-1, -len(limits) - 1, -1):
xab = limits[i]
if not dummies.intersection(old_atoms):
limits[i] = Tuple(xab[0],
*[l.subs(old, new) for l in xab[1:]])
dummies.add(xab[0])
if not dummies.intersection(old_atoms):
integrand = integrand.subs(old, new)
return Integral(integrand, *limits)
def as_sum(self, n, method="midpoint"):
"""
Approximates the integral by a sum.
method ... one of: left, right, midpoint
This is basically just the rectangle method [1], the only difference is
where the function value is taken in each interval.
[1] http://en.wikipedia.org/wiki/Rectangle_method
**method = midpoint**:
Uses the n-order midpoint rule to evaluate the integral.
Midpoint rule uses rectangles approximation for the given area (e.g.
definite integral) of the function with heights equal to the point on
the curve exactly in the middle of each interval (thus midpoint
method). See [1] for more information.
Examples:
>>> from sympy import sqrt
>>> from sympy.abc import x
>>> from sympy.integrals import Integral
>>> e = Integral(sqrt(x**3+1), (x, 2, 10))
>>> e
Integral((x**3 + 1)**(1/2), (x, 2, 10))
>>> e.as_sum(4, method="midpoint")
4*7**(1/2) + 6*14**(1/2) + 4*86**(1/2) + 2*730**(1/2)
>>> e.as_sum(4, method="midpoint").n()
124.164447891310
>>> e.n()
124.616199194723
**method=left**:
Uses the n-order rectangle rule to evaluate the integral, at each
interval the function value is taken at the left hand side of the
interval.
Examples:
>>> from sympy import sqrt
>>> from sympy.abc import x
>>> e = Integral(sqrt(x**3+1), (x, 2, 10))
>>> e
Integral((x**3 + 1)**(1/2), (x, 2, 10))
>>> e.as_sum(4, method="left")
6 + 2*65**(1/2) + 2*217**(1/2) + 6*57**(1/2)
>>> e.as_sum(4, method="left").n()
96.8853618335341
>>> e.n()
124.616199194723
"""
limits = self.limits
if len(limits) > 1:
raise NotImplementedError("Multidimensional midpoint rule not implemented yet")
else:
limit = limits[0]
if n <= 0:
raise ValueError("n must be > 0")
if n == oo:
raise NotImplementedError("Infinite summation not yet implemented")
sym, lower_limit, upper_limit = limit
dx = (upper_limit - lower_limit)/n
result = 0.
for i in range(n):
if method == "midpoint":
xi = lower_limit + i*dx + dx/2
elif method == "left":
xi = lower_limit + i*dx
elif method == "right":
xi = lower_limit + i*dx + dx
else:
raise NotImplementedError("Unknown method %s" % method)
result += self.function.subs(sym, xi)
return result*dx
@xthreaded
def integrate(*args, **kwargs):
"""integrate(f, var, ...)
Compute definite or indefinite integral of one or more variables
using Risch-Norman algorithm and table lookup. This procedure is
able to handle elementary algebraic and transcendental functions
and also a huge class of special functions, including Airy,
Bessel, Whittaker and Lambert.
var can be:
- a symbol -- indefinite integration
- a tuple (symbol, a, b) -- definite integration
Several variables can be specified, in which case the result is multiple
integration.
Also, if no var is specified at all, then the full anti-derivative of f is
returned. This is equivalent to integrating f over all its variables.
**Examples**
>>> from sympy import integrate, log
>>> from sympy.abc import a, x, y
>>> integrate(x*y, x)
x**2*y/2
>>> integrate(log(x), x)
x*log(x) - x
>>> integrate(log(x), (x, 1, a))
a*log(a) - a + 1
>>> integrate(x)
x**2/2
>>> integrate(x*y)
Traceback (most recent call last):
...
ValueError: specify integration variables to integrate x*y
Note that ``integrate(x)`` syntax is meant only for convenience
in interactive sessions and should be avoided in library code.
See also the doctest of Integral._eval_integral(), which explains
thoroughly the strategy that SymPy uses for integration.
"""
integral = Integral(*args, **kwargs)
if isinstance(integral, Integral):
return integral.doit(deep = False)
else:
return integral
@xthreaded
def line_integrate(field, curve, vars):
"""line_integrate(field, Curve, variables)
Compute the line integral.
Examples
--------
>>> from sympy import Curve, line_integrate, E, ln
>>> from sympy.abc import x, y, t
>>> C = Curve([E**t + 1, E**t - 1], (t, 0, ln(2)))
>>> line_integrate(x + y, C, [x, y])
3*2**(1/2)
"""
F = sympify(field)
if not F:
raise ValueError("Expecting function specifying field as first argument.")
if not isinstance(curve, Curve):
raise ValueError("Expecting Curve entity as second argument.")
if not ordered_iter(vars):
raise ValueError("Expecting ordered iterable for variables.")
if len(curve.functions) != len(vars):
raise ValueError("Field variable size does not match curve dimension.")
if curve.parameter in vars:
raise ValueError("Curve parameter clashes with field parameters.")
# Calculate derivatives for line parameter functions
# F(r) -> F(r(t)) and finally F(r(t)*r'(t))
Ft = F
dldt = 0
for i, var in enumerate(vars):
_f = curve.functions[i]
_dn = diff(_f, curve.parameter)
# ...arc length
dldt = dldt + (_dn * _dn)
Ft = Ft.subs(var, _f)
Ft = Ft * dldt**(S(1)/2)
integral = Integral(Ft, curve.limits).doit(deep = False)
return integral
|
from zplot import *
t = table('horizontalintervals.data')
canvas = postscript('horizontalintervals.eps')
d = drawable(canvas, coord=[50,30], xrange=[0,900],
yrange=[0,t.getmax('nodes')])
axis(d, xtitle='Throughput (MB)', xauto=[0,900,300],
ytitle='Nodes', yauto=[0,t.getmax('nodes'),1])
p = plotter()
p.horizontalintervals(d, t, yfield='nodes', xlofield='min', xhifield='max')
canvas.render()
|
from __future__ import unicode_literals
from ..preprocess import TCorrelate
def test_TCorrelate_inputs():
input_map = dict(args=dict(argstr='%s',
),
environ=dict(nohash=True,
usedefault=True,
),
ignore_exception=dict(nohash=True,
usedefault=True,
),
num_threads=dict(nohash=True,
usedefault=True,
),
out_file=dict(argstr='-prefix %s',
name_source='xset',
name_template='%s_tcorr',
),
outputtype=dict(),
pearson=dict(argstr='-pearson',
),
polort=dict(argstr='-polort %d',
),
terminal_output=dict(deprecated='1.0.0',
nohash=True,
),
xset=dict(argstr='%s',
copyfile=False,
mandatory=True,
position=-2,
),
yset=dict(argstr='%s',
copyfile=False,
mandatory=True,
position=-1,
),
)
inputs = TCorrelate.input_spec()
for key, metadata in list(input_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(inputs.traits()[key], metakey) == value
def test_TCorrelate_outputs():
output_map = dict(out_file=dict(),
)
outputs = TCorrelate.output_spec()
for key, metadata in list(output_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(outputs.traits()[key], metakey) == value
|
from __future__ import unicode_literals
from __future__ import absolute_import
import logging
logger = logging.getLogger('magiccontent.default_auth')
def naive_can_edit(request):
logger.warning(
('naive_can_edit method has been used, please provide a '
'GALLERY_PAGE_IS_OWNER_METHOD to improve the content security'))
if request.user.is_authenticated() and request.user.is_staff:
return True
return False
|
from __future__ import absolute_import
from sentry.identity.vsts import VSTSIdentityProvider
from sentry.integrations.exceptions import IntegrationError
from sentry.integrations.vsts import VstsIntegration, VstsIntegrationProvider
from sentry.models import (
Integration, IntegrationExternalProject, OrganizationIntegration, Repository,
Project
)
from sentry.plugins import plugins
from tests.sentry.plugins.testutils import VstsPlugin # NOQA
from .testutils import VstsIntegrationTestCase, CREATE_SUBSCRIPTION
class VstsIntegrationProviderTest(VstsIntegrationTestCase):
# Test data setup in ``VstsIntegrationTestCase``
def test_basic_flow(self):
self.assert_installation()
integration = Integration.objects.get(provider='vsts')
assert integration.external_id == self.vsts_account_id
assert integration.name == self.vsts_account_name
metadata = integration.metadata
assert metadata['scopes'] == list(VSTSIdentityProvider.oauth_scopes)
assert metadata['subscription']['id'] == \
CREATE_SUBSCRIPTION['publisherInputs']['tfsSubscriptionId']
assert metadata['domain_name'] == '{}.visualstudio.com'.format(
self.vsts_account_name
)
def test_migrate_repositories(self):
accessible_repo = Repository.objects.create(
organization_id=self.organization.id,
name=self.project_a['name'],
url='https://{}.visualstudio.com/DefaultCollection/_git/{}'.format(
self.vsts_account_name,
self.repo_name,
),
provider='visualstudio',
external_id=self.repo_id,
)
inaccessible_repo = Repository.objects.create(
organization_id=self.organization.id,
name='NotReachable',
url='https://randoaccount.visualstudio.com/Product/_git/NotReachable',
provider='visualstudio',
external_id='123456789',
)
self.assert_installation()
integration = Integration.objects.get(provider='vsts')
assert Repository.objects.get(
id=accessible_repo.id,
).integration_id == integration.id
assert Repository.objects.get(
id=inaccessible_repo.id,
).integration_id is None
def setupPluginTest(self):
self.project = Project.objects.create(
organization_id=self.organization.id,
)
self.plugin = plugins.get('vsts')
self.plugin.enable(self.project)
def test_disabled_plugin_when_fully_migrated(self):
self.setupPluginTest()
Repository.objects.create(
organization_id=self.organization.id,
name=self.project_a['name'],
url='https://{}.visualstudio.com/DefaultCollection/_git/{}'.format(
self.vsts_account_name,
self.repo_name,
),
provider='visualstudio',
external_id=self.repo_id,
)
# Enabled before Integration installation
assert 'vsts' in [p.slug for p in plugins.for_project(self.project)]
self.assert_installation()
# Disabled
assert 'vsts' not in [p.slug for p in plugins.for_project(self.project)]
def test_doesnt_disable_plugin_when_partially_migrated(self):
self.setupPluginTest()
# Repo accessible by new Integration
Repository.objects.create(
organization_id=self.organization.id,
name=self.project_a['name'],
url='https://{}.visualstudio.com/DefaultCollection/_git/{}'.format(
self.vsts_account_name,
self.repo_name,
),
provider='visualstudio',
external_id=self.repo_id,
)
# Inaccessible Repo - causes plugin to stay enabled
Repository.objects.create(
organization_id=self.organization.id,
name='NotReachable',
url='https://randoaccount.visualstudio.com/Product/_git/NotReachable',
provider='visualstudio',
external_id='123456789',
)
self.assert_installation()
# Still enabled
assert 'vsts' in [p.slug for p in plugins.for_project(self.project)]
def test_build_integration(self):
state = {
'account': {
'AccountName': self.vsts_account_name,
'AccountId': self.vsts_account_id,
},
'instance': '{}.visualstudio.com'.format(self.vsts_account_name),
'identity': {
'data': {
'access_token': self.access_token,
'expires_in': '3600',
'refresh_token': self.refresh_token,
'token_type': 'jwt-bearer',
},
},
}
integration = VstsIntegrationProvider()
integration_dict = integration.build_integration(state)
assert integration_dict['name'] == self.vsts_account_name
assert integration_dict['external_id'] == self.vsts_account_id
assert integration_dict['metadata']['domain_name'] == \
'{}.visualstudio.com'.format(self.vsts_account_name)
assert integration_dict['user_identity']['type'] == 'vsts'
assert integration_dict['user_identity']['external_id'] == \
self.vsts_account_id
assert integration_dict['user_identity']['scopes'] == sorted(
VSTSIdentityProvider.oauth_scopes)
def test_webhook_subscription_created_once(self):
self.assert_installation()
state = {
'account': {
'AccountName': self.vsts_account_name,
'AccountId': self.vsts_account_id,
},
'instance': '{}.visualstudio.com'.format(self.vsts_account_name),
'identity': {
'data': {
'access_token': self.access_token,
'expires_in': '3600',
'refresh_token': self.refresh_token,
'token_type': 'jwt-bearer',
},
},
}
# The above already created the Webhook, so subsequent calls to
# ``build_integration`` should omit that data.
data = VstsIntegrationProvider().build_integration(state)
assert 'subscription' not in data['metadata']
def test_fix_subscription(self):
external_id = '1234567890'
Integration.objects.create(
metadata={},
provider='vsts',
external_id=external_id,
)
data = VstsIntegrationProvider().build_integration({
'account': {
'AccountName': self.vsts_account_name,
'AccountId': external_id,
},
'instance': '{}.visualstudio.com'.format(self.vsts_account_name),
'identity': {
'data': {
'access_token': self.access_token,
'expires_in': '3600',
'refresh_token': self.refresh_token,
'token_type': 'jwt-bearer',
},
},
})
assert external_id == data['external_id']
subscription = data['metadata']['subscription']
assert subscription['id'] is not None and subscription['secret'] is not None
class VstsIntegrationTest(VstsIntegrationTestCase):
def test_get_organization_config(self):
self.assert_installation()
integration = Integration.objects.get(provider='vsts')
fields = integration.get_installation(
integration.organizations.first().id
).get_organization_config()
assert [field['name'] for field in fields] == [
'sync_status_forward',
'sync_forward_assignment',
'sync_comments',
'sync_status_reverse',
'sync_reverse_assignment',
]
def test_update_organization_config_remove_all(self):
self.assert_installation()
model = Integration.objects.get(provider='vsts')
integration = VstsIntegration(model, self.organization.id)
org_integration = OrganizationIntegration.objects.get(
organization_id=self.organization.id,
)
data = {
'sync_status_forward': {},
'other_option': 'hello',
}
IntegrationExternalProject.objects.create(
organization_integration_id=org_integration.id,
external_id=1,
resolved_status='ResolvedStatus1',
unresolved_status='UnresolvedStatus1',
)
IntegrationExternalProject.objects.create(
organization_integration_id=org_integration.id,
external_id=2,
resolved_status='ResolvedStatus2',
unresolved_status='UnresolvedStatus2',
)
IntegrationExternalProject.objects.create(
organization_integration_id=org_integration.id,
external_id=3,
resolved_status='ResolvedStatus3',
unresolved_status='UnresolvedStatus3',
)
integration.update_organization_config(data)
external_projects = IntegrationExternalProject.objects \
.all() \
.values_list('external_id', flat=True)
assert list(external_projects) == []
config = OrganizationIntegration.objects.get(
organization_id=org_integration.organization_id,
integration_id=org_integration.integration_id
).config
assert config == {
'sync_status_forward': False,
'other_option': 'hello',
}
def test_update_organization_config(self):
self.assert_installation()
org_integration = OrganizationIntegration.objects.get(
organization_id=self.organization.id,
)
model = Integration.objects.get(provider='vsts')
integration = VstsIntegration(model, self.organization.id)
# test validation
data = {
'sync_status_forward': {
1: {
'on_resolve': '',
'on_unresolve': 'UnresolvedStatus1',
},
},
}
with self.assertRaises(IntegrationError):
integration.update_organization_config(data)
data = {
'sync_status_forward': {
1: {
'on_resolve': 'ResolvedStatus1',
'on_unresolve': 'UnresolvedStatus1',
},
2: {
'on_resolve': 'ResolvedStatus2',
'on_unresolve': 'UnresolvedStatus2',
},
4: {
'on_resolve': 'ResolvedStatus4',
'on_unresolve': 'UnresolvedStatus4',
},
},
'other_option': 'hello',
}
IntegrationExternalProject.objects.create(
organization_integration_id=org_integration.id,
external_id=1,
resolved_status='UpdateMe',
unresolved_status='UpdateMe',
)
IntegrationExternalProject.objects.create(
organization_integration_id=org_integration.id,
external_id=2,
resolved_status='ResolvedStatus2',
unresolved_status='UnresolvedStatus2',
)
IntegrationExternalProject.objects.create(
organization_integration_id=org_integration.id,
external_id=3,
resolved_status='ResolvedStatus3',
unresolved_status='UnresolvedStatus3',
)
integration.update_organization_config(data)
external_projects = IntegrationExternalProject.objects \
.all() \
.order_by('external_id')
assert external_projects[0].external_id == '1'
assert external_projects[0].resolved_status == 'ResolvedStatus1'
assert external_projects[0].unresolved_status == 'UnresolvedStatus1'
assert external_projects[1].external_id == '2'
assert external_projects[1].resolved_status == 'ResolvedStatus2'
assert external_projects[1].unresolved_status == 'UnresolvedStatus2'
assert external_projects[2].external_id == '4'
assert external_projects[2].resolved_status == 'ResolvedStatus4'
assert external_projects[2].unresolved_status == 'UnresolvedStatus4'
config = OrganizationIntegration.objects.get(
organization_id=org_integration.organization_id,
integration_id=org_integration.integration_id
).config
assert config == {
'sync_status_forward': True,
'other_option': 'hello',
}
|
"""A way to read and write structs to a binary file, with fast access
Licensed under the 3-clause BSD License:
Copyright (c) 2011-2014, Neeraj Kumar (neerajkumar.org)
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the author nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL NEERAJ KUMAR BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import os, sys, time
import struct
class StructFile(object):
"""A file which contains structs"""
def __init__(self, structfmt, fname):
"""Initializes a structfile using the given structfmt and fname.
The file is opened in the given mode ('rb' as default)."""
self.struct = struct.Struct(structfmt)
self.size = self.struct.size
self.fname = fname
if not os.path.exists(fname):
open(fname, 'wb').close()
self.readptr = open(fname, 'rb')
try:
self.writeptr = open(fname, 'r+b')
except IOError:
self.writeptr = None
def __len__(self):
"""Returns the number of structs in this file"""
f = self.readptr
f.seek(0, os.SEEK_END)
n = f.tell()
        return n // self.size  # integer count of complete structs
def __iter__(self):
"""Iterates over structs in this file, from the beginning"""
f = open(self.fname, 'rb')
        while 1:
            data = f.read(self.size)
            if len(data) < self.size:
                # f.read() returns a short or empty string at EOF rather
                # than raising EOFError, so detect the end explicitly
                break
            yield self.struct.unpack(data)
def __getitem__(self, i):
"""Returns the i'th struct.
Negative indices work as well.
        Raises IndexError on an invalid index.
        """
        l = len(self)
        if i < 0:
            i += l
        if i < 0 or i >= l: raise IndexError
f = self.readptr
f.seek(self.size*i)
return self.struct.unpack(f.read(self.size))
def __setitem__(self, i, val):
"""Sets the i'th struct. The file must already have this many structs.
Negative indices work as well.
        Raises IndexError on an invalid index.
        Raises IOError if the file doesn't have write permissions.
        """
        l = len(self)
        if i < 0:
            i += l
        if i < 0 or i >= l: raise IndexError
f = self.writeptr
if not f: raise IOError
f.seek(self.size*i)
f.write(self.struct.pack(*val))
def flush(self):
"""Flushes the file if any changes have been made.
Raises IOError if the file doesn't have write permissions.
"""
if not self.writeptr: raise IOError
self.writeptr.flush()
def append(self, val):
"""Adds the given value to the end of the file.
Raises IOError if the file doesn't have write permissions.
"""
f = self.writeptr
if not f: raise IOError
f.seek(0, os.SEEK_END)
f.write(self.struct.pack(*val))
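# Minimal usage sketch (the file name and values are hypothetical):
#
#   sf = StructFile('ii', 'pairs.dat')   # each record holds two ints
#   sf.append((1, 2))
#   sf.append((3, 4))
#   sf[0] = (9, 9)
#   sf.flush()
#   print len(sf), sf[0]                 # -> 2 (9, 9)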
|
import platform
from . import meta
from . import parser
from . import tools
from . import exc
Log = tools.minimal_logger(__name__)
def get_parser(**kw):
"""
Detect the proper parser class, and return it instantiated.
Optional Arguments:
parser
The parser class to use instead of detecting the proper one.
distro
The distro to parse for (used for testing).
kernel
The kernel to parse for (used for testing).
ifconfig
The ifconfig (stdout) to pass to the parser (used for testing).
"""
parser = kw.get('parser', None)
ifconfig = kw.get('ifconfig', None)
if not parser:
distro = kw.get('distro', platform.system())
full_kernel = kw.get('kernel', platform.uname()[2])
kernel = '.'.join(full_kernel.split('.')[0:2])
if distro == 'Linux':
if float(kernel) < 3.3:
from .parser import Linux2Parser as LinuxParser
else:
from .parser import LinuxParser
parser = LinuxParser(ifconfig=ifconfig)
elif distro in ['Darwin', 'MacOSX']:
from .parser import MacOSXParser
parser = MacOSXParser(ifconfig=ifconfig)
elif distro in ['FreeBSD']:
from .parser import FreeBSDParser
parser = FreeBSDParser(ifconfig=ifconfig)
else:
raise exc.IfcfgParserError("Unknown distro type '%s'." % distro)
Log.debug("Distro detected as '%s'" % distro)
Log.debug("Using '%s'" % parser)
return parser
def interfaces():
"""
Return just the parsed interfaces dictionary from the proper parser.
"""
parser = get_parser()
return parser.interfaces
def default_interface():
"""
Return just the default interface device dictionary.
"""
parser = get_parser()
return parser.default_interface
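# A hedged usage sketch (illustrative; not part of the original module).
# Because this module uses relative imports, call it through its package;
# the package name 'ifcfg' is an assumption based on exc.IfcfgParserError:
#
#   import ifcfg
#   for name, info in ifcfg.interfaces().items():
#       print("%s: %s" % (name, info))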
|
from numpy.testing import assert_allclose, assert_equal
from . import plt
from .. import utils
def test_path_data():
circle = plt.Circle((0, 0), 1)
vertices, codes = utils.SVG_path(circle.get_path())
assert_allclose(vertices.shape, (25, 2))
assert_equal(codes, ['M', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'Z'])
def test_linestyle():
linestyles = {'solid': 'none', '-': 'none',
#'dashed': '6,6', '--': '6,6',
#'dotted': '2,2', ':': '2,2',
#'dashdot': '4,4,2,4', '-.': '4,4,2,4',
'': None, 'None': None}
for ls, result in linestyles.items():
line, = plt.plot([1, 2, 3], linestyle=ls)
assert_equal(utils.get_dasharray(line), result)
def test_axis_w_fixed_formatter():
positions, labels = [0, 1, 10], ['A','B','C']
plt.xticks(positions, labels)
props = utils.get_axis_properties(plt.gca().xaxis)
assert_equal(props['tickvalues'], positions)
assert_equal(props['tickformat'], labels)
|
"""Operations on directions in 3D conformal geometric algebra."""
from __pyversor__.c3d.directions import (
DirectionVector, DirectionBivector, DirectionTrivector)
|
"""
Responsible for generating the testing decoders based on
parsed table representations.
"""
import dgen_core
import dgen_opt
import dgen_output
import dgen_decoder
import dgen_actuals
import dgen_baselines
"""The current command line arguments to use"""
_cl_args = {}
CLASS = '%(DECODER)s_%(rule)s'
NAMED_CLASS = 'Named%(DECODER)s_%(rule)s'
INSTANCE = '%(DECODER_class)s_instance_'
BASE_TESTER='%(decoder_base)sTester%(base_test_case)s'
BASE_BASE_TESTER='%(decoder_base)sTester%(qualifier)s'
DECODER_TESTER='%(baseline)sTester_%(test_case)s'
def _safety_to_check(safety):
return [s for s in safety if not isinstance(s, str)]
def _interesting_patterns(patterns):
""" Filters out non-interesting patterns."""
# Only include rows not corresponding to rule pattern,
# and not always true.
return [ p for p in patterns if (
(not p.column or p.column.name() != '$pattern')
and not p.matches_any())]
def _install_action(decoder, action, values):
"""Install common names needed to generate code for the given action,
and adds it to the values map.
"""
  # This code is somewhat inefficient in that, in most cases, most of the
  # added strings are not needed. On the other hand, by having a
# single routine that generates all action specific names at one
# spot, it is much easier to change definitions.
values['baseline'] = action.baseline()
values['actual'] = action.actual()
values['decoder_base'] = decoder.base_class(values['baseline'])
values['rule'] = action.rule()
values['qualifier'] = ''.join([s for s in action.safety()
if isinstance(s, str)])
if action.constraints():
values['qualifier'] += (action.constraints().other
if action.constraints().other else '')
else:
    values['qualifier'] = ''
values['pattern'] = action.pattern()
  # Add dummies for row cases, in case not set up. See function
  # _install_row_cases() for more details on these fields.
for field in [ 'base_test_case', 'test_case', 'test_pattern' ]:
if not values.get(field):
values[field] = ''
values['baseline_class'] = _decoder_replace(CLASS, 'baseline') % values
values['actual_class'] = _decoder_replace(CLASS, 'actual') % values
_install_baseline_and_actuals('named_DECODER_class', NAMED_CLASS, values)
_install_baseline_and_actuals('DECODER_instance', INSTANCE, values)
values['base_tester'] = BASE_TESTER % values
values['base_base_tester'] = BASE_BASE_TESTER % values
values['decoder_tester'] = DECODER_TESTER % values
def _decoder_replace(string, basis):
return string.replace('DECODER', basis)
def _install_key_pattern(key, pattern, basis, values):
# Replace DECODER in key and pattern with basis, then
# install into values.
values[_decoder_replace(key, basis)] = (
_decoder_replace(pattern, basis) % values)
def _install_baseline_and_actuals(key, pattern, values):
# Replace DECODER with 'baseline' and 'actual', apply it
# to the key and pattern, and then install into values.
for basis in ['baseline', 'actual']:
_install_key_pattern(key, pattern, basis, values)
def _generate_baseline_and_actual(code, symbol, decoder,
values, out, actions=['rule']):
""" Generates code to define the given symbol. Does so for both
baseline and actual decoders, filtering using actions.
code - The code to generate.
symbol - The symbol being defined.
decoder - The decoder (tables) to use.
      values - The name map to use to generate code.
      out - The output stream to write the generated code to.
      actions - The fields to keep when generating code.
"""
generated_symbols = set()
  # Generate one for each type of baseline decoder.
  baseline_actions = actions[:]
  baseline_actions.insert(0, 'baseline')
  baseline_code = _decoder_replace(code, 'baseline')
  baseline_symbol = _decoder_replace(symbol, 'baseline')
  for d in decoder.action_filter(baseline_actions).decoders():
    _install_action(decoder, d, values)
sym_name = (baseline_symbol % values)
if sym_name not in generated_symbols:
out.write(baseline_code % values)
generated_symbols.add(sym_name)
# Generate one for each actual type that is different than the
# baseline.
actual_actions = actions[:]
actual_actions.insert(0, 'actual-not-baseline')
actual_code = _decoder_replace(code, 'actual')
actual_symbol = _decoder_replace(symbol, 'actual')
for d in decoder.action_filter(actual_actions).decoders():
# Note: 'actual-not-baseline' sets actual to None if same as baseline.
if d.actual():
      _install_action(decoder, d, values)
sym_name = (actual_symbol % values)
if sym_name not in generated_symbols:
out.write(actual_code % values)
generated_symbols.add(sym_name)
NAMED_BASES_H_HEADER="""%(FILE_HEADER)s
%(NOT_TCB_MESSAGE)s
namespace nacl_arm_test {
"""
GENERATED_BASELINE_HEADER="""
/*
* Define named class decoders for each automatically generated baseline
* decoder.
*/
"""
NAMED_GEN_BASE_DECLARE="""class Named%(gen_base)s
: public NamedClassDecoder {
public:
Named%(gen_base)s()
: NamedClassDecoder(decoder_, "%(gen_base)s")
{}
private:
nacl_arm_dec::%(gen_base)s decoder_;
NACL_DISALLOW_COPY_AND_ASSIGN(Named%(gen_base)s);
};
"""
NAMED_BASES_H_FOOTER="""
} // namespace nacl_arm_test
"""
NAMED_BASES_H_SUFFIX = '_named_bases.h'
def generate_named_bases_h(decoder, decoder_name, filename, out, cl_args):
"""Defines named classes needed for testing generated baselines.
Args:
    decoder: The decoder tables to process.
decoder_name: The name of the decoder state to build.
filename: The (localized) name for the .h file.
out: a COutput object to write to.
cl_args: A dictionary of additional command line arguments.
"""
global _cl_args
if not decoder.primary: raise Exception('No tables provided.')
assert filename.endswith(NAMED_BASES_H_SUFFIX)
_cl_args = cl_args
decoder = dgen_baselines.AddBaselinesToDecoder(decoder)
values = {
'FILE_HEADER': dgen_output.HEADER_BOILERPLATE,
'NOT_TCB_MESSAGE' : dgen_output.NOT_TCB_BOILERPLATE,
'IFDEF_NAME' : dgen_output.ifdef_name(filename),
'FILENAME_BASE': filename[:-len(NAMED_BASES_H_SUFFIX)],
'decoder_name': decoder_name,
}
out.write(NAMED_BASES_H_HEADER % values)
_generate_generated_baseline(decoder, out)
out.write(NAMED_BASES_H_FOOTER % values)
def _generate_generated_baseline(decoder, out):
""" Generates code to define the given symbol. Does so for
the generated baseline decoders, filtering using actions.
"""
generated_symbols = set()
values = {}
out.write(GENERATED_BASELINE_HEADER % values)
for d in decoder.action_filter(['generated_baseline']).decoders():
gen_base = d.find('generated_baseline')
if gen_base and gen_base not in generated_symbols:
values['gen_base'] = gen_base
out.write(NAMED_GEN_BASE_DECLARE % values)
generated_symbols.add(gen_base)
NAMED_CLASSES_H_HEADER="""%(FILE_HEADER)s
%(NOT_TCB_MESSAGE)s
"""
RULE_CLASSES_HEADER="""
/*
* Define rule decoder classes.
*/
namespace nacl_arm_dec {
"""
RULE_CLASS="""class %(DECODER_class)s
: public %(DECODER)s {
};
"""
RULE_CLASS_SYM="%(DECODER_class)s"
NAMED_DECODERS_HEADER="""} // nacl_arm_dec
namespace nacl_arm_test {
/*
* Define named class decoders for each class decoder.
* The main purpose of these classes is to introduce
* instances that are named specifically to the class decoder
* and/or rule that was used to parse them. This makes testing
* much easier in that error messages use these named classes
* to clarify what row in the corresponding table was used
* to select this decoder. Without these names, debugging the
 * output of the test code would be nearly impossible.
*/
"""
NAMED_CLASS_DECLARE="""class %(named_DECODER_class)s
: public NamedClassDecoder {
public:
%(named_DECODER_class)s()
: NamedClassDecoder(decoder_, "%(DECODER)s %(rule)s")
{}
private:
nacl_arm_dec::%(DECODER_class)s decoder_;
NACL_DISALLOW_COPY_AND_ASSIGN(%(named_DECODER_class)s);
};
"""
NAMED_CLASS_DECLARE_SYM="%(named_DECODER_class)s"
NAMED_CLASSES_H_FOOTER="""
// Defines the default parse action if the table doesn't define
// an action.
class NotImplementedNamed : public NamedClassDecoder {
public:
NotImplementedNamed()
: NamedClassDecoder(decoder_, "not implemented")
{}
private:
nacl_arm_dec::NotImplemented decoder_;
NACL_DISALLOW_COPY_AND_ASSIGN(NotImplementedNamed);
};
} // namespace nacl_arm_test
"""
def generate_named_classes_h(decoder, decoder_name, filename, out, cl_args):
"""Defines named classes needed for decoder testing.
Args:
    decoder: The decoder tables to process.
decoder_name: The name of the decoder state to build.
filename: The (localized) name for the .h file.
out: a COutput object to write to.
cl_args: A dictionary of additional command line arguments.
"""
global _cl_args
if not decoder.primary: raise Exception('No tables provided.')
assert filename.endswith('_named_classes.h')
_cl_args = cl_args
# Generate actuals from descriptions in tables, for each of the
# tables that should automatically generate the corresponding
# needed actual class decoders.
actuals = cl_args.get('auto-actual')
if actuals:
decoder = dgen_actuals.AddAutoActualsToDecoder(decoder, actuals)
values = {
'FILE_HEADER': dgen_output.HEADER_BOILERPLATE,
'NOT_TCB_MESSAGE' : dgen_output.NOT_TCB_BOILERPLATE,
'IFDEF_NAME' : dgen_output.ifdef_name(filename),
'FILENAME_BASE': filename[:-len('_named_classes.h')],
'decoder_name': decoder_name,
}
out.write(NAMED_CLASSES_H_HEADER % values)
out.write(RULE_CLASSES_HEADER)
_generate_baseline_and_actual(RULE_CLASS, RULE_CLASS_SYM,
decoder, values, out)
out.write(NAMED_DECODERS_HEADER)
_generate_baseline_and_actual(NAMED_CLASS_DECLARE, NAMED_CLASS_DECLARE_SYM,
decoder, values, out)
out.write(NAMED_CLASSES_H_FOOTER % values)
NAMED_DECODER_H_HEADER="""%(FILE_HEADER)s
%(NOT_TCB_MESSAGE)s
namespace nacl_arm_test {
// Defines a (named) decoder class selector for instructions
class Named%(decoder_name)s : nacl_arm_dec::DecoderState {
public:
explicit Named%(decoder_name)s();
// Parses the given instruction, returning the named class
// decoder to use.
const NamedClassDecoder& decode_named(
const nacl_arm_dec::Instruction) const;
// Parses the given instruction, returning the class decoder
// to use.
virtual const nacl_arm_dec::ClassDecoder& decode(
const nacl_arm_dec::Instruction) const;
// The following fields define the set of class decoders
// that can be returned by the API function "decode_named". They
// are created once as instance fields, and then returned
// by the table methods above. This speeds up the code since
// the class decoders need to only be built once (and reused
// for each call to "decode_named")."""
DECODER_STATE_FIELD="""
const %(named_DECODER_class)s %(DECODER_instance)s;"""
DECODER_STATE_FIELD_NAME="%(named_DECODER_class)s"
DECODER_STATE_DECODER_COMMENTS="""
private:
// The following list of methods correspond to each decoder table,
// and implements the pattern matching of the corresponding bit
// patterns. After matching the corresponding bit patterns, they
// either call other methods in this list (corresponding to another
// decoder table), or they return the instance field that implements
// the class decoder that should be used to decode the particular
// instruction."""
DECODER_STATE_DECODER="""
inline const NamedClassDecoder& decode_%(table)s(
const nacl_arm_dec::Instruction inst) const;"""
NAMED_DECODER_H_FOOTER="""
// Defines default action if parse tables don't define what action
// to take.
const NotImplementedNamed not_implemented_;
};
} // namespace nacl_arm_test
"""
def generate_named_decoder_h(decoder, decoder_name, filename, out, cl_args):
"""Generates the named decoder for testing.
Args:
    decoder: The decoder tables to process.
decoder_name: The name of the decoder state to build.
filename: The (localized) name for the .h file.
out: a COutput object to write to.
cl_args: A dictionary of additional command line arguments.
"""
global _cl_args
if not decoder.primary: raise Exception('No tables provided.')
assert filename.endswith('_named_decoder.h')
_cl_args = cl_args
# Generate actuals from descriptions in tables, for each of the
# tables that should automatically generate the corresponding
# needed actual class decoders.
actuals = cl_args.get('auto-actual')
if actuals:
decoder = dgen_actuals.AddAutoActualsToDecoder(decoder, actuals)
values = {
'FILE_HEADER': dgen_output.HEADER_BOILERPLATE,
'NOT_TCB_MESSAGE' : dgen_output.NOT_TCB_BOILERPLATE,
'IFDEF_NAME' : dgen_output.ifdef_name(filename),
'FILENAME_BASE': filename[:-len('_named_decoder.h')],
'decoder_name': decoder_name,
}
out.write(NAMED_DECODER_H_HEADER % values)
_generate_baseline_and_actual(DECODER_STATE_FIELD, DECODER_STATE_FIELD_NAME,
decoder, values, out)
out.write(DECODER_STATE_DECODER_COMMENTS)
for table in decoder.tables():
values['table'] = table.name
out.write(DECODER_STATE_DECODER % values)
out.write(NAMED_DECODER_H_FOOTER % values)
NAMED_CC_HEADER="""%(FILE_HEADER)s
%(NOT_TCB_MESSAGE)s
using nacl_arm_dec::ClassDecoder;
using nacl_arm_dec::Instruction;
namespace nacl_arm_test {
Named%(decoder_name)s::Named%(decoder_name)s()
{}
"""
PARSE_TABLE_METHOD_HEADER="""
/*
* Implementation of table %(table_name)s.
* Specified by: %(citation)s
*/
const NamedClassDecoder& Named%(decoder_name)s::decode_%(table_name)s(
const nacl_arm_dec::Instruction inst) const {
"""
METHOD_HEADER_TRACE="""
fprintf(stderr, "decode %(table_name)s\\n");
"""
METHOD_DISPATCH_BEGIN="""
if (%s"""
METHOD_DISPATCH_CONTINUE=""" &&
%s"""
METHOD_DISPATCH_END=") {"
METHOD_DISPATCH_TRACE="""
fprintf(stderr, "count = %s\\n");"""
PARSE_TABLE_METHOD_ROW="""
return %(action)s;
"""
METHOD_DISPATCH_CLOSE=""" }
"""
PARSE_TABLE_METHOD_FOOTER="""
// Catch any attempt to fall through...
return not_implemented_;
}
"""
NAMED_CC_FOOTER="""
const NamedClassDecoder& Named%(decoder_name)s::
decode_named(const nacl_arm_dec::Instruction inst) const {
return decode_%(entry_table_name)s(inst);
}
const nacl_arm_dec::ClassDecoder& Named%(decoder_name)s::
decode(const nacl_arm_dec::Instruction inst) const {
return decode_named(inst).named_decoder();
}
} // namespace nacl_arm_test
"""
def generate_named_cc(decoder, decoder_name, filename, out, cl_args):
"""Implementation of the test decoder in .cc file
Args:
    decoder: The decoder tables to process.
    decoder_name: The name of the decoder state to build.
    filename: The (localized) name for the .cc file.
out: a COutput object to write to.
cl_args: A dictionary of additional command line arguments.
"""
global _cl_args
if not decoder.primary: raise Exception('No tables provided.')
assert filename.endswith('.cc')
_cl_args = cl_args
# Generate actuals from descriptions in tables, for each of the
# tables that should automatically generate the corresponding
# needed actual class decoders.
actuals = cl_args.get('auto-actual')
if actuals:
decoder = dgen_actuals.AddAutoActualsToDecoder(decoder, actuals)
values = {
'FILE_HEADER': dgen_output.HEADER_BOILERPLATE,
'NOT_TCB_MESSAGE' : dgen_output.NOT_TCB_BOILERPLATE,
'FILENAME_BASE' : filename[:-len('.cc')],
'decoder_name': decoder_name,
'entry_table_name': decoder.primary.name,
}
out.write(NAMED_CC_HEADER % values)
_generate_decoder_method_bodies(decoder, values, out)
out.write(NAMED_CC_FOOTER % values)
def _generate_decoder_method_bodies(decoder, values, out):
global _cl_args
for table in decoder.tables():
    # Add the default row as the last of the optimized rows, so that
    # it applies only when no other row matches.
opt_rows = sorted(
dgen_opt.optimize_rows(
table.action_filter(['baseline', 'rule']).rows(False)))
if table.default_row:
opt_rows.append(table.default_row)
opt_rows = table.add_column_to_rows(opt_rows)
print ("Table %s: %d rows minimized to %d"
% (table.name, len(table.rows()), len(opt_rows)))
values['table_name'] = table.name
    values['citation'] = table.citation
out.write(PARSE_TABLE_METHOD_HEADER % values)
if _cl_args.get('trace') == 'True':
out.write(METHOD_HEADER_TRACE % values)
# Add message to stop compilation warnings if this table
# doesn't require subtables to select a class decoder.
if not table.methods():
out.write(" UNREFERENCED_PARAMETER(inst);")
count = 0
for row in opt_rows:
count = count + 1
if row.action.__class__.__name__ == 'DecoderAction':
_install_action(decoder, row.action, values)
action = '%(baseline_instance)s' % values
elif row.action.__class__.__name__ == 'DecoderMethod':
action = 'decode_%s(inst)' % row.action.name
else:
raise Exception('Bad table action: %s' % row.action)
# Each row consists of a set of bit patterns defining if the row
# is applicable. Convert this into a sequence of anded C test
# expressions. For example, convert the following pair of bit
# patterns:
#
# xxxx1010xxxxxxxxxxxxxxxxxxxxxxxx
# xxxxxxxxxxxxxxxxxxxxxxxxxxxx0101
#
      # Each instruction is masked to get the relevant bits, and then
# tested against the corresponding expected bits. Hence, the
# above example is converted to:
#
      #    ((inst & 0x0F000000) == 0x0A000000) &&
      #    ((inst & 0x0000000F) == 0x00000005)
out.write(METHOD_DISPATCH_BEGIN %
row.patterns[0].to_commented_bool())
for p in row.patterns[1:]:
out.write(METHOD_DISPATCH_CONTINUE % p.to_commented_bool())
out.write(METHOD_DISPATCH_END)
if _cl_args.get('trace') == 'True':
out.write(METHOD_DISPATCH_TRACE % count)
values['action'] = action
out.write(PARSE_TABLE_METHOD_ROW % values)
out.write(METHOD_DISPATCH_CLOSE)
out.write(PARSE_TABLE_METHOD_FOOTER % values)
TEST_CC_HEADER="""%(FILE_HEADER)s
%(NOT_TCB_MESSAGE)s
using nacl_arm_dec::Instruction;
using nacl_arm_dec::ClassDecoder;
using nacl_arm_dec::Register;
using nacl_arm_dec::RegisterList;
namespace nacl_arm_test {
// The following classes are derived class decoder testers that
// add row pattern constraints and decoder restrictions to each tester.
// This is done so that it can be used to make sure that the
// corresponding pattern is not tested for cases that would be excluded
// due to row checks, or restrictions specified by the row restrictions.
"""
CONSTRAINT_TESTER_CLASS_HEADER="""
// %(row_comment)s
class %(base_tester)s
: public %(base_base_tester)s {
public:
%(base_tester)s(const NamedClassDecoder& decoder)
: %(base_base_tester)s(decoder) {}"""
CONSTRAINT_TESTER_RESTRICTIONS_HEADER="""
virtual bool PassesParsePreconditions(
nacl_arm_dec::Instruction inst,
const NamedClassDecoder& decoder);"""
CONSTRAINT_TESTER_SANITY_HEADER="""
virtual bool ApplySanityChecks(nacl_arm_dec::Instruction inst,
const NamedClassDecoder& decoder);"""
CONSTRAINT_TESTER_CLASS_CLOSE="""
};
"""
CONSTRAINT_TESTER_PARSE_HEADER="""
bool %(base_tester)s
::PassesParsePreconditions(
nacl_arm_dec::Instruction inst,
const NamedClassDecoder& decoder) {"""
ROW_CONSTRAINTS_HEADER="""
  // Check that row patterns apply to the pattern being checked."""
PATTERN_CONSTRAINT_RESTRICTIONS_HEADER="""
// Check pattern restrictions of row."""
CONSTRAINT_CHECK="""
// %(comment)s
if (%(code)s) return false;"""
CONSTRAINT_TESTER_CLASS_FOOTER="""
// Check other preconditions defined for the base decoder.
return %(base_base_tester)s::
PassesParsePreconditions(inst, decoder);
}
"""
SAFETY_TESTER_HEADER="""
bool %(base_tester)s
::ApplySanityChecks(nacl_arm_dec::Instruction inst,
const NamedClassDecoder& decoder) {
NC_PRECOND(%(base_base_tester)s::
ApplySanityChecks(inst, decoder));"""
SAFETY_TESTER_CHECK="""
// safety: %(comment)s
EXPECT_TRUE(%(code)s);"""
DEFS_SAFETY_CHECK="""
// defs: %(comment)s;
EXPECT_TRUE(decoder.defs(inst).IsSame(%(code)s));"""
SAFETY_TESTER_FOOTER="""
return true;
}
"""
TESTER_CLASS_HEADER="""
// The following are derived class decoder testers for decoder actions
// associated with a pattern of an action. These derived classes introduce
// a default constructor that automatically initializes the expected decoder
// to the corresponding instance in the generated DecoderState.
"""
TESTER_CLASS="""
// %(row_comment)s
class %(decoder_tester)s
: public %(base_tester)s {
public:
%(decoder_tester)s()
: %(base_tester)s(
state_.%(baseline_instance)s)
{}
};
"""
TEST_HARNESS="""
// Defines a gtest testing harness for tests.
class %(decoder_name)sTests : public ::testing::Test {
protected:
%(decoder_name)sTests() {}
};
// The following functions test each pattern specified in parse
// decoder tables.
"""
TEST_FUNCTION_ACTUAL_VS_BASELINE="""
// %(row_comment)s
TEST_F(%(decoder_name)sTests,
%(decoder_tester)s_Test%(test_pattern)s) {
%(decoder_tester)s baseline_tester;
%(named_actual_class)s actual;
ActualVsBaselineTester a_vs_b_tester(actual, baseline_tester);
a_vs_b_tester.Test("%(pattern)s");
}
"""
TEST_FUNCTION_BASELINE="""
// %(row_comment)s
TEST_F(%(decoder_name)sTests,
%(decoder_tester)s_Test%(test_pattern)s) {
%(decoder_tester)s tester;
tester.Test("%(pattern)s");
}
"""
TEST_FUNCTION_BASELINE_VS_BASELINE="""
// %(row_comment)s
TEST_F(%(decoder_name)sTests,
BvB_%(decoder_tester)s_Test%(test_pattern)s) {
%(decoder_tester)s old_baseline_tester;
Named%(gen_decoder)s gen_baseline;
BaselineVsBaselineTester b_vs_b_tester(gen_baseline, old_baseline_tester);
b_vs_b_tester.Test("%(pattern)s");
}
"""
TEST_CC_FOOTER="""
} // namespace nacl_arm_test
int main(int argc, char* argv[]) {
testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
}
"""
def generate_tests_cc(decoder, decoder_name, out, cl_args, tables):
"""Generates pattern tests for the rows in the given list of tables
in the given decoder."""
global _cl_args
if not decoder.primary: raise Exception('No tables provided.')
_cl_args = cl_args
# Generate actuals from descriptions in tables, for each of the
# tables that should automatically generate the corresponding
# needed actual class decoders.
actuals = cl_args.get('auto-actual')
if actuals:
decoder = dgen_actuals.AddAutoActualsToDecoder(decoder, actuals)
decoder = dgen_baselines.AddBaselinesToDecoder(decoder, tables)
baselines = cl_args.get('test-base')
if not baselines: baselines = []
decoder = _decoder_restricted_to_tables(decoder, tables)
values = {
'FILE_HEADER': dgen_output.HEADER_BOILERPLATE,
'NOT_TCB_MESSAGE' : dgen_output.NOT_TCB_BOILERPLATE,
'decoder_name': decoder_name,
}
out.write(TEST_CC_HEADER % values)
_generate_constraint_testers(decoder, values, out)
_generate_rule_testers(decoder, values, out)
out.write(TEST_HARNESS % values)
_generate_test_patterns_with_baseline_tests(decoder, values, out, baselines)
out.write(TEST_CC_FOOTER % values)
def _filter_test_action(action, with_patterns, with_rules):
"""Filters the actions to pull out relavant entries, based on whether we
want to include patterns and rules.
"""
action_fields = ['actual', 'baseline', 'generated_baseline',
'constraints'] + dgen_decoder.METHODS
if with_patterns:
action_fields += ['pattern' ]
if with_rules:
action_fields += ['rule']
return action.action_filter(action_fields)
def _filter_test_row(row, with_patterns=False, with_rules=True):
"""Filters a row t pulll out actions with relavant entries, based on
whether we want to include patterns and rules.
"""
return row.copy_with_action(
_filter_test_action(row.action, with_patterns, with_rules))
def _install_row_cases(row, values):
"""Installs row case names, based on values entries."""
# First define base testers that add row constraints and safety checks.
constraint_rows_map = values.get('constraint_rows')
if constraint_rows_map:
base_row = _filter_test_row(row, with_rules=False)
values['base_test_case'] = (
'Case%s' % constraint_rows_map[dgen_core.neutral_repr(base_row)])
else:
values['base_test_case'] = ''
# Add test decoders associated with the row in the table.
decoder_rows_map = values.get('decoder_rows')
if decoder_rows_map:
decoder_row = _filter_test_row(row)
values['test_case'] = (
'Case%s' % decoder_rows_map[dgen_core.neutral_repr(decoder_row)])
else:
values['test_case'] = ''
  # Incorporate patterns with each row.
pattern_rows_map = values.get('test_rows')
if pattern_rows_map:
pattern_row = _filter_test_row(row, with_patterns=True)
values['test_pattern'] = (
'Case%s' % pattern_rows_map[dgen_core.neutral_repr(pattern_row)])
else:
values['test_pattern'] = ''
def _install_test_row(row, decoder, values,
with_patterns=False, with_rules=True):
"""Installs data associated with the given row into the values map.
Installs the baseline class, rule name, and constraints associated
with the row. If with_patterns is specified, then pattern information and
actual class information is also inserted.
"""
action = _filter_test_action(row.action, with_patterns, with_rules)
values['row_comment'] = dgen_output.commented_string(
repr(row.copy_with_action(action)))
_install_action(decoder, action, values)
return action
def _rows_to_test(decoder, values, with_patterns=False, with_rules=True):
"""Returns the rows of the decoder that define enough information
that testing can be done.
"""
generated_names = set()
rows = []
for table in decoder.tables():
for row in table.rows():
if (isinstance(row.action, dgen_core.DecoderAction) and
row.action.pattern()):
new_row = row.copy_with_action(
_install_test_row(row, decoder, values, with_patterns, with_rules))
constraint_tester = dgen_core.neutral_repr(new_row)
if constraint_tester not in generated_names:
generated_names.add(constraint_tester)
rows.append(new_row)
return sorted(rows)
def _row_filter_interesting_patterns(row):
"""Builds a copy of the row, removing uninteresting column patterns."""
return row.copy_with_patterns(_interesting_patterns(row.patterns))
def _generate_constraint_testers(decoder, values, out):
"""Generates the testers needed to implement the constraints
associated with each row having a pattern.
"""
rows = _rows_to_test(decoder, values, with_rules=False)
values['constraint_rows'] = _index_neutral_map(rows)
for r in rows:
_install_row_cases(r, values)
row = _row_filter_interesting_patterns(r)
action = _install_test_row(row, decoder, values)
safety_to_check = _safety_to_check(action.safety())
defs_to_check = action.defs()
out.write(CONSTRAINT_TESTER_CLASS_HEADER % values)
if row.patterns or action.constraints().restrictions:
      out.write(CONSTRAINT_TESTER_RESTRICTIONS_HEADER % values)
if safety_to_check or defs_to_check:
out.write(CONSTRAINT_TESTER_SANITY_HEADER % values)
out.write(CONSTRAINT_TESTER_CLASS_CLOSE % values)
if row.patterns or action.constraints().restrictions:
out.write(CONSTRAINT_TESTER_PARSE_HEADER % values)
if row.patterns:
        out.write(ROW_CONSTRAINTS_HEADER % values)
for p in row.patterns:
not_p = p.negate()
values['comment'] = dgen_output.commented_string(repr(not_p), ' ')
values['code'] = not_p.to_bool()
out.write(CONSTRAINT_CHECK % values)
if action.constraints().restrictions:
out.write(PATTERN_CONSTRAINT_RESTRICTIONS_HEADER)
for c in action.constraints().restrictions:
not_c = c.negate()
values['comment'] = dgen_output.commented_string(repr(not_c), ' ')
values['code'] = not_c.to_bool()
out.write(CONSTRAINT_CHECK % values)
out.write(CONSTRAINT_TESTER_CLASS_FOOTER % values)
if safety_to_check or defs_to_check:
out.write(SAFETY_TESTER_HEADER % values)
for check in safety_to_check:
values['comment'] = dgen_output.commented_string(
repr(check), ' ')
values['code'] = check.to_bool()
out.write(SAFETY_TESTER_CHECK % values)
if defs_to_check:
values['comment'] = dgen_output.commented_string(
repr(defs_to_check), ' ')
values['code'] = defs_to_check.to_register_list()
out.write(DEFS_SAFETY_CHECK % values)
out.write(SAFETY_TESTER_FOOTER % values)
def _generate_rule_testers(decoder, values, out):
"""Generates the testers that tests the rule associated with
each row having a pattern.
"""
out.write(TESTER_CLASS_HEADER % values)
rows = _rows_to_test(decoder, values)
values['decoder_rows'] = _index_neutral_map(rows)
for r in rows:
_install_row_cases(r, values)
row = _row_filter_interesting_patterns(r)
_install_test_row(row, decoder, values)
out.write(TESTER_CLASS % values)
def _decoder_restricted_to_tables(decoder, tables):
"""Returns a copy of the decoder, with only the given table names (
or all tables if no names are specified.
"""
if not tables:
return decoder
new_decoder = dgen_core.Decoder()
for tbl in [tbl for tbl in decoder.tables() if tbl.name in tables]:
new_decoder.add(tbl)
new_decoder.set_class_defs(decoder.get_class_defs())
return new_decoder
def _generate_test_patterns_with_baseline_tests(
decoder, values, out, baseline_test_tables):
_generate_test_patterns(decoder, values, out, False)
_generate_test_patterns(
_decoder_restricted_to_tables(decoder, baseline_test_tables),
values, out, True)
def _generate_test_patterns(decoder, values, out, add_baseline_tests):
"""Generates a test function for each row having a pattern associated
with the table row.
"""
rows = _rows_to_test(decoder, values, with_patterns=True)
values['test_rows'] = _index_neutral_map(rows)
for r in rows:
_install_row_cases(r, values)
row = _row_filter_interesting_patterns(r)
action = _install_test_row(row, decoder, values, with_patterns=True)
if add_baseline_tests:
if action.find('generated_baseline'):
values['gen_decoder'] = action.find('generated_baseline')
out.write(TEST_FUNCTION_BASELINE_VS_BASELINE % values)
elif action.actual() == action.baseline():
out.write(TEST_FUNCTION_BASELINE % values)
else:
out.write(TEST_FUNCTION_ACTUAL_VS_BASELINE % values)
def _index_neutral_map(values):
"""Returns a dictionary from each neutral_repr(value) in list
values, to its corresponding index. This is done to reduce the
number of compares to find the index, speeding up code
generation.
"""
  lookup_map = {}
  for index, v in enumerate(values):
    lookup_map[dgen_core.neutral_repr(v)] = index
  return lookup_map
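# A hedged illustration (not part of the original module): for rows
# [row_a, row_b], _index_neutral_map returns
# {neutral_repr(row_a): 0, neutral_repr(row_b): 1}, so lookups such as
# constraint_rows_map[dgen_core.neutral_repr(base_row)] in
# _install_row_cases are a single dict hit instead of a linear scan.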
|
from django.conf.urls import patterns, url
from .views import MediaLibraryAPIView, MediaLibraryItemView, AddShelfRelationAPIView
urlpatterns = patterns('',
url(r'^(?P<type>(audio|video|image))/$', MediaLibraryAPIView.as_view(), name='medialibrary'),
url(r'^(?P<pk>\d+)/$', MediaLibraryItemView.as_view(), name='medialibrary-shelf'),
url(r'^(?P<pk>\d+)/add/$', AddShelfRelationAPIView.as_view(), name='medialibrary-shelf-add-relation')
)
|
"""
This script is a trick to set up a fake Django environment, since this
reusable app will be developed and tested outside any specific Django project.
Via ``settings.configure`` you will be able to set all necessary settings
for your app and run the tests as if you were calling ``./manage.py test``.
Taken from https://github.com/mbrochh/tdd-with-django-reusable-app
"""
import os
import sys
from django.conf import settings
EXTERNAL_APPS = [
'django.contrib.admin',
'django.contrib.admindocs',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.messages',
'django.contrib.sessions',
'django.contrib.staticfiles',
'django.contrib.sitemaps',
'django.contrib.sites',
]
INTERNAL_APPS = [
'portlet',
'django_nose',
]
INSTALLED_APPS = EXTERNAL_APPS + INTERNAL_APPS
COVERAGE_MODULE_EXCLUDES = [
'tests$', 'settings$', 'urls$', 'locale$',
'migrations', 'fixtures', 'admin$', 'django_extensions',
]
COVERAGE_MODULE_EXCLUDES += EXTERNAL_APPS
if not settings.configured:
settings.configure(
DATABASES={
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": ":memory:",
}
},
INSTALLED_APPS=INSTALLED_APPS,
ROOT_URLCONF='portlet.urls',
TEMPLATE_DIRS=(
os.path.join(os.path.dirname(__file__), '../templates'),
),
COVERAGE_MODULE_EXCLUDES=COVERAGE_MODULE_EXCLUDES,
COVERAGE_REPORT_HTML_OUTPUT_DIR=os.path.join(
os.path.dirname(__file__), 'coverage')
)
from django_coverage.coverage_runner import CoverageRunner
from django_nose import NoseTestSuiteRunner
class NoseCoverageTestRunner(CoverageRunner, NoseTestSuiteRunner):
"""Custom test runner that uses nose and coverage"""
pass
def runtests(*test_args):
failures = NoseTestSuiteRunner(verbosity=2, interactive=True).run_tests(test_args)
sys.exit(failures)
if __name__ == '__main__':
runtests(*sys.argv[1:])
|
try:
from django.conf.urls import *
except ImportError: # django < 1.4
from django.conf.urls.defaults import *
from .views import EventDetail, EventList, EventCreate, EventCreateJSON, EventDelete, EventUpdate
urlpatterns = patterns("events.views",
url(r"^$", EventList.as_view(template_name='events/event_list_calendar.html'), name='list'),
#url(r"^$", EventList.as_view(), name='list'),
url(r"^create/$", EventCreate.as_view(), name='create'),
url(r"^create/json/$", EventCreateJSON.as_view(), name='create_json'),
url(r"^(?P<pk>\d+)/$", EventDetail.as_view(), name='detail'),
url(r"^(?P<pk>\d+)/update$", EventUpdate.as_view(), name='update'),
url(r"^(?P<pk>\d+)/delete/$", EventDelete.as_view(), name='delete'),
url(r"^(?P<event_id>\d+)/rsvp/$", 'rsvp_event', name='rsvp'),
url(r"^(?P<event_id>\d+)/attend/$", 'attend_event', name='attend'),
#url(r"^calendar/(?P<year>\d+)/(?P<month>\d+)/$", 'calendar', name='calendar'),
#url(r"^calendar/$", CalendarRedirectView.as_view(), name='calendar-redirect'),
)
|
from copy import copy
import datetime
import time
import urllib2
from nose.tools import assert_equals
from nose.plugins.skip import SkipTest
from autoscalebot import TOO_LOW, JUST_RIGHT, TOO_HIGH
from autoscalebot.conf import AutoscaleSettings
from autoscalebot.models import HerokuAutoscaler
class TestSettings(AutoscaleSettings):
pass
test_settings = TestSettings()
test_settings.HEROKU_APP_NAME = "test-app"
test_settings.HEROKU_API_KEY = "1234567"
test_settings.HEARTBEAT_INTERVAL_IN_SECONDS = 30
test_settings.HEARTBEAT_URL = 'http://www.google.com'
test_settings.MAX_RESPONSE_TIME_IN_MS = 1000
test_settings.MIN_RESPONSE_TIME_IN_MS = 400
test_settings.NUMBER_OF_FAILS_TO_SCALE_UP_AFTER = 3
test_settings.NUMBER_OF_PASSES_TO_SCALE_DOWN_AFTER = 5
test_settings.MAX_DYNOS = 3
test_settings.MIN_DYNOS = 1
test_settings.INCREMENT = 1
test_settings.NOTIFY_IF_SCALE_DIFF_EXCEEDS_THRESHOLD = None
test_settings.NOTIFY_IF_SCALE_DIFF_EXCEEDS_PERIOD_IN_MINUTES = None
test_settings.NOTIFY_IF_NEEDS_EXCEED_MAX = True
test_settings.NOTIFY_IF_NEEDS_BELOW_MIN = True
test_settings.NOTIFICATION_BACKENDS = ["autoscalebot.backends.notification.TestBackend", ]
class MockHerokuProcesses:
def __init__(self):
self.current = 0
self._processes = [1, ]
@property
def processes(self):
if not hasattr(self, "_processes"):
self._processes = [1, ]
return self._processes
def scale(self, new_num):
self._processes = [n + 1 for n in range(0, new_num)]
def __iter__(self):
return self
def next(self):
self.current += 1
if self.current > len(self.processes):
raise StopIteration
else:
return self.processes[self.current - 1]
class MockBrokenHerokuProcesses(MockHerokuProcesses):
    def scale(self, new_num):
        raise Exception
class MockHerokuApp:
def __init__(self, *args, **kwargs):
self.processes
@property
def processes(self):
if not hasattr(self, "_processes"):
self._processes = {'web': MockHerokuProcesses(), }
return self._processes
class MockBrokenHerokuApp(MockHerokuApp):
@property
def processes(self):
if not hasattr(self, "_processes"):
self._processes = {'web': MockBrokenHerokuProcesses(), }
return self._processes
class MockHerokuAutoscaler(HerokuAutoscaler):
def __init__(self, *args, **kwargs):
super(MockHerokuAutoscaler, self).__init__(*args, **kwargs)
self.heroku_app
@property
def heroku_app(self):
if not hasattr(self, "_heroku_app"):
self._heroku_app = MockHerokuApp()
return self._heroku_app
def out_of_band_heroku_scale(self, num_dynos):
# Ugly mock out of band scale
self.heroku_app.processes["web"]._processes = [1, 2, 3, 4]
        self._num_dynos = len(self.heroku_app.processes["web"]._processes)
class MockValidResponse:
def read(self, *args, **kwargs):
return "A"
class Mock500Response:
def read(self, *args, **kwargs):
raise Exception
def mock_valid_urlopen(url, *args, **kwargs):
    # These stand-ins are monkeypatched over urllib2.urlopen, so the first
    # positional argument is the URL being fetched, not an instance.
    time.sleep(0.5)
    return MockValidResponse()
def mock_invalid_urlopen(url, *args, **kwargs):
    return Mock500Response()
def mock_fast_urlopen(url, *args, **kwargs):
    return MockValidResponse()
def mock_slow_urlopen(url, *args, **kwargs):
    time.sleep(2)
    return MockValidResponse()
class TestHerokuAutoscaler:
def setUp(self):
self.test_scaler
@property
def test_scaler(self):
if not hasattr(self, "_test_scaler"):
self._test_scaler = MockHerokuAutoscaler(test_settings)
return self._test_scaler
def test_heroku_scale(self):
assert_equals(self.test_scaler.num_dynos, 1)
self.test_scaler.heroku_scale(3)
assert_equals(self.test_scaler.num_dynos, 3)
self.test_scaler.heroku_scale(5)
assert_equals(self.test_scaler.num_dynos, 3)
self.test_scaler.heroku_scale(2)
assert_equals(self.test_scaler.num_dynos, 2)
def test_num_dynos(self):
self.test_scaler.heroku_scale(3)
assert_equals(len([i for i in self.test_scaler.heroku_app.processes['web']]), 3)
def test_add_to_history(self):
self.test_scaler.add_to_history(TOO_LOW)
self.test_scaler.add_to_history(TOO_HIGH)
self.test_scaler.add_to_history(JUST_RIGHT)
assert_equals(self.test_scaler.results, [TOO_LOW, TOO_HIGH, JUST_RIGHT])
def test_add_to_history_caps_length(self):
self.test_scaler.add_to_history(TOO_LOW)
self.test_scaler.add_to_history(TOO_LOW)
self.test_scaler.add_to_history(TOO_LOW)
self.test_scaler.add_to_history(TOO_LOW)
self.test_scaler.add_to_history(TOO_LOW)
self.test_scaler.add_to_history(TOO_LOW)
self.test_scaler.add_to_history(TOO_LOW)
assert_equals(self.test_scaler.results, [TOO_LOW, TOO_LOW, TOO_LOW, TOO_LOW, TOO_LOW])
def test_needs_scale_up_works(self):
self.test_scaler.add_to_history(TOO_LOW)
assert_equals(self.test_scaler.needs_scale_up, False)
self.test_scaler.add_to_history(TOO_HIGH)
self.test_scaler.add_to_history(TOO_HIGH)
self.test_scaler.add_to_history(TOO_HIGH)
self.test_scaler.add_to_history(TOO_HIGH)
self.test_scaler.add_to_history(TOO_HIGH)
self.test_scaler.add_to_history(TOO_HIGH)
assert_equals(self.test_scaler.needs_scale_up, True)
def test_needs_scale_down_works(self):
self.test_scaler.add_to_history(TOO_HIGH)
assert_equals(self.test_scaler.needs_scale_down, False)
self.test_scaler.add_to_history(TOO_LOW)
self.test_scaler.add_to_history(TOO_LOW)
self.test_scaler.add_to_history(TOO_LOW)
self.test_scaler.add_to_history(TOO_LOW)
self.test_scaler.add_to_history(TOO_LOW)
self.test_scaler.add_to_history(TOO_LOW)
assert_equals(self.test_scaler.needs_scale_down, True)
def test_scale_up(self):
assert_equals(self.test_scaler.num_dynos, 1)
self.test_scaler.scale_up()
assert_equals(self.test_scaler.num_dynos, 2)
def test_scale_up_stops_at_limit(self):
assert_equals(self.test_scaler.num_dynos, 1)
self.test_scaler.scale_up()
self.test_scaler.scale_up()
self.test_scaler.scale_up()
self.test_scaler.scale_up()
assert_equals(self.test_scaler.num_dynos, 3)
def test_scale_down(self):
self.test_scaler.scale_up()
self.test_scaler.scale_up()
assert_equals(self.test_scaler.num_dynos, 3)
self.test_scaler.scale_down()
assert_equals(self.test_scaler.num_dynos, 2)
def test_scale_down_stops_at_limit(self):
assert_equals(self.test_scaler.num_dynos, 1)
self.test_scaler.scale_up()
self.test_scaler.scale_up()
self.test_scaler.scale_up()
self.test_scaler.scale_down()
self.test_scaler.scale_down()
self.test_scaler.scale_down()
self.test_scaler.scale_down()
self.test_scaler.scale_down()
self.test_scaler.scale_down()
assert_equals(self.test_scaler.num_dynos, 1)
def test_do_autoscale_up_works(self):
assert_equals(self.test_scaler.num_dynos, 1)
self.test_scaler.add_to_history(TOO_HIGH)
self.test_scaler.add_to_history(TOO_HIGH)
self.test_scaler.add_to_history(TOO_HIGH)
self.test_scaler.add_to_history(TOO_HIGH)
self.test_scaler.add_to_history(TOO_HIGH)
self.test_scaler.do_autoscale()
assert_equals(self.test_scaler.num_dynos, 2)
self.test_scaler.add_to_history(TOO_HIGH)
self.test_scaler.add_to_history(TOO_HIGH)
self.test_scaler.add_to_history(TOO_HIGH)
self.test_scaler.add_to_history(TOO_HIGH)
self.test_scaler.add_to_history(TOO_HIGH)
self.test_scaler.do_autoscale()
assert_equals(self.test_scaler.num_dynos, 3)
self.test_scaler.add_to_history(TOO_HIGH)
self.test_scaler.add_to_history(TOO_HIGH)
self.test_scaler.add_to_history(TOO_HIGH)
self.test_scaler.add_to_history(TOO_HIGH)
self.test_scaler.add_to_history(TOO_HIGH)
self.test_scaler.do_autoscale()
assert_equals(self.test_scaler.num_dynos, 3)
def test_do_autoscale_down_works(self):
assert_equals(self.test_scaler.num_dynos, 1)
self.test_scaler.scale_up()
self.test_scaler.scale_up()
assert_equals(self.test_scaler.num_dynos, 3)
self.test_scaler.add_to_history(TOO_LOW)
self.test_scaler.add_to_history(TOO_LOW)
self.test_scaler.add_to_history(TOO_LOW)
self.test_scaler.add_to_history(TOO_LOW)
self.test_scaler.add_to_history(TOO_LOW)
assert_equals(self.test_scaler.num_dynos, 3)
self.test_scaler.add_to_history(TOO_LOW)
self.test_scaler.add_to_history(TOO_LOW)
self.test_scaler.add_to_history(TOO_LOW)
self.test_scaler.add_to_history(TOO_LOW)
self.test_scaler.add_to_history(TOO_LOW)
self.test_scaler.do_autoscale()
assert_equals(self.test_scaler.num_dynos, 2)
self.test_scaler.add_to_history(TOO_LOW)
self.test_scaler.add_to_history(TOO_LOW)
self.test_scaler.add_to_history(TOO_LOW)
self.test_scaler.add_to_history(TOO_LOW)
self.test_scaler.add_to_history(TOO_LOW)
self.test_scaler.do_autoscale()
assert_equals(self.test_scaler.num_dynos, 1)
self.test_scaler.add_to_history(TOO_LOW)
self.test_scaler.add_to_history(TOO_LOW)
self.test_scaler.add_to_history(TOO_LOW)
self.test_scaler.add_to_history(TOO_LOW)
self.test_scaler.add_to_history(TOO_LOW)
self.test_scaler.do_autoscale()
assert_equals(self.test_scaler.num_dynos, 1)
def test_max_dynos_from_time_based_settings_works(self):
one_off_test_settings = copy(test_settings)
one_off_test_settings.MAX_DYNOS = {
"0:00": 2,
"9:00": 5,
"17:00": 3
}
now_time = datetime.datetime.now()
self._test_scaler = MockHerokuAutoscaler(one_off_test_settings)
early_morning = datetime.datetime(now_time.year, now_time.month, now_time.day, 1, 0)
mid_day = datetime.datetime(now_time.year, now_time.month, now_time.day, 12, 0)
evening = datetime.datetime(now_time.year, now_time.month, now_time.day, 18, 0)
morning_off_by_minutes = datetime.datetime(now_time.year, now_time.month, now_time.day, 9, 5)
morning_exact = datetime.datetime(now_time.year, now_time.month, now_time.day, 9, 0)
assert_equals(self.test_scaler.max_num_dynos(when=early_morning), 2)
assert_equals(self.test_scaler.max_num_dynos(when=mid_day), 5)
assert_equals(self.test_scaler.max_num_dynos(when=evening), 3)
assert_equals(self.test_scaler.max_num_dynos(when=morning_off_by_minutes), 5)
assert_equals(self.test_scaler.max_num_dynos(when=morning_exact), 5)
def test_min_dynos_from_time_based_settings_works(self):
one_off_test_settings = copy(test_settings)
one_off_test_settings.MIN_DYNOS = {
"0:00": 2,
"9:00": 5,
"17:00": 3
}
now_time = datetime.datetime.now()
self._test_scaler = MockHerokuAutoscaler(one_off_test_settings)
early_morning = datetime.datetime(now_time.year, now_time.month, now_time.day, 1, 0)
mid_day = datetime.datetime(now_time.year, now_time.month, now_time.day, 12, 0)
evening = datetime.datetime(now_time.year, now_time.month, now_time.day, 18, 0)
morning_off_by_minutes = datetime.datetime(now_time.year, now_time.month, now_time.day, 9, 5)
morning_exact = datetime.datetime(now_time.year, now_time.month, now_time.day, 9, 0)
assert_equals(self.test_scaler.min_num_dynos(when=early_morning), 2)
assert_equals(self.test_scaler.min_num_dynos(when=mid_day), 5)
assert_equals(self.test_scaler.min_num_dynos(when=evening), 3)
assert_equals(self.test_scaler.min_num_dynos(when=morning_off_by_minutes), 5)
assert_equals(self.test_scaler.min_num_dynos(when=morning_exact), 5)
def test_custom_increments_work(self):
one_off_test_settings = copy(test_settings)
one_off_test_settings.INCREMENT = 2
self._test_scaler = MockHerokuAutoscaler(one_off_test_settings)
assert_equals(self.test_scaler.num_dynos, 1)
self.test_scaler.add_to_history(TOO_HIGH)
self.test_scaler.add_to_history(TOO_HIGH)
self.test_scaler.add_to_history(TOO_HIGH)
self.test_scaler.add_to_history(TOO_HIGH)
self.test_scaler.add_to_history(TOO_HIGH)
self.test_scaler.do_autoscale()
assert_equals(self.test_scaler.num_dynos, 3)
def test_if_min_is_changed_to_higher_than_current_scaling_works(self):
self.test_scaler.heroku_scale(1)
one_off_test_settings = copy(test_settings)
one_off_test_settings.MIN_DYNOS = 2
self._test_scaler = MockHerokuAutoscaler(one_off_test_settings)
assert_equals(self.test_scaler.num_dynos, 1)
self.test_scaler.do_autoscale()
assert_equals(self.test_scaler.num_dynos, 2)
def test_if_max_is_changed_to_lower_than_current_scaling_works(self):
one_off_test_settings = copy(test_settings)
one_off_test_settings.MAX_DYNOS = 2
self._test_scaler = MockHerokuAutoscaler(one_off_test_settings)
assert_equals(self.test_scaler.num_dynos, 1)
self.test_scaler.out_of_band_heroku_scale(4)
assert_equals(self.test_scaler.num_dynos, 4)
self.test_scaler.do_autoscale()
assert_equals(self.test_scaler.num_dynos, 2)
def test_scaling_clears_the_results_queue(self):
assert_equals(self.test_scaler.num_dynos, 1)
self.test_scaler.add_to_history(TOO_HIGH)
self.test_scaler.add_to_history(TOO_HIGH)
self.test_scaler.add_to_history(TOO_HIGH)
self.test_scaler.add_to_history(TOO_HIGH)
self.test_scaler.add_to_history(TOO_HIGH)
self.test_scaler.do_autoscale()
assert_equals(self.test_scaler.num_dynos, 2)
assert_equals(self.test_scaler.results, [])
def test_a_mixed_stack_of_low_high_scales_to_the_min_needed_for_the_condition(self):
assert_equals(self.test_scaler.num_dynos, 1)
self.test_scaler.add_to_history(TOO_LOW)
self.test_scaler.add_to_history(TOO_LOW)
self.test_scaler.add_to_history(TOO_LOW)
self.test_scaler.add_to_history(TOO_HIGH)
self.test_scaler.add_to_history(TOO_HIGH)
self.test_scaler.add_to_history(TOO_HIGH)
self.test_scaler.do_autoscale()
assert_equals(self.test_scaler.num_dynos, 2)
def test_ping_and_store_for_valid_url(self):
urllib2.urlopen = mock_valid_urlopen
assert_equals(self.test_scaler.results, [])
self.test_scaler.ping_and_store()
assert_equals(self.test_scaler.results, [JUST_RIGHT])
def test_ping_and_store_for_invalid_url(self):
urllib2.urlopen = mock_invalid_urlopen
assert_equals(self.test_scaler.results, [])
self.test_scaler.ping_and_store()
assert_equals(self.test_scaler.results, [TOO_HIGH])
def test_ping_and_store_for_slow_url(self):
urllib2.urlopen = mock_slow_urlopen
assert_equals(self.test_scaler.results, [])
self.test_scaler.ping_and_store()
assert_equals(self.test_scaler.results, [TOO_HIGH])
def test_ping_and_store_for_fast_url(self):
urllib2.urlopen = mock_fast_urlopen
assert_equals(self.test_scaler.results, [])
self.test_scaler.ping_and_store()
assert_equals(self.test_scaler.results, [TOO_LOW])
def test_notify_if_scale_diff_exceeds_threshold_works(self):
assert_equals(self.test_scaler.num_dynos, 1)
self.test_scaler.scale_up()
self.test_scaler.scale_up()
self.test_scaler.scale_up()
self.test_scaler.scale_up()
assert_equals(self.test_scaler.num_dynos, 3)
print "Feature not written"
raise SkipTest
def test_notify_if_scale_diff_exceeds_period_in_minutes_works(self):
print "Feature not written"
raise SkipTest
def test_notify_if_needs_exceed_max_works(self):
assert_equals(len(self.test_scaler.backends[0].messages), 0)
self.test_scaler.scale_up()
self.test_scaler.scale_up()
self.test_scaler.scale_up()
self.test_scaler.scale_up()
self.test_scaler.add_to_history(TOO_HIGH)
self.test_scaler.add_to_history(TOO_HIGH)
self.test_scaler.add_to_history(TOO_HIGH)
self.test_scaler.add_to_history(TOO_HIGH)
self.test_scaler.add_to_history(TOO_HIGH)
self.test_scaler.backends[0].clear_messages()
assert_equals(len(self.test_scaler.backends[0].messages), 0)
self.test_scaler.do_autoscale()
assert_equals(len(self.test_scaler.backends[0].messages), 1)
assert "max" in self.test_scaler.backends[0].messages[0]
def test_notify_if_needs_below_min_does_not_notify_on_one_dyno_works(self):
assert_equals(len(self.test_scaler.backends[0].messages), 0)
self.test_scaler.add_to_history(TOO_LOW)
self.test_scaler.add_to_history(TOO_LOW)
self.test_scaler.add_to_history(TOO_LOW)
self.test_scaler.add_to_history(TOO_LOW)
self.test_scaler.add_to_history(TOO_LOW)
self.test_scaler.add_to_history(TOO_LOW)
self.test_scaler.backends[0].clear_messages()
assert_equals(len(self.test_scaler.backends[0].messages), 0)
self.test_scaler.do_autoscale()
assert_equals(len(self.test_scaler.backends[0].messages), 0)
def test_notify_if_needs_below_min_works(self):
one_off_test_settings = copy(test_settings)
one_off_test_settings.MIN_DYNOS = 2
self._test_scaler = MockHerokuAutoscaler(one_off_test_settings)
assert_equals(len(self.test_scaler.backends[0].messages), 0)
self.test_scaler.do_autoscale()
self.test_scaler.add_to_history(TOO_LOW)
self.test_scaler.add_to_history(TOO_LOW)
self.test_scaler.add_to_history(TOO_LOW)
self.test_scaler.add_to_history(TOO_LOW)
self.test_scaler.add_to_history(TOO_LOW)
self.test_scaler.add_to_history(TOO_LOW)
self.test_scaler.backends[0].clear_messages()
assert_equals(len(self.test_scaler.backends[0].messages), 0)
self.test_scaler.do_autoscale()
assert_equals(len(self.test_scaler.backends[0].messages), 1)
assert "min" in self.test_scaler.backends[0].messages[0]
def test_notify_if_needs_exceed_max_disabled_works(self):
one_off_test_settings = copy(test_settings)
one_off_test_settings.NOTIFY_IF_NEEDS_EXCEED_MAX = False
self._test_scaler = MockHerokuAutoscaler(one_off_test_settings)
assert_equals(len(self.test_scaler.backends[0].messages), 0)
self.test_scaler.scale_up()
self.test_scaler.scale_up()
self.test_scaler.scale_up()
self.test_scaler.scale_up()
self.test_scaler.add_to_history(TOO_HIGH)
self.test_scaler.add_to_history(TOO_HIGH)
self.test_scaler.add_to_history(TOO_HIGH)
self.test_scaler.add_to_history(TOO_HIGH)
self.test_scaler.add_to_history(TOO_HIGH)
self.test_scaler.backends[0].clear_messages()
assert_equals(len(self.test_scaler.backends[0].messages), 0)
self.test_scaler.do_autoscale()
assert_equals(len(self.test_scaler.backends[0].messages), 0)
def test_notify_if_needs_below_min_disabled_works(self):
one_off_test_settings = copy(test_settings)
        one_off_test_settings.NOTIFY_IF_NEEDS_BELOW_MIN = False
        self._test_scaler = MockHerokuAutoscaler(one_off_test_settings)
assert_equals(len(self.test_scaler.backends[0].messages), 0)
self.test_scaler.add_to_history(TOO_LOW)
self.test_scaler.add_to_history(TOO_LOW)
self.test_scaler.add_to_history(TOO_LOW)
self.test_scaler.add_to_history(TOO_LOW)
self.test_scaler.add_to_history(TOO_LOW)
self.test_scaler.add_to_history(TOO_LOW)
self.test_scaler.backends[0].clear_messages()
assert_equals(len(self.test_scaler.backends[0].messages), 0)
self.test_scaler.do_autoscale()
assert_equals(len(self.test_scaler.backends[0].messages), 0)
def test_notify_on_scale_fails_works(self):
self.test_scaler._heroku_app = MockBrokenHerokuApp()
assert_equals(len(self.test_scaler.backends[0].messages), 0)
self.test_scaler.scale_up()
assert_equals(len(self.test_scaler.backends[0].messages), 1)
assert "fail" in self.test_scaler.backends[0].messages[0]
def test_notify_on_every_scale_works(self):
assert_equals(len(self.test_scaler.backends[0].messages), 0)
self.test_scaler.scale_up()
assert_equals(len(self.test_scaler.backends[0].messages), 1)
def test_all_backends_are_called_on_notification(self):
one_off_test_settings = copy(test_settings)
one_off_test_settings.NOTIFICATION_BACKENDS = [
"autoscalebot.backends.notification.TestBackend",
"autoscalebot.backends.notification.TestBackend"
]
self._test_scaler = MockHerokuAutoscaler(one_off_test_settings)
assert_equals([len(b.messages) for b in self.test_scaler.backends], [0, 0])
self.test_scaler.scale_up()
assert_equals([len(b.messages) for b in self.test_scaler.backends], [1, 1])
|
import io
import json
import os
import unittest
from . import guidanceresponse
from .fhirdate import FHIRDate
class GuidanceResponseTests(unittest.TestCase):
def instantiate_from(self, filename):
datadir = os.environ.get('FHIR_UNITTEST_DATADIR') or ''
with io.open(os.path.join(datadir, filename), 'r', encoding='utf-8') as handle:
js = json.load(handle)
self.assertEqual("GuidanceResponse", js["resourceType"])
return guidanceresponse.GuidanceResponse(js)
def testGuidanceResponse1(self):
inst = self.instantiate_from("guidanceresponse-example.json")
self.assertIsNotNone(inst, "Must have instantiated a GuidanceResponse instance")
self.implGuidanceResponse1(inst)
js = inst.as_json()
self.assertEqual("GuidanceResponse", js["resourceType"])
inst2 = guidanceresponse.GuidanceResponse(js)
self.implGuidanceResponse1(inst2)
def implGuidanceResponse1(self, inst):
self.assertEqual(inst.contained[0].id, "outputParameters1")
self.assertEqual(inst.id, "example")
self.assertEqual(inst.identifier.system, "http://example.org")
self.assertEqual(inst.identifier.value, "guidanceResponse1")
self.assertEqual(inst.occurrenceDateTime.date, FHIRDate("2017-03-10T16:02:00Z").date)
self.assertEqual(inst.occurrenceDateTime.as_json(), "2017-03-10T16:02:00Z")
self.assertEqual(inst.reasonCodeableConcept.text, "Guideline Appropriate Ordering Assessment")
self.assertEqual(inst.requestId, "guidanceRequest1")
self.assertEqual(inst.status, "success")
self.assertEqual(inst.text.status, "generated")
|
from distutils.core import setup
setup(
name = 'pyspecfit',
url = 'http://justincely.github.io',
version = '0.0.1',
description = 'interact with IRAF task specfit I/O products',
author = 'Justin Ely',
author_email = 'ely@stsci.edu',
keywords = ['astronomy'],
classifiers = ['Programming Language :: Python',
'Development Status :: 1 - Planning',
'Intended Audience :: Science/Research',
'Topic :: Scientific/Engineering :: Astronomy',
'Topic :: Scientific/Engineering :: Physics',
'Topic :: Software Development :: Libraries :: Python Modules'],
packages = ['pyspecfit']
)
|
from django.conf import settings
from .locals import get_cid
DEFAULT_CID_SQL_COMMENT_TEMPLATE = 'cid: {cid}'
class CidCursorWrapper:
"""
A cursor wrapper that attempts to add a cid comment to each query
"""
def __init__(self, cursor):
self.cursor = cursor
def __getattr__(self, attr):
if attr in self.__dict__:
return self.__dict__[attr]
return getattr(self.cursor, attr)
def __iter__(self):
return iter(self.cursor)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def add_comment(self, sql):
cid_sql_template = getattr(
settings, 'CID_SQL_COMMENT_TEMPLATE', DEFAULT_CID_SQL_COMMENT_TEMPLATE
)
cid = get_cid()
if not cid:
return sql
# FIXME (dbaty): we could use "--" prefixed comments so that
# we would not have to bother with escaping the cid (assuming
# it does not contain newline characters).
cid = cid.replace('/*', r'\/\*').replace('*/', r'\*\/')
return "/* {} */\n{}".format(cid_sql_template.format(cid=cid), sql)
# The following methods cannot be implemented in __getattr__, because the
# code must run when the method is invoked, not just when it is accessed.
def callproc(self, procname, params=None):
return self.cursor.callproc(procname, params)
def execute(self, sql, params=None):
sql = self.add_comment(sql)
return self.cursor.execute(sql, params)
def executemany(self, sql, param_list):
sql = self.add_comment(sql)
return self.cursor.executemany(sql, param_list)
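# A hedged usage sketch (illustrative; not part of the original module).
# Wrap a live Django cursor so every statement carries the correlation id:
#
#   from django.db import connection
#   with CidCursorWrapper(connection.cursor()) as cursor:
#       cursor.execute("SELECT 1")
#
# With get_cid() returning 'abc123' and the default template, the SQL sent
# to the backend becomes:
#
#   /* cid: abc123 */
#   SELECT 1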
|
"""Fichier contenant la volonté RelacherRames"""
import re
from secondaires.navigation.equipage.ordres.relacher_rames import \
RelacherRames as OrdreRelacherRames
from secondaires.navigation.equipage.ordres.long_deplacer import LongDeplacer
from secondaires.navigation.equipage.volonte import Volonte
class RelacherRames(Volonte):
"""Classe représentant une volonté.
Cette volonté demande à ceux qui tiennent les rames de les lâcher.
"""
cle = "relacher_rames"
ordre_court = re.compile(r"^rr$", re.I)
ordre_long = re.compile(r"^relacher\s+rames?$", re.I)
def choisir_matelots(self, exception=None):
"""Retourne le matelot le plus apte à accomplir la volonté."""
equipage = self.navire.equipage
objectifs = []
rames = self.navire.rames
rames = [r for r in rames if r.tenu]
for paire in rames:
matelot = equipage.get_matelot_depuis_personnage(paire.tenu)
if matelot:
objectifs.append((matelot, paire))
return objectifs
def executer(self, objectifs):
"""Exécute la volonté."""
for sequence in objectifs:
matelot, rames = sequence
matelot.invalider_ordres("ramer")
navire = self.navire
ordres = []
relacher = OrdreRelacherRames(matelot, navire, rames)
ordres.append(relacher)
self.ajouter_ordres(matelot, ordres)
def crier_ordres(self, personnage):
"""On fait crier l'ordre au personnage."""
msg = "{} s'écrie : rameurs, laissez courir !".format(
personnage.distinction_audible)
self.navire.envoyer(msg)
@classmethod
def extraire_arguments(cls, navire):
"""Extrait les arguments de la volonté."""
return ()
|
"""Run tests in parallel."""
from __future__ import print_function
import argparse
import collections
import glob
import itertools
import json
import logging
import multiprocessing
import os
import os.path
import pipes
import platform
import random
import re
import socket
import subprocess
import sys
import tempfile
import traceback
import time
from six.moves import urllib
import uuid
import six
import python_utils.jobset as jobset
import python_utils.report_utils as report_utils
import python_utils.watch_dirs as watch_dirs
import python_utils.start_port_server as start_port_server
try:
from python_utils.upload_test_results import upload_results_to_bq
except ImportError:
pass # It's ok to not import because this is only necessary to upload results to BQ.
_ROOT = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), '../..'))
os.chdir(_ROOT)
_FORCE_ENVIRON_FOR_WRAPPERS = {
'GRPC_VERBOSITY': 'DEBUG',
}
_POLLING_STRATEGIES = {
'linux': ['epollsig', 'poll', 'poll-cv'],
'mac': ['poll'],
}
def platform_string():
return jobset.platform_string()
_DEFAULT_TIMEOUT_SECONDS = 5 * 60
def run_shell_command(cmd, env=None, cwd=None):
try:
subprocess.check_output(cmd, shell=True, env=env, cwd=cwd)
except subprocess.CalledProcessError as e:
logging.exception("Error while running command '%s'. Exit status %d. Output:\n%s",
e.cmd, e.returncode, e.output)
raise
class Config(object):
def __init__(self, config, environ=None, timeout_multiplier=1, tool_prefix=[], iomgr_platform='native'):
if environ is None:
environ = {}
self.build_config = config
self.environ = environ
self.environ['CONFIG'] = config
self.tool_prefix = tool_prefix
self.timeout_multiplier = timeout_multiplier
self.iomgr_platform = iomgr_platform
  def job_spec(self, cmdline, timeout_seconds=_DEFAULT_TIMEOUT_SECONDS,
               shortname=None, environ=None, cpu_cost=1.0, flaky=False):
"""Construct a jobset.JobSpec for a test under this config
Args:
cmdline: a list of strings specifying the command line the test
would like to run
"""
actual_environ = self.environ.copy()
    for k, v in (environ or {}).items():
actual_environ[k] = v
return jobset.JobSpec(cmdline=self.tool_prefix + cmdline,
shortname=shortname,
environ=actual_environ,
cpu_cost=cpu_cost,
timeout_seconds=(self.timeout_multiplier * timeout_seconds if timeout_seconds else None),
flake_retries=5 if flaky or args.allow_flakes else 0,
timeout_retries=3 if args.allow_flakes else 0)
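  # For example (illustrative): under the 'opt' config,
  #   _CONFIGS['opt'].job_spec(['bins/opt/foo_test'], shortname='foo_test')
  # yields a JobSpec whose environ includes CONFIG='opt' and whose timeout
  # is timeout_multiplier * _DEFAULT_TIMEOUT_SECONDS.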
def get_c_tests(travis, test_lang):
  platforms_str = 'ci_platforms' if travis else 'platforms'
with open('tools/run_tests/generated/tests.json') as f:
js = json.load(f)
return [tgt
for tgt in js
if tgt['language'] == test_lang and
platform_string() in tgt[platforms_str] and
not (travis and tgt['flaky'])]
def _check_compiler(compiler, supported_compilers):
if compiler not in supported_compilers:
raise Exception('Compiler %s not supported (on this platform).' % compiler)
def _check_arch(arch, supported_archs):
if arch not in supported_archs:
raise Exception('Architecture %s not supported.' % arch)
def _is_use_docker_child():
"""Returns True if running running as a --use_docker child."""
return True if os.getenv('RUN_TESTS_COMMAND') else False
_PythonConfigVars = collections.namedtuple(
'_ConfigVars', ['shell', 'builder', 'builder_prefix_arguments',
'venv_relative_python', 'toolchain', 'runner'])
def _python_config_generator(name, major, minor, bits, config_vars):
return PythonConfig(
name,
config_vars.shell + config_vars.builder + config_vars.builder_prefix_arguments + [
_python_pattern_function(major=major, minor=minor, bits=bits)] + [
name] + config_vars.venv_relative_python + config_vars.toolchain,
config_vars.shell + config_vars.runner + [
os.path.join(name, config_vars.venv_relative_python[0])])
def _pypy_config_generator(name, major, config_vars):
return PythonConfig(
name,
config_vars.shell + config_vars.builder + config_vars.builder_prefix_arguments + [
_pypy_pattern_function(major=major)] + [
name] + config_vars.venv_relative_python + config_vars.toolchain,
config_vars.shell + config_vars.runner + [
os.path.join(name, config_vars.venv_relative_python[0])])
def _python_pattern_function(major, minor, bits):
# Bit-ness is handled by the test machine's environment
if os.name == "nt":
if bits == "64":
return '/c/Python{major}{minor}/python.exe'.format(
major=major, minor=minor, bits=bits)
else:
return '/c/Python{major}{minor}_{bits}bits/python.exe'.format(
major=major, minor=minor, bits=bits)
else:
return 'python{major}.{minor}'.format(major=major, minor=minor)
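# For example: _python_pattern_function(major='2', minor='7', bits='64')
# returns '/c/Python27/python.exe' on Windows and 'python2.7' elsewhere.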
def _pypy_pattern_function(major):
if major == '2':
return 'pypy'
elif major == '3':
return 'pypy3'
else:
raise ValueError("Unknown PyPy major version")
class CLanguage(object):
def __init__(self, make_target, test_lang):
self.make_target = make_target
self.platform = platform_string()
self.test_lang = test_lang
def configure(self, config, args):
self.config = config
self.args = args
if self.args.compiler == 'cmake':
_check_arch(self.args.arch, ['default'])
self._use_cmake = True
self._docker_distro = 'jessie'
self._make_options = []
elif self.platform == 'windows':
self._use_cmake = False
self._make_options = [_windows_toolset_option(self.args.compiler),
_windows_arch_option(self.args.arch)]
else:
self._use_cmake = False
self._docker_distro, self._make_options = self._compiler_options(self.args.use_docker,
self.args.compiler)
if args.iomgr_platform == "uv":
cflags = '-DGRPC_UV '
try:
        cflags += subprocess.check_output(['pkg-config', '--cflags', 'libuv']).decode().strip() + ' '
except (subprocess.CalledProcessError, OSError):
pass
try:
        ldflags = subprocess.check_output(['pkg-config', '--libs', 'libuv']).decode().strip() + ' '
except (subprocess.CalledProcessError, OSError):
ldflags = '-luv '
self._make_options += ['EXTRA_CPPFLAGS={}'.format(cflags),
'EXTRA_LDLIBS={}'.format(ldflags)]
def test_specs(self):
out = []
binaries = get_c_tests(self.args.travis, self.test_lang)
for target in binaries:
if self._use_cmake and target.get('boringssl', False):
# cmake doesn't build boringssl tests
continue
polling_strategies = (_POLLING_STRATEGIES.get(self.platform, ['all'])
if target.get('uses_polling', True)
else ['all'])
if self.args.iomgr_platform == 'uv':
polling_strategies = ['all']
for polling_strategy in polling_strategies:
env={'GRPC_DEFAULT_SSL_ROOTS_FILE_PATH':
_ROOT + '/src/core/tsi/test_creds/ca.pem',
'GRPC_POLL_STRATEGY': polling_strategy,
'GRPC_VERBOSITY': 'DEBUG'}
        resolver = os.environ.get('GRPC_DNS_RESOLVER', None)
if resolver:
env['GRPC_DNS_RESOLVER'] = resolver
shortname_ext = '' if polling_strategy=='all' else ' GRPC_POLL_STRATEGY=%s' % polling_strategy
timeout_scaling = 1
if polling_strategy == 'poll-cv':
timeout_scaling *= 5
if polling_strategy in target.get('excluded_poll_engines', []):
continue
# Scale overall test timeout if running under various sanitizers.
config = self.args.config
if ('asan' in config
or config == 'msan'
or config == 'tsan'
or config == 'ubsan'
or config == 'helgrind'
or config == 'memcheck'):
timeout_scaling *= 20
if self.config.build_config in target['exclude_configs']:
continue
if self.args.iomgr_platform in target.get('exclude_iomgrs', []):
continue
if self.platform == 'windows':
if self._use_cmake:
binary = 'cmake/build/%s/%s.exe' % (_MSBUILD_CONFIG[self.config.build_config], target['name'])
else:
binary = 'vsprojects/%s%s/%s.exe' % (
'x64/' if self.args.arch == 'x64' else '',
_MSBUILD_CONFIG[self.config.build_config],
target['name'])
else:
if self._use_cmake:
binary = 'cmake/build/%s' % target['name']
else:
binary = 'bins/%s/%s' % (self.config.build_config, target['name'])
cpu_cost = target['cpu_cost']
if cpu_cost == 'capacity':
cpu_cost = multiprocessing.cpu_count()
if os.path.isfile(binary):
if 'gtest' in target and target['gtest']:
# here we parse the output of --gtest_list_tests to build up a
# complete list of the tests contained in a binary
# for each test, we then add a job to run, filtering for just that
# test
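            # A binary's --gtest_list_tests output looks like (illustrative):
            #   SuiteName.
            #     TestOne
            #     TestTwo  # GetParam() = ...  (trailing '#' comments are stripped)
            # Unindented lines set `base`; indented lines complete a full
            # test name such as 'SuiteName.TestOne'.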
with open(os.devnull, 'w') as fnull:
              tests = subprocess.check_output([binary, '--gtest_list_tests'],
                                              stderr=fnull).decode()
base = None
for line in tests.split('\n'):
i = line.find('#')
if i >= 0: line = line[:i]
if not line: continue
if line[0] != ' ':
base = line.strip()
else:
assert base is not None
assert line[1] == ' '
test = base + line.strip()
cmdline = [binary, '--gtest_filter=%s' % test] + target['args']
out.append(self.config.job_spec(cmdline,
shortname='%s %s' % (' '.join(cmdline), shortname_ext),
cpu_cost=cpu_cost,
timeout_seconds=_DEFAULT_TIMEOUT_SECONDS * timeout_scaling,
environ=env))
else:
cmdline = [binary] + target['args']
out.append(self.config.job_spec(cmdline,
shortname=' '.join(
pipes.quote(arg)
for arg in cmdline) +
shortname_ext,
cpu_cost=cpu_cost,
flaky=target.get('flaky', False),
timeout_seconds=target.get('timeout_seconds', _DEFAULT_TIMEOUT_SECONDS) * timeout_scaling,
environ=env))
elif self.args.regex == '.*' or self.platform == 'windows':
print('\nWARNING: binary not found, skipping', binary)
return sorted(out)
def make_targets(self):
if self.platform == 'windows':
# don't build tools on windows just yet
return ['buildtests_%s' % self.make_target]
return ['buildtests_%s' % self.make_target, 'tools_%s' % self.make_target,
'check_epollexclusive']
def make_options(self):
    return self._make_options
def pre_build_steps(self):
if self._use_cmake:
if self.platform == 'windows':
return [['tools\\run_tests\\helper_scripts\\pre_build_cmake.bat']]
else:
return [['tools/run_tests/helper_scripts/pre_build_cmake.sh']]
else:
if self.platform == 'windows':
return [['tools\\run_tests\\helper_scripts\\pre_build_c.bat']]
else:
return []
def build_steps(self):
return []
def post_tests_steps(self):
if self.platform == 'windows':
return []
else:
return [['tools/run_tests/helper_scripts/post_tests_c.sh']]
def makefile_name(self):
if self._use_cmake:
return 'cmake/build/Makefile'
else:
return 'Makefile'
def _clang_make_options(self, version_suffix=''):
return ['CC=clang%s' % version_suffix,
'CXX=clang++%s' % version_suffix,
'LD=clang%s' % version_suffix,
'LDXX=clang++%s' % version_suffix]
def _gcc_make_options(self, version_suffix):
return ['CC=gcc%s' % version_suffix,
'CXX=g++%s' % version_suffix,
'LD=gcc%s' % version_suffix,
'LDXX=g++%s' % version_suffix]
def _compiler_options(self, use_docker, compiler):
"""Returns docker distro and make options to use for given compiler."""
if not use_docker and not _is_use_docker_child():
_check_compiler(compiler, ['default'])
if compiler == 'gcc4.9' or compiler == 'default':
return ('jessie', [])
elif compiler == 'gcc4.8':
return ('jessie', self._gcc_make_options(version_suffix='-4.8'))
elif compiler == 'gcc5.3':
return ('ubuntu1604', [])
elif compiler == 'gcc_musl':
return ('alpine', [])
elif compiler == 'clang3.4':
# on ubuntu1404, clang-3.4 alias doesn't exist, just use 'clang'
return ('ubuntu1404', self._clang_make_options())
elif compiler == 'clang3.5':
return ('jessie', self._clang_make_options(version_suffix='-3.5'))
elif compiler == 'clang3.6':
return ('ubuntu1604', self._clang_make_options(version_suffix='-3.6'))
elif compiler == 'clang3.7':
return ('ubuntu1604', self._clang_make_options(version_suffix='-3.7'))
else:
raise Exception('Compiler %s not supported.' % compiler)
def dockerfile_dir(self):
return 'tools/dockerfile/test/cxx_%s_%s' % (self._docker_distro,
_docker_arch_suffix(self.args.arch))
def __str__(self):
return self.make_target
class NodeLanguage(object):
def __init__(self):
self.platform = platform_string()
def configure(self, config, args):
self.config = config
self.args = args
# Note: electron ABI only depends on major and minor version, so that's all
# we should specify in the compiler argument
_check_compiler(self.args.compiler, ['default', 'node0.12',
'node4', 'node5', 'node6',
'node7', 'node8',
'electron1.3', 'electron1.6'])
if self.args.compiler == 'default':
self.runtime = 'node'
self.node_version = '8'
else:
if self.args.compiler.startswith('electron'):
self.runtime = 'electron'
self.node_version = self.args.compiler[8:]
else:
self.runtime = 'node'
# Take off the word "node"
self.node_version = self.args.compiler[4:]
def test_specs(self):
if self.platform == 'windows':
return [self.config.job_spec(['tools\\run_tests\\helper_scripts\\run_node.bat'])]
else:
run_script = 'run_node'
if self.runtime == 'electron':
run_script += '_electron'
return [self.config.job_spec(['tools/run_tests/helper_scripts/{}.sh'.format(run_script),
self.node_version],
None,
environ=_FORCE_ENVIRON_FOR_WRAPPERS)]
def pre_build_steps(self):
if self.platform == 'windows':
return [['tools\\run_tests\\helper_scripts\\pre_build_node.bat']]
else:
build_script = 'pre_build_node'
if self.runtime == 'electron':
build_script += '_electron'
return [['tools/run_tests/helper_scripts/{}.sh'.format(build_script),
self.node_version]]
def make_targets(self):
return []
def make_options(self):
return []
def build_steps(self):
if self.platform == 'windows':
      if self.config.build_config == 'dbg':
config_flag = '--debug'
else:
config_flag = '--release'
return [['tools\\run_tests\\helper_scripts\\build_node.bat',
config_flag]]
else:
build_script = 'build_node'
if self.runtime == 'electron':
build_script += '_electron'
# building for electron requires a patch version
self.node_version += '.0'
return [['tools/run_tests/helper_scripts/{}.sh'.format(build_script),
self.node_version]]
def post_tests_steps(self):
return []
def makefile_name(self):
return 'Makefile'
def dockerfile_dir(self):
return 'tools/dockerfile/test/node_jessie_%s' % _docker_arch_suffix(self.args.arch)
def __str__(self):
return 'node'
class PhpLanguage(object):
def configure(self, config, args):
self.config = config
self.args = args
_check_compiler(self.args.compiler, ['default'])
def test_specs(self):
return [self.config.job_spec(['src/php/bin/run_tests.sh'],
environ=_FORCE_ENVIRON_FOR_WRAPPERS)]
def pre_build_steps(self):
return []
def make_targets(self):
return ['static_c', 'shared_c']
def make_options(self):
return []
def build_steps(self):
return [['tools/run_tests/helper_scripts/build_php.sh']]
def post_tests_steps(self):
return [['tools/run_tests/helper_scripts/post_tests_php.sh']]
def makefile_name(self):
return 'Makefile'
def dockerfile_dir(self):
return 'tools/dockerfile/test/php_jessie_%s' % _docker_arch_suffix(self.args.arch)
def __str__(self):
return 'php'
class Php7Language(object):
def configure(self, config, args):
self.config = config
self.args = args
_check_compiler(self.args.compiler, ['default'])
def test_specs(self):
return [self.config.job_spec(['src/php/bin/run_tests.sh'],
environ=_FORCE_ENVIRON_FOR_WRAPPERS)]
def pre_build_steps(self):
return []
def make_targets(self):
return ['static_c', 'shared_c']
def make_options(self):
return []
def build_steps(self):
return [['tools/run_tests/helper_scripts/build_php.sh']]
def post_tests_steps(self):
return [['tools/run_tests/helper_scripts/post_tests_php.sh']]
def makefile_name(self):
return 'Makefile'
def dockerfile_dir(self):
return 'tools/dockerfile/test/php7_jessie_%s' % _docker_arch_suffix(self.args.arch)
def __str__(self):
return 'php7'
class PythonConfig(collections.namedtuple('PythonConfig', [
'name', 'build', 'run'])):
"""Tuple of commands (named s.t. 'what it says on the tin' applies)"""
class PythonLanguage(object):
def configure(self, config, args):
self.config = config
self.args = args
self.pythons = self._get_pythons(self.args)
def test_specs(self):
# load list of known test suites
with open('src/python/grpcio_tests/tests/tests.json') as tests_json_file:
tests_json = json.load(tests_json_file)
environment = dict(_FORCE_ENVIRON_FOR_WRAPPERS)
return [self.config.job_spec(
config.run,
timeout_seconds=5*60,
environ=dict(list(environment.items()) +
[('GRPC_PYTHON_TESTRUNNER_FILTER', str(suite_name))]),
shortname='%s.test.%s' % (config.name, suite_name),)
for suite_name in tests_json
for config in self.pythons]
def pre_build_steps(self):
return []
def make_targets(self):
return []
def make_options(self):
return []
def build_steps(self):
return [config.build for config in self.pythons]
def post_tests_steps(self):
    if self.config.build_config != 'gcov':
return []
else:
return [['tools/run_tests/helper_scripts/post_tests_python.sh']]
def makefile_name(self):
return 'Makefile'
def dockerfile_dir(self):
return 'tools/dockerfile/test/python_%s_%s' % (self.python_manager_name(), _docker_arch_suffix(self.args.arch))
def python_manager_name(self):
if self.args.compiler in ['python3.5', 'python3.6']:
return 'pyenv'
elif self.args.compiler == 'python_alpine':
return 'alpine'
else:
return 'jessie'
def _get_pythons(self, args):
if args.arch == 'x86':
bits = '32'
else:
bits = '64'
if os.name == 'nt':
shell = ['bash']
builder = [os.path.abspath('tools/run_tests/helper_scripts/build_python_msys2.sh')]
builder_prefix_arguments = ['MINGW{}'.format(bits)]
venv_relative_python = ['Scripts/python.exe']
toolchain = ['mingw32']
else:
shell = []
builder = [os.path.abspath('tools/run_tests/helper_scripts/build_python.sh')]
builder_prefix_arguments = []
venv_relative_python = ['bin/python']
toolchain = ['unix']
runner = [os.path.abspath('tools/run_tests/helper_scripts/run_python.sh')]
config_vars = _PythonConfigVars(shell, builder, builder_prefix_arguments,
venv_relative_python, toolchain, runner)
python27_config = _python_config_generator(name='py27', major='2',
minor='7', bits=bits,
config_vars=config_vars)
python34_config = _python_config_generator(name='py34', major='3',
minor='4', bits=bits,
config_vars=config_vars)
python35_config = _python_config_generator(name='py35', major='3',
minor='5', bits=bits,
config_vars=config_vars)
python36_config = _python_config_generator(name='py36', major='3',
minor='6', bits=bits,
config_vars=config_vars)
pypy27_config = _pypy_config_generator(name='pypy', major='2',
config_vars=config_vars)
pypy32_config = _pypy_config_generator(name='pypy3', major='3',
config_vars=config_vars)
if args.compiler == 'default':
if os.name == 'nt':
return (python27_config,)
else:
return (python27_config, python34_config,)
elif args.compiler == 'python2.7':
return (python27_config,)
elif args.compiler == 'python3.4':
return (python34_config,)
elif args.compiler == 'python3.5':
return (python35_config,)
elif args.compiler == 'python3.6':
return (python36_config,)
elif args.compiler == 'pypy':
return (pypy27_config,)
elif args.compiler == 'pypy3':
return (pypy32_config,)
elif args.compiler == 'python_alpine':
return (python27_config,)
else:
raise Exception('Compiler %s not supported.' % args.compiler)
def __str__(self):
return 'python'
class RubyLanguage(object):
def configure(self, config, args):
self.config = config
self.args = args
_check_compiler(self.args.compiler, ['default'])
def test_specs(self):
tests = [self.config.job_spec(['tools/run_tests/helper_scripts/run_ruby.sh'],
timeout_seconds=10*60,
environ=_FORCE_ENVIRON_FOR_WRAPPERS)]
tests.append(self.config.job_spec(['tools/run_tests/helper_scripts/run_ruby_end2end_tests.sh'],
timeout_seconds=10*60,
environ=_FORCE_ENVIRON_FOR_WRAPPERS))
return tests
def pre_build_steps(self):
return [['tools/run_tests/helper_scripts/pre_build_ruby.sh']]
def make_targets(self):
return []
def make_options(self):
return []
def build_steps(self):
return [['tools/run_tests/helper_scripts/build_ruby.sh']]
def post_tests_steps(self):
return [['tools/run_tests/helper_scripts/post_tests_ruby.sh']]
def makefile_name(self):
return 'Makefile'
def dockerfile_dir(self):
return 'tools/dockerfile/test/ruby_jessie_%s' % _docker_arch_suffix(self.args.arch)
def __str__(self):
return 'ruby'
class CSharpLanguage(object):
def __init__(self):
self.platform = platform_string()
def configure(self, config, args):
self.config = config
self.args = args
if self.platform == 'windows':
_check_compiler(self.args.compiler, ['coreclr', 'default'])
_check_arch(self.args.arch, ['default'])
self._cmake_arch_option = 'x64'
self._make_options = []
else:
_check_compiler(self.args.compiler, ['default', 'coreclr'])
self._docker_distro = 'jessie'
if self.platform == 'mac':
# TODO(jtattermusch): EMBED_ZLIB=true currently breaks the mac build
self._make_options = ['EMBED_OPENSSL=true']
if self.args.compiler != 'coreclr':
# On Mac, official distribution of mono is 32bit.
self._make_options += ['ARCH_FLAGS=-m32', 'LDFLAGS=-m32']
else:
self._make_options = ['EMBED_OPENSSL=true', 'EMBED_ZLIB=true']
def test_specs(self):
with open('src/csharp/tests.json') as f:
tests_by_assembly = json.load(f)
msbuild_config = _MSBUILD_CONFIG[self.config.build_config]
nunit_args = ['--labels=All', '--noresult', '--workers=1']
assembly_subdir = 'bin/%s' % msbuild_config
assembly_extension = '.exe'
if self.args.compiler == 'coreclr':
assembly_subdir += '/netcoreapp1.0'
runtime_cmd = ['dotnet', 'exec']
assembly_extension = '.dll'
else:
assembly_subdir += '/net45'
if self.platform == 'windows':
runtime_cmd = []
else:
runtime_cmd = ['mono']
specs = []
for assembly in six.iterkeys(tests_by_assembly):
assembly_file = 'src/csharp/%s/%s/%s%s' % (assembly,
assembly_subdir,
assembly,
assembly_extension)
if self.config.build_config != 'gcov' or self.platform != 'windows':
# normally, run each test as a separate process
for test in tests_by_assembly[assembly]:
cmdline = runtime_cmd + [assembly_file, '--test=%s' % test] + nunit_args
specs.append(self.config.job_spec(cmdline,
shortname='csharp.%s' % test,
environ=_FORCE_ENVIRON_FOR_WRAPPERS))
else:
# For C# test coverage, run all tests from the same assembly at once
# using OpenCover.Console (only works on Windows).
cmdline = ['src\\csharp\\packages\\OpenCover.4.6.519\\tools\\OpenCover.Console.exe',
'-target:%s' % assembly_file,
'-targetdir:src\\csharp',
'-targetargs:%s' % ' '.join(nunit_args),
'-filter:+[Grpc.Core]*',
'-register:user',
'-output:src\\csharp\\coverage_csharp_%s.xml' % assembly]
# set really high cpu_cost to make sure instances of OpenCover.Console run exclusively
# to prevent problems with registering the profiler.
run_exclusive = 1000000
specs.append(self.config.job_spec(cmdline,
shortname='csharp.coverage.%s' % assembly,
cpu_cost=run_exclusive,
environ=_FORCE_ENVIRON_FOR_WRAPPERS))
return specs
def pre_build_steps(self):
if self.platform == 'windows':
return [['tools\\run_tests\\helper_scripts\\pre_build_csharp.bat', self._cmake_arch_option]]
else:
return [['tools/run_tests/helper_scripts/pre_build_csharp.sh']]
def make_targets(self):
return ['grpc_csharp_ext']
def make_options(self):
    return self._make_options
def build_steps(self):
if self.platform == 'windows':
return [['tools\\run_tests\\helper_scripts\\build_csharp.bat']]
else:
return [['tools/run_tests/helper_scripts/build_csharp.sh']]
def post_tests_steps(self):
if self.platform == 'windows':
return [['tools\\run_tests\\helper_scripts\\post_tests_csharp.bat']]
else:
return [['tools/run_tests/helper_scripts/post_tests_csharp.sh']]
def makefile_name(self):
if self.platform == 'windows':
return 'cmake/build/%s/Makefile' % self._cmake_arch_option
else:
return 'Makefile'
def dockerfile_dir(self):
return 'tools/dockerfile/test/csharp_%s_%s' % (self._docker_distro,
_docker_arch_suffix(self.args.arch))
def __str__(self):
return 'csharp'
class ObjCLanguage(object):
def configure(self, config, args):
self.config = config
self.args = args
_check_compiler(self.args.compiler, ['default'])
def test_specs(self):
return [
self.config.job_spec(['src/objective-c/tests/run_tests.sh'],
timeout_seconds=60*60,
shortname='objc-tests',
environ=_FORCE_ENVIRON_FOR_WRAPPERS),
self.config.job_spec(['src/objective-c/tests/build_example_test.sh'],
timeout_seconds=30*60,
shortname='objc-examples-build',
environ=_FORCE_ENVIRON_FOR_WRAPPERS),
]
def pre_build_steps(self):
return []
def make_targets(self):
return ['interop_server']
def make_options(self):
return []
def build_steps(self):
return [['src/objective-c/tests/build_tests.sh']]
def post_tests_steps(self):
return []
def makefile_name(self):
return 'Makefile'
def dockerfile_dir(self):
return None
def __str__(self):
return 'objc'
class Sanity(object):
def configure(self, config, args):
self.config = config
self.args = args
_check_compiler(self.args.compiler, ['default'])
def test_specs(self):
import yaml
with open('tools/run_tests/sanity/sanity_tests.yaml', 'r') as f:
environ={'TEST': 'true'}
if _is_use_docker_child():
environ['CLANG_FORMAT_SKIP_DOCKER'] = 'true'
return [self.config.job_spec(cmd['script'].split(),
timeout_seconds=30*60,
environ=environ,
cpu_cost=cmd.get('cpu_cost', 1))
            for cmd in yaml.safe_load(f)]
def pre_build_steps(self):
return []
def make_targets(self):
return ['run_dep_checks']
def make_options(self):
return []
def build_steps(self):
return []
def post_tests_steps(self):
return []
def makefile_name(self):
return 'Makefile'
def dockerfile_dir(self):
return 'tools/dockerfile/test/sanity'
def __str__(self):
return 'sanity'
class NodeExpressLanguage(object):
"""Dummy Node express test target to enable running express performance
benchmarks"""
def __init__(self):
self.platform = platform_string()
def configure(self, config, args):
self.config = config
self.args = args
_check_compiler(self.args.compiler, ['default', 'node0.12',
'node4', 'node5', 'node6'])
if self.args.compiler == 'default':
self.node_version = '4'
else:
# Take off the word "node"
self.node_version = self.args.compiler[4:]
def test_specs(self):
return []
def pre_build_steps(self):
if self.platform == 'windows':
return [['tools\\run_tests\\helper_scripts\\pre_build_node.bat']]
else:
return [['tools/run_tests/helper_scripts/pre_build_node.sh', self.node_version]]
def make_targets(self):
return []
def make_options(self):
return []
def build_steps(self):
return []
def post_tests_steps(self):
return []
def makefile_name(self):
return 'Makefile'
def dockerfile_dir(self):
return 'tools/dockerfile/test/node_jessie_%s' % _docker_arch_suffix(self.args.arch)
def __str__(self):
return 'node_express'
with open('tools/run_tests/generated/configs.json') as f:
  _CONFIGS = dict((cfg['config'], Config(**cfg)) for cfg in json.load(f))
_LANGUAGES = {
'c++': CLanguage('cxx', 'c++'),
'c': CLanguage('c', 'c'),
'node': NodeLanguage(),
'node_express': NodeExpressLanguage(),
'php': PhpLanguage(),
'php7': Php7Language(),
'python': PythonLanguage(),
'ruby': RubyLanguage(),
'csharp': CSharpLanguage(),
'objc' : ObjCLanguage(),
'sanity': Sanity()
}
_MSBUILD_CONFIG = {
'dbg': 'Debug',
'opt': 'Release',
'gcov': 'Debug',
}
def _windows_arch_option(arch):
"""Returns msbuild cmdline option for selected architecture."""
if arch == 'default' or arch == 'x86':
return '/p:Platform=Win32'
elif arch == 'x64':
return '/p:Platform=x64'
else:
print('Architecture %s not supported.' % arch)
sys.exit(1)
def _check_arch_option(arch):
"""Checks that architecture option is valid."""
if platform_string() == 'windows':
_windows_arch_option(arch)
elif platform_string() == 'linux':
# On linux, we need to be running under docker with the right architecture.
runtime_arch = platform.architecture()[0]
if arch == 'default':
return
elif runtime_arch == '64bit' and arch == 'x64':
return
elif runtime_arch == '32bit' and arch == 'x86':
return
else:
print('Architecture %s does not match current runtime architecture.' % arch)
sys.exit(1)
else:
    if arch != 'default':
      print('Architecture %s not supported on current platform.' % arch)
sys.exit(1)
def _windows_build_bat(compiler):
"""Returns name of build.bat for selected compiler."""
# For CoreCLR, fall back to the default compiler for C core
if compiler == 'default' or compiler == 'vs2013':
return 'vsprojects\\build_vs2013.bat'
elif compiler == 'vs2015':
return 'vsprojects\\build_vs2015.bat'
else:
print('Compiler %s not supported.' % compiler)
sys.exit(1)
def _windows_toolset_option(compiler):
"""Returns msbuild PlatformToolset for selected compiler."""
# For CoreCLR, fall back to the default compiler for C core
if compiler == 'default' or compiler == 'vs2013' or compiler == 'coreclr':
return '/p:PlatformToolset=v120'
elif compiler == 'vs2015':
return '/p:PlatformToolset=v140'
else:
print('Compiler %s not supported.' % compiler)
sys.exit(1)
def _docker_arch_suffix(arch):
"""Returns suffix to dockerfile dir to use."""
if arch == 'default' or arch == 'x64':
return 'x64'
elif arch == 'x86':
return 'x86'
else:
print('Architecture %s not supported with current settings.' % arch)
sys.exit(1)
def runs_per_test_type(arg_str):
"""Auxilary function to parse the "runs_per_test" flag.
Returns:
A positive integer or 0, the latter indicating an infinite number of
runs.
Raises:
argparse.ArgumentTypeError: Upon invalid input.
"""
if arg_str == 'inf':
return 0
try:
n = int(arg_str)
if n <= 0: raise ValueError
return n
  except ValueError:
msg = '\'{}\' is not a positive integer or \'inf\''.format(arg_str)
raise argparse.ArgumentTypeError(msg)
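# For example (illustrative): runs_per_test_type('3') == 3 and
# runs_per_test_type('inf') == 0, while runs_per_test_type('0') and
# runs_per_test_type('abc') raise argparse.ArgumentTypeError.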
def percent_type(arg_str):
pct = float(arg_str)
if pct > 100 or pct < 0:
raise argparse.ArgumentTypeError(
"'%f' is not a valid percentage in the [0, 100] range" % pct)
return pct
def isclose(a, b, rel_tol=1e-09, abs_tol=0.0):
return abs(a-b) <= max(rel_tol * max(abs(a), abs(b)), abs_tol)
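# This mirrors math.isclose() from PEP 485 (unavailable on Python 2); e.g.
# isclose(100.0, 100.0) is True while isclose(100.0, 99.0) is False at the
# default tolerances.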
argp = argparse.ArgumentParser(description='Run grpc tests.')
argp.add_argument('-c', '--config',
choices=sorted(_CONFIGS.keys()),
default='opt')
argp.add_argument('-n', '--runs_per_test', default=1, type=runs_per_test_type,
help='A positive integer or "inf". If "inf", all tests will run in an '
'infinite loop. Especially useful in combination with "-f"')
argp.add_argument('-r', '--regex', default='.*', type=str)
argp.add_argument('--regex_exclude', default='', type=str)
argp.add_argument('-j', '--jobs', default=multiprocessing.cpu_count(), type=int)
argp.add_argument('-s', '--slowdown', default=1.0, type=float)
argp.add_argument('-p', '--sample_percent', default=100.0, type=percent_type,
help='Run a random sample with that percentage of tests')
argp.add_argument('-f', '--forever',
default=False,
action='store_const',
const=True)
argp.add_argument('-t', '--travis',
default=False,
action='store_const',
const=True)
argp.add_argument('--newline_on_success',
default=False,
action='store_const',
const=True)
argp.add_argument('-l', '--language',
choices=['all'] + sorted(_LANGUAGES.keys()),
nargs='+',
default=['all'])
argp.add_argument('-S', '--stop_on_failure',
default=False,
action='store_const',
const=True)
argp.add_argument('--use_docker',
default=False,
action='store_const',
const=True,
help='Run all the tests under docker. That provides ' +
'additional isolation and prevents the need to install ' +
'language specific prerequisites. Only available on Linux.')
argp.add_argument('--allow_flakes',
default=False,
action='store_const',
const=True,
help='Allow flaky tests to show as passing (re-runs failed tests up to five times)')
argp.add_argument('--arch',
choices=['default', 'x86', 'x64'],
default='default',
help='Selects architecture to target. For some platforms "default" is the only supported choice.')
argp.add_argument('--compiler',
choices=['default',
'gcc4.4', 'gcc4.6', 'gcc4.8', 'gcc4.9', 'gcc5.3', 'gcc_musl',
'clang3.4', 'clang3.5', 'clang3.6', 'clang3.7',
'vs2013', 'vs2015',
'python2.7', 'python3.4', 'python3.5', 'python3.6', 'pypy', 'pypy3', 'python_alpine',
'node0.12', 'node4', 'node5', 'node6', 'node7', 'node8',
'electron1.3', 'electron1.6',
'coreclr',
'cmake'],
default='default',
help='Selects compiler to use. Allowed values depend on the platform and language.')
argp.add_argument('--iomgr_platform',
choices=['native', 'uv'],
default='native',
help='Selects iomgr platform to build on')
argp.add_argument('--build_only',
default=False,
action='store_const',
const=True,
help='Perform all the build steps but don\'t run any tests.')
argp.add_argument('--measure_cpu_costs', default=False, action='store_const', const=True,
help='Measure the cpu costs of tests')
argp.add_argument('--update_submodules', default=[], nargs='*',
help='Update some submodules before building. If any are updated, also run generate_projects. ' +
'Submodules are specified as SUBMODULE_NAME:BRANCH; if BRANCH is omitted, master is assumed.')
argp.add_argument('-a', '--antagonists', default=0, type=int)
argp.add_argument('-x', '--xml_report', default=None, type=str,
help='Generates a JUnit-compatible XML report')
argp.add_argument('--report_suite_name', default='tests', type=str,
help='Test suite name to use in generated JUnit XML report')
argp.add_argument('--quiet_success',
default=False,
action='store_const',
const=True,
help='Don\'t print anything when a test passes. Passing tests also will not be reported in XML report. ' +
'Useful when running many iterations of each test (argument -n).')
argp.add_argument('--force_default_poller', default=False, action='store_const', const=True,
help='Don\'t try to iterate over many polling strategies when they exist')
argp.add_argument('--max_time', default=-1, type=int, help='Maximum test runtime in seconds')
argp.add_argument('--bq_result_table',
default='',
type=str,
nargs='?',
help='Upload test results to a specified BQ table.')
args = argp.parse_args()
if args.force_default_poller:
_POLLING_STRATEGIES = {}
jobset.measure_cpu_costs = args.measure_cpu_costs
need_to_regenerate_projects = False
for spec in args.update_submodules:
spec = spec.split(':', 1)
if len(spec) == 1:
submodule = spec[0]
branch = 'master'
elif len(spec) == 2:
submodule = spec[0]
branch = spec[1]
cwd = 'third_party/%s' % submodule
def git(cmd, cwd=cwd):
print('in %s: git %s' % (cwd, cmd))
run_shell_command('git %s' % cmd, cwd=cwd)
git('fetch')
git('checkout %s' % branch)
git('pull origin %s' % branch)
if os.path.exists('src/%s/gen_build_yaml.py' % submodule):
need_to_regenerate_projects = True
if need_to_regenerate_projects:
if jobset.platform_string() == 'linux':
run_shell_command('tools/buildgen/generate_projects.sh')
else:
print('WARNING: may need to regenerate projects, but since we are not on')
print(' Linux this step is being skipped. Compilation MAY fail.')
run_config = _CONFIGS[args.config]
build_config = run_config.build_config
if args.travis:
_FORCE_ENVIRON_FOR_WRAPPERS = {'GRPC_TRACE': 'api'}
if 'all' in args.language:
lang_list = _LANGUAGES.keys()
else:
lang_list = args.language
if 'gcov' in args.config:
for bad in ['objc', 'sanity']:
if bad in lang_list:
lang_list.remove(bad)
languages = set(_LANGUAGES[l] for l in lang_list)
for l in languages:
l.configure(run_config, args)
language_make_options=[]
if any(language.make_options() for language in languages):
if not 'gcov' in args.config and len(languages) != 1:
print('languages with custom make options cannot be built simultaneously with other languages')
sys.exit(1)
else:
# Combining make options is not clean and just happens to work. It allows C/C++ and C# to build
# together, and is only used under gcov. All other configs should build languages individually.
language_make_options = list(set([make_option for lang in languages for make_option in lang.make_options()]))
if args.use_docker:
if not args.travis:
print('Seen --use_docker flag, will run tests under docker.')
print('')
print('IMPORTANT: The changes you are testing need to be locally committed')
print('because only the committed changes in the current branch will be')
print('copied to the docker environment.')
time.sleep(5)
dockerfile_dirs = set([l.dockerfile_dir() for l in languages])
if len(dockerfile_dirs) > 1:
if 'gcov' in args.config:
dockerfile_dir = 'tools/dockerfile/test/multilang_jessie_x64'
print ('Using multilang_jessie_x64 docker image for code coverage for '
'all languages.')
else:
print ('Languages to be tested require running under different docker '
'images.')
sys.exit(1)
else:
dockerfile_dir = next(iter(dockerfile_dirs))
  child_argv = [arg for arg in sys.argv if arg != '--use_docker']
run_tests_cmd = 'python tools/run_tests/run_tests.py %s' % ' '.join(child_argv[1:])
env = os.environ.copy()
env['RUN_TESTS_COMMAND'] = run_tests_cmd
env['DOCKERFILE_DIR'] = dockerfile_dir
env['DOCKER_RUN_SCRIPT'] = 'tools/run_tests/dockerize/docker_run_tests.sh'
if args.xml_report:
env['XML_REPORT'] = args.xml_report
if not args.travis:
env['TTY_FLAG'] = '-t' # enables Ctrl-C when not on Jenkins.
subprocess.check_call('tools/run_tests/dockerize/build_docker_and_run_tests.sh',
shell=True,
env=env)
sys.exit(0)
_check_arch_option(args.arch)
def make_jobspec(cfg, targets, makefile='Makefile'):
if platform_string() == 'windows':
if makefile.startswith('cmake/build/'):
return [jobset.JobSpec(['cmake', '--build', '.',
'--target', '%s' % target,
'--config', _MSBUILD_CONFIG[cfg]],
cwd=os.path.dirname(makefile),
timeout_seconds=None) for target in targets]
    extra_args = []
    # Build in parallel: empirically /m:2 gives the best performance/price
    # ratio and should prevent overloading the Windows workers.
    extra_args.extend(['/m:2'])
# disable PDB generation: it's broken, and we don't need it during CI
extra_args.extend(['/p:Jenkins=true'])
return [
jobset.JobSpec([_windows_build_bat(args.compiler),
'vsprojects\\%s.sln' % target,
'/p:Configuration=%s' % _MSBUILD_CONFIG[cfg]] +
extra_args +
language_make_options,
shell=True, timeout_seconds=None)
for target in targets]
else:
if targets and makefile.startswith('cmake/build/'):
# With cmake, we've passed all the build configuration in the pre-build step already
return [jobset.JobSpec([os.getenv('MAKE', 'make'),
'-j', '%d' % args.jobs] +
targets,
cwd='cmake/build',
timeout_seconds=None)]
if targets:
return [jobset.JobSpec([os.getenv('MAKE', 'make'),
'-f', makefile,
'-j', '%d' % args.jobs,
'EXTRA_DEFINES=GRPC_TEST_SLOWDOWN_MACHINE_FACTOR=%f' % args.slowdown,
'CONFIG=%s' % cfg,
'Q='] +
language_make_options +
([] if not args.travis else ['JENKINS_BUILD=1']) +
targets,
timeout_seconds=None)]
else:
return []
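# For example (illustrative): on Linux, make_jobspec('opt', ['buildtests_c'])
# yields a single JobSpec roughly equivalent to running
#   make -f Makefile -j <args.jobs> EXTRA_DEFINES=... CONFIG=opt Q= buildtests_c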
make_targets = {}
for l in languages:
makefile = l.makefile_name()
make_targets[makefile] = make_targets.get(makefile, set()).union(
set(l.make_targets()))
def build_step_environ(cfg):
environ = {'CONFIG': cfg}
msbuild_cfg = _MSBUILD_CONFIG.get(cfg)
if msbuild_cfg:
environ['MSBUILD_CONFIG'] = msbuild_cfg
return environ
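# For example: build_step_environ('dbg') returns
# {'CONFIG': 'dbg', 'MSBUILD_CONFIG': 'Debug'}, while a config without an
# msbuild mapping yields only {'CONFIG': <cfg>}.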
build_steps = list(set(
jobset.JobSpec(cmdline, environ=build_step_environ(build_config), flake_retries=5)
for l in languages
for cmdline in l.pre_build_steps()))
if make_targets:
make_commands = itertools.chain.from_iterable(make_jobspec(build_config, list(targets), makefile) for (makefile, targets) in make_targets.items())
build_steps.extend(set(make_commands))
build_steps.extend(set(
jobset.JobSpec(cmdline, environ=build_step_environ(build_config), timeout_seconds=None)
for l in languages
for cmdline in l.build_steps()))
post_tests_steps = list(set(
jobset.JobSpec(cmdline, environ=build_step_environ(build_config))
for l in languages
for cmdline in l.post_tests_steps()))
runs_per_test = args.runs_per_test
forever = args.forever
def _shut_down_legacy_server(legacy_server_port):
try:
version = int(urllib.request.urlopen(
'http://localhost:%d/version_number' % legacy_server_port,
timeout=10).read())
  except Exception:
    pass  # no legacy server running; nothing to shut down
else:
urllib.request.urlopen(
'http://localhost:%d/quitquitquit' % legacy_server_port).read()
def _calculate_num_runs_failures(list_of_results):
"""Caculate number of runs and failures for a particular test.
Args:
list_of_results: (List) of JobResult object.
Returns:
A tuple of total number of runs and failures.
"""
num_runs = len(list_of_results) # By default, there is 1 run per JobResult.
num_failures = 0
for jobresult in list_of_results:
if jobresult.retries > 0:
num_runs += jobresult.retries
if jobresult.num_failures > 0:
num_failures += jobresult.num_failures
return num_runs, num_failures
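# For example (illustrative): two JobResults where one retried twice and
# failed once yield (num_runs, num_failures) == (4, 1): two original runs
# plus two retries, with a single failure.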
class BuildAndRunError(object):
BUILD = object()
TEST = object()
POST_TEST = object()
def _has_epollexclusive():
  try:
    subprocess.check_call('bins/%s/check_epollexclusive' % args.config)
    return True
  except subprocess.CalledProcessError:
    return False
  except OSError:
    # For languages other than C and Windows the binary won't exist
    return False
def _build_and_run(
check_cancelled, newline_on_success, xml_report=None, build_only=False):
"""Do one pass of building & running tests."""
# build latest sequentially
num_failures, resultset = jobset.run(
build_steps, maxjobs=1, stop_on_failure=True,
newline_on_success=newline_on_success, travis=args.travis)
if num_failures:
return [BuildAndRunError.BUILD]
if build_only:
if xml_report:
report_utils.render_junit_xml_report(resultset, xml_report,
suite_name=args.report_suite_name)
return []
if not args.travis and not _has_epollexclusive() and platform_string() in _POLLING_STRATEGIES and 'epollex' in _POLLING_STRATEGIES[platform_string()]:
print('\n\nOmitting EPOLLEXCLUSIVE tests\n\n')
_POLLING_STRATEGIES[platform_string()].remove('epollex')
# start antagonists
antagonists = [subprocess.Popen(['tools/run_tests/python_utils/antagonist.py'])
for _ in range(0, args.antagonists)]
start_port_server.start_port_server()
resultset = None
num_test_failures = 0
try:
infinite_runs = runs_per_test == 0
one_run = set(
spec
for language in languages
for spec in language.test_specs()
if (re.search(args.regex, spec.shortname) and
(args.regex_exclude == '' or
not re.search(args.regex_exclude, spec.shortname))))
    # When running on travis, we want our test runs to be as similar as possible
# for reproducibility purposes.
if args.travis and args.max_time <= 0:
massaged_one_run = sorted(one_run, key=lambda x: x.shortname)
else:
# whereas otherwise, we want to shuffle things up to give all tests a
# chance to run.
massaged_one_run = list(one_run) # random.sample needs an indexable seq.
num_jobs = len(massaged_one_run)
# for a random sample, get as many as indicated by the 'sample_percent'
# argument. By default this arg is 100, resulting in a shuffle of all
# jobs.
sample_size = int(num_jobs * args.sample_percent/100.0)
massaged_one_run = random.sample(massaged_one_run, sample_size)
if not isclose(args.sample_percent, 100.0):
assert args.runs_per_test == 1, "Can't do sampling (-p) over multiple runs (-n)."
print("Running %d tests out of %d (~%d%%)" %
(sample_size, num_jobs, args.sample_percent))
if infinite_runs:
assert len(massaged_one_run) > 0, 'Must have at least one test for a -n inf run'
runs_sequence = (itertools.repeat(massaged_one_run) if infinite_runs
else itertools.repeat(massaged_one_run, runs_per_test))
all_runs = itertools.chain.from_iterable(runs_sequence)
if args.quiet_success:
jobset.message('START', 'Running tests quietly, only failing tests will be reported', do_newline=True)
num_test_failures, resultset = jobset.run(
all_runs, check_cancelled, newline_on_success=newline_on_success,
travis=args.travis, maxjobs=args.jobs,
stop_on_failure=args.stop_on_failure,
quiet_success=args.quiet_success, max_time=args.max_time)
if resultset:
for k, v in sorted(resultset.items()):
num_runs, num_failures = _calculate_num_runs_failures(v)
if num_failures > 0:
if num_failures == num_runs: # what about infinite_runs???
jobset.message('FAILED', k, do_newline=True)
else:
jobset.message(
'FLAKE', '%s [%d/%d runs flaked]' % (k, num_failures, num_runs),
do_newline=True)
finally:
for antagonist in antagonists:
antagonist.kill()
if args.bq_result_table and resultset:
upload_results_to_bq(resultset, args.bq_result_table, args, platform_string())
if xml_report and resultset:
report_utils.render_junit_xml_report(resultset, xml_report,
suite_name=args.report_suite_name)
number_failures, _ = jobset.run(
post_tests_steps, maxjobs=1, stop_on_failure=True,
newline_on_success=newline_on_success, travis=args.travis)
out = []
if number_failures:
out.append(BuildAndRunError.POST_TEST)
if num_test_failures:
out.append(BuildAndRunError.TEST)
return out
if forever:
success = True
while True:
dw = watch_dirs.DirWatcher(['src', 'include', 'test', 'examples'])
initial_time = dw.most_recent_change()
have_files_changed = lambda: dw.most_recent_change() != initial_time
    previous_success = success
    errors = _build_and_run(check_cancelled=have_files_changed,
                            newline_on_success=False,
                            build_only=args.build_only)
    success = not errors
    if not previous_success and success:
      jobset.message('SUCCESS',
                     'All tests are now passing properly',
                     do_newline=True)
jobset.message('IDLE', 'No change detected')
while not have_files_changed():
time.sleep(1)
else:
errors = _build_and_run(check_cancelled=lambda: False,
newline_on_success=args.newline_on_success,
xml_report=args.xml_report,
build_only=args.build_only)
if not errors:
jobset.message('SUCCESS', 'All tests passed', do_newline=True)
else:
jobset.message('FAILED', 'Some tests failed', do_newline=True)
exit_code = 0
if BuildAndRunError.BUILD in errors:
exit_code |= 1
if BuildAndRunError.TEST in errors:
exit_code |= 2
if BuildAndRunError.POST_TEST in errors:
exit_code |= 4
sys.exit(exit_code)
|
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
from sentry.utils.query import RangeQuerySetWrapperWithProgressBar
DEFAULT_SAVED_SEARCHES = [
{
'name': 'Unresolved Issues',
'query': 'is:unresolved',
},
{
'name': 'Needs Triage',
'query': 'is:unresolved is:unassigned'
},
{
'name': 'Assigned To Me',
'query': 'is:unresolved assigned:me'
},
{
'name': 'My Bookmarks',
'query': 'is:unresolved bookmarks:me'
},
{
'name': 'New Today',
'query': 'is:unresolved age:-24h'
},
]
class Migration(DataMigration):
# Flag to indicate if this migration is too risky
# to run online and needs to be coordinated for offline
is_dangerous = True
def forwards(self, orm):
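        # Run the actual data migration outside of South's wrapping
        # transaction: commit it first, do the work, then restart a
        # transaction so the surrounding machinery can close it normally
        # (restarted in the except path too, so failures surface inside
        # an open transaction).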
db.commit_transaction()
try:
self._forwards(orm)
except Exception:
db.start_transaction()
raise
db.start_transaction()
def _forwards(self, orm):
"Write your forwards methods here."
# Note: Don't use "from appname.models import ModelName".
# Use orm.ModelName to refer to models in this application,
# and orm['appname.ModelName'] for models in other applications.
SavedSearch = orm['sentry.SavedSearch']
for search in RangeQuerySetWrapperWithProgressBar(
SavedSearch.objects.filter(is_global__isnull=True)
):
search.is_global = False
search.save()
default_searches = []
for search in DEFAULT_SAVED_SEARCHES:
default_searches.append(
SavedSearch(
name=search['name'],
query=search['query'],
is_global=True,
)
)
SavedSearch.objects.bulk_create(default_searches)
def backwards(self, orm):
"Write your backwards methods here."
# These will be the only rows with a null `project_id`, so we can safely
# make the column `not null` after deleting them.
SavedSearch = orm['sentry.SavedSearch']
SavedSearch.objects.filter(is_global=True).delete()
models = {
'sentry.activity': {
'Meta': {'object_name': 'Activity'},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'})
},
'sentry.apiapplication': {
'Meta': {'object_name': 'ApiApplication'},
'allowed_origins': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'client_id': ('django.db.models.fields.CharField', [], {'default': "'82029f091b094a2ca18ef45d3958513c683b4643c65f4fbfacfbd1cdee187a51'", 'unique': 'True', 'max_length': '64'}),
'client_secret': ('sentry.db.models.fields.encrypted.EncryptedTextField', [], {'default': "'74093ba9478e4d41ae25dfcb036bd062ea58b43d394140a4989d6ec19f179b6a'"}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'homepage_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'default': "'Proper Crawdad'", 'max_length': '64', 'blank': 'True'}),
'owner': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}),
'privacy_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True'}),
'redirect_uris': ('django.db.models.fields.TextField', [], {}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'terms_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True'})
},
'sentry.apiauthorization': {
'Meta': {'unique_together': "(('user', 'application'),)", 'object_name': 'ApiAuthorization'},
'application': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ApiApplication']", 'null': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'scope_list': ('sentry.db.models.fields.array.ArrayField', [], {'of': (u'django.db.models.fields.TextField', [], {})}),
'scopes': ('django.db.models.fields.BigIntegerField', [], {'default': 'None'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.apigrant': {
'Meta': {'object_name': 'ApiGrant'},
'application': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ApiApplication']"}),
'code': ('django.db.models.fields.CharField', [], {'default': "'12d42834d62142d4beaecf34588354dd'", 'max_length': '64', 'db_index': 'True'}),
'expires_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2019, 1, 18, 0, 0)', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'redirect_uri': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'scope_list': ('sentry.db.models.fields.array.ArrayField', [], {'of': (u'django.db.models.fields.TextField', [], {})}),
'scopes': ('django.db.models.fields.BigIntegerField', [], {'default': 'None'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.apikey': {
'Meta': {'object_name': 'ApiKey'},
'allowed_origins': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'}),
'label': ('django.db.models.fields.CharField', [], {'default': "'Default'", 'max_length': '64', 'blank': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'key_set'", 'to': "orm['sentry.Organization']"}),
'scope_list': ('sentry.db.models.fields.array.ArrayField', [], {'of': (u'django.db.models.fields.TextField', [], {})}),
'scopes': ('django.db.models.fields.BigIntegerField', [], {'default': 'None'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
},
'sentry.apitoken': {
'Meta': {'object_name': 'ApiToken'},
'application': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ApiApplication']", 'null': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'expires_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2019, 2, 17, 0, 0)', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'refresh_token': ('django.db.models.fields.CharField', [], {'default': "'85185474cfa44548852999d7e605898adf44aa5a15b04d0da501445694f622a7'", 'max_length': '64', 'unique': 'True', 'null': 'True'}),
'scope_list': ('sentry.db.models.fields.array.ArrayField', [], {'of': (u'django.db.models.fields.TextField', [], {})}),
'scopes': ('django.db.models.fields.BigIntegerField', [], {'default': 'None'}),
'token': ('django.db.models.fields.CharField', [], {'default': "'7ff1f4918fe84acfa4172b59731cc504d6faae0cc6234271a6dcbc8c332cd65a'", 'unique': 'True', 'max_length': '64'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.assistantactivity': {
'Meta': {'unique_together': "(('user', 'guide_id'),)", 'object_name': 'AssistantActivity', 'db_table': "'sentry_assistant_activity'"},
'dismissed_ts': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'guide_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'useful': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}),
'viewed_ts': ('django.db.models.fields.DateTimeField', [], {'null': 'True'})
},
'sentry.auditlogentry': {
'Meta': {'object_name': 'AuditLogEntry'},
'actor': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'audit_actors'", 'null': 'True', 'to': "orm['sentry.User']"}),
'actor_key': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ApiKey']", 'null': 'True', 'blank': 'True'}),
'actor_label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ip_address': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39', 'null': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'target_object': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'target_user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'audit_targets'", 'null': 'True', 'to': "orm['sentry.User']"})
},
'sentry.authenticator': {
'Meta': {'unique_together': "(('user', 'type'),)", 'object_name': 'Authenticator', 'db_table': "'auth_authenticator'"},
'config': ('sentry.db.models.fields.encrypted.EncryptedPickledObjectField', [], {}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {'primary_key': 'True'}),
'last_used_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.authidentity': {
'Meta': {'unique_together': "(('auth_provider', 'ident'), ('auth_provider', 'user'))", 'object_name': 'AuthIdentity'},
'auth_provider': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.AuthProvider']"}),
'data': ('sentry.db.models.fields.encrypted.EncryptedJsonField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'last_synced': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_verified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.authprovider': {
'Meta': {'object_name': 'AuthProvider'},
'config': ('sentry.db.models.fields.encrypted.EncryptedJsonField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'default_global_access': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'default_role': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '50'}),
'default_teams': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Team']", 'symmetrical': 'False', 'blank': 'True'}),
'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '0'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_sync': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']", 'unique': 'True'}),
'provider': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'sync_time': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'})
},
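# Note: the literal datetime in Broadcast.date_expires below is a
# freeze-time artifact -- South evaluates dynamic defaults once when the
# migration is generated, so the date reflects when this file was created,
# not a meaningful constant.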
'sentry.broadcast': {
'Meta': {'object_name': 'Broadcast'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_expires': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2019, 1, 25, 0, 0)', 'null': 'True', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'link': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'upstream_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'})
},
'sentry.broadcastseen': {
'Meta': {'unique_together': "(('broadcast', 'user'),)", 'object_name': 'BroadcastSeen'},
'broadcast': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Broadcast']"}),
'date_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
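# Commit and CommitAuthor reference repositories/organizations via plain
# bounded integer columns (repository_id, organization_id) rather than
# FlexibleForeignKeys, so no database-level FK constraint is created.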
'sentry.commit': {
'Meta': {'unique_together': "(('repository_id', 'key'),)", 'object_name': 'Commit', 'index_together': "(('repository_id', 'date_added'),)"},
'author': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.CommitAuthor']", 'null': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'message': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'repository_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {})
},
'sentry.commitauthor': {
'Meta': {'unique_together': "(('organization_id', 'email'), ('organization_id', 'external_id'))", 'object_name': 'CommitAuthor'},
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'external_id': ('django.db.models.fields.CharField', [], {'max_length': '164', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'})
},
'sentry.commitfilechange': {
'Meta': {'unique_together': "(('commit', 'filename'),)", 'object_name': 'CommitFileChange'},
'commit': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Commit']"}),
'filename': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '1'})
},
'sentry.counter': {
'Meta': {'object_name': 'Counter', 'db_table': "'sentry_projectcounter'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'unique': 'True'}),
'value': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.dashboard': {
'Meta': {'unique_together': "(('organization', 'title'),)", 'object_name': 'Dashboard'},
'created_by': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'sentry.deletedorganization': {
'Meta': {'object_name': 'DeletedOrganization'},
'actor_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'actor_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'actor_label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'date_deleted': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ip_address': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39', 'null': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'reason': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True'})
},
'sentry.deletedproject': {
'Meta': {'object_name': 'DeletedProject'},
'actor_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'actor_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'actor_label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'date_deleted': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ip_address': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39', 'null': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'organization_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'organization_slug': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True'}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'reason': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True'})
},
'sentry.deletedteam': {
'Meta': {'object_name': 'DeletedTeam'},
'actor_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'actor_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'actor_label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'date_deleted': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ip_address': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39', 'null': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'organization_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'organization_slug': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True'}),
'reason': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True'})
},
'sentry.deploy': {
'Meta': {'object_name': 'Deploy'},
'date_finished': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_started': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'environment_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'notified': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
'sentry.discoversavedquery': {
'Meta': {'object_name': 'DiscoverSavedQuery'},
'created_by': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True', 'on_delete': 'models.SET_NULL'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'projects': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Project']", 'through': "orm['sentry.DiscoverSavedQueryProject']", 'symmetrical': 'False'}),
'query': ('sentry.db.models.fields.jsonfield.JSONField', [], {'default': '{}'})
},
'sentry.discoversavedqueryproject': {
'Meta': {'unique_together': "(('project', 'discover_saved_query'),)", 'object_name': 'DiscoverSavedQueryProject'},
'discover_saved_query': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.DiscoverSavedQuery']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.distribution': {
'Meta': {'unique_together': "(('release', 'name'),)", 'object_name': 'Distribution'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"})
},
'sentry.email': {
'Meta': {'object_name': 'Email'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('sentry.db.models.fields.citext.CIEmailField', [], {'unique': 'True', 'max_length': '75'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'})
},
'sentry.environment': {
'Meta': {'unique_together': "(('organization_id', 'name'),)", 'object_name': 'Environment'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'projects': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Project']", 'through': "orm['sentry.EnvironmentProject']", 'symmetrical': 'False'})
},
'sentry.environmentproject': {
'Meta': {'unique_together': "(('project', 'environment'),)", 'object_name': 'EnvironmentProject'},
'environment': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Environment']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_hidden': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
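# Event keeps its legacy storage names: the table is 'sentry_message' and
# event_id is persisted in the 'message_id' column. Its payload is a
# NodeField, which stores a reference into Sentry's external node store
# rather than the full blob in the row.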
'sentry.event': {
'Meta': {'unique_together': "(('project_id', 'event_id'),)", 'object_name': 'Event', 'db_table': "'sentry_message'", 'index_together': "(('group_id', 'datetime'),)"},
'data': ('sentry.db.models.fields.node.NodeField', [], {'null': 'True', 'blank': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'db_column': "'message_id'"}),
'group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'time_spent': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'null': 'True'})
},
'sentry.eventattachment': {
'Meta': {'unique_together': "(('project_id', 'event_id', 'file'),)", 'object_name': 'EventAttachment', 'index_together': "(('project_id', 'date_added'),)"},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']"}),
'group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.TextField', [], {}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.eventmapping': {
'Meta': {'unique_together': "(('project_id', 'event_id'),)", 'object_name': 'EventMapping'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.eventprocessingissue': {
'Meta': {'unique_together': "(('raw_event', 'processing_issue'),)", 'object_name': 'EventProcessingIssue'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'processing_issue': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ProcessingIssue']"}),
'raw_event': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.RawEvent']"})
},
'sentry.eventtag': {
'Meta': {'unique_together': "(('event_id', 'key_id', 'value_id'),)", 'object_name': 'EventTag', 'index_together': "(('group_id', 'key_id', 'value_id'),)"},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'event_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'value_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.eventuser': {
'Meta': {'unique_together': "(('project_id', 'ident'), ('project_id', 'hash'))", 'object_name': 'EventUser', 'index_together': "(('project_id', 'email'), ('project_id', 'username'), ('project_id', 'ip_address'))"},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'ip_address': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39', 'null': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'})
},
'sentry.externalissue': {
'Meta': {'unique_together': "(('organization_id', 'integration_id', 'key'),)", 'object_name': 'ExternalIssue'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'integration_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'metadata': ('sentry.db.models.fields.jsonfield.JSONField', [], {'null': 'True'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'title': ('django.db.models.fields.TextField', [], {'null': 'True'})
},
'sentry.featureadoption': {
'Meta': {'unique_together': "(('organization', 'feature_id'),)", 'object_name': 'FeatureAdoption'},
'applicable': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'complete': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'data': ('sentry.db.models.fields.jsonfield.JSONField', [], {'default': '{}'}),
'date_completed': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'feature_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"})
},
'sentry.file': {
'Meta': {'object_name': 'File'},
'blob': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'legacy_blob'", 'null': 'True', 'to': "orm['sentry.FileBlob']"}),
'blobs': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.FileBlob']", 'through': "orm['sentry.FileBlobIndex']", 'symmetrical': 'False'}),
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True', 'db_index': 'True'}),
'headers': ('sentry.db.models.fields.jsonfield.JSONField', [], {'default': '{}'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.TextField', [], {}),
'path': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'size': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'sentry.fileblob': {
'Meta': {'object_name': 'FileBlob'},
'checksum': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'path': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'size': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'})
},
'sentry.fileblobindex': {
'Meta': {'unique_together': "(('file', 'blob', 'offset'),)", 'object_name': 'FileBlobIndex'},
'blob': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.FileBlob']"}),
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'offset': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {})
},
'sentry.fileblobowner': {
'Meta': {'unique_together': "(('blob', 'organization'),)", 'object_name': 'FileBlobOwner'},
'blob': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.FileBlob']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"})
},
'sentry.group': {
'Meta': {'unique_together': "(('project', 'short_id'),)", 'object_name': 'Group', 'db_table': "'sentry_groupedmessage'", 'index_together': "(('project', 'first_release'),)"},
'active_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True', 'blank': 'True'}),
'first_release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']", 'null': 'True', 'on_delete': 'models.PROTECT'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'level': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'num_comments': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'null': 'True'}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'resolved_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'score': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '0'}),
'short_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'time_spent_count': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '0'}),
'time_spent_total': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '0'}),
'times_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '1', 'db_index': 'True'})
},
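# The 'sentry_groupasignee' table name below is misspelled in the original
# schema; the typo is preserved here so the migration matches the
# historical database state.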
'sentry.groupassignee': {
'Meta': {'object_name': 'GroupAssignee', 'db_table': "'sentry_groupasignee'"},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'assignee_set'", 'unique': 'True', 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'assignee_set'", 'to': "orm['sentry.Project']"}),
'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'sentry_assignee_set'", 'null': 'True', 'to': "orm['sentry.Team']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'sentry_assignee_set'", 'null': 'True', 'to': "orm['sentry.User']"})
},
'sentry.groupbookmark': {
'Meta': {'unique_together': "(('project', 'user', 'group'),)", 'object_name': 'GroupBookmark'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'sentry_bookmark_set'", 'to': "orm['sentry.User']"})
},
'sentry.groupcommitresolution': {
'Meta': {'unique_together': "(('group_id', 'commit_id'),)", 'object_name': 'GroupCommitResolution'},
'commit_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'group_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'})
},
'sentry.groupemailthread': {
'Meta': {'unique_together': "(('email', 'group'), ('email', 'msgid'))", 'object_name': 'GroupEmailThread'},
'date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'groupemail_set'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'msgid': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'groupemail_set'", 'to': "orm['sentry.Project']"})
},
'sentry.groupenvironment': {
'Meta': {'unique_together': "[('group_id', 'environment_id')]", 'object_name': 'GroupEnvironment', 'index_together': "[('environment_id', 'first_release_id')]"},
'environment_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'first_release_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'group_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'})
},
'sentry.grouphash': {
'Meta': {'unique_together': "(('project', 'hash'),)", 'object_name': 'GroupHash'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'group_tombstone_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True', 'db_index': 'True'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'state': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'})
},
'sentry.grouplink': {
'Meta': {'unique_together': "(('group_id', 'linked_type', 'linked_id'),)", 'object_name': 'GroupLink'},
'data': ('sentry.db.models.fields.jsonfield.JSONField', [], {'default': '{}'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'linked_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'linked_type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '1'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'db_index': 'True'}),
'relationship': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '2'})
},
'sentry.groupmeta': {
'Meta': {'unique_together': "(('group', 'key'),)", 'object_name': 'GroupMeta'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'value': ('django.db.models.fields.TextField', [], {})
},
'sentry.groupredirect': {
'Meta': {'object_name': 'GroupRedirect'},
'group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'previous_group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'unique': 'True'})
},
'sentry.grouprelease': {
'Meta': {'unique_together': "(('group_id', 'release_id', 'environment'),)", 'object_name': 'GroupRelease'},
'environment': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '64'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'release_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'})
},
'sentry.groupresolution': {
'Meta': {'object_name': 'GroupResolution'},
'actor_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'unique': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'})
},
'sentry.grouprulestatus': {
'Meta': {'unique_together': "(('rule', 'group'),)", 'object_name': 'GroupRuleStatus'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_active': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'rule': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Rule']"}),
'status': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'})
},
'sentry.groupseen': {
'Meta': {'unique_together': "(('user', 'group'),)", 'object_name': 'GroupSeen'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'db_index': 'False'})
},
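# The hex literal default on GroupShare.uuid below appears to be another
# freeze-time artifact: South evaluated a uuid4-style callable default once
# while generating this migration.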
'sentry.groupshare': {
'Meta': {'object_name': 'GroupShare'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'unique': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'default': "'971a077057f54734809e8dc332408db4'", 'unique': 'True', 'max_length': '32'})
},
'sentry.groupsnooze': {
'Meta': {'object_name': 'GroupSnooze'},
'actor_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'count': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'unique': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'state': ('sentry.db.models.fields.jsonfield.JSONField', [], {'null': 'True'}),
'until': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'user_count': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'user_window': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'window': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'})
},
'sentry.groupsubscription': {
'Meta': {'unique_together': "(('group', 'user'),)", 'object_name': 'GroupSubscription'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'subscription_set'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'subscription_set'", 'to': "orm['sentry.Project']"}),
'reason': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.grouptagkey': {
'Meta': {'unique_together': "(('project_id', 'group_id', 'key'),)", 'object_name': 'GroupTagKey'},
'group_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True', 'db_index': 'True'}),
'values_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.grouptagvalue': {
'Meta': {'unique_together': "(('group_id', 'key', 'value'),)", 'object_name': 'GroupTagValue', 'db_table': "'sentry_messagefiltervalue'", 'index_together': "(('project_id', 'key', 'value', 'last_seen'),)"},
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'group_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True', 'db_index': 'True'}),
'times_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.grouptombstone': {
'Meta': {'object_name': 'GroupTombstone'},
'actor_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'level': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '40', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'previous_group_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'unique': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.identity': {
'Meta': {'unique_together': "(('idp', 'external_id'), ('idp', 'user'))", 'object_name': 'Identity'},
'data': ('sentry.db.models.fields.encrypted.EncryptedJsonField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_verified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'external_id': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'idp': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.IdentityProvider']"}),
'scopes': ('sentry.db.models.fields.array.ArrayField', [], {'of': ('django.db.models.fields.TextField', [], {})}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.identityprovider': {
'Meta': {'unique_together': "(('type', 'external_id'),)", 'object_name': 'IdentityProvider'},
'config': ('sentry.db.models.fields.encrypted.EncryptedJsonField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'external_id': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'sentry.integration': {
'Meta': {'unique_together': "(('provider', 'external_id'),)", 'object_name': 'Integration'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'external_id': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'metadata': ('sentry.db.models.fields.encrypted.EncryptedJsonField', [], {'default': '{}'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'organizations': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'integrations'", 'symmetrical': 'False', 'through': "orm['sentry.OrganizationIntegration']", 'to': "orm['sentry.Organization']"}),
'projects': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'integrations'", 'symmetrical': 'False', 'through': "orm['sentry.ProjectIntegration']", 'to': "orm['sentry.Project']"}),
'provider': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'null': 'True'})
},
'sentry.integrationexternalproject': {
'Meta': {'unique_together': "(('organization_integration_id', 'external_id'),)", 'object_name': 'IntegrationExternalProject'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'external_id': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'organization_integration_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'resolved_status': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'unresolved_status': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'sentry.latestrelease': {
'Meta': {'unique_together': "(('repository_id', 'environment_id'),)", 'object_name': 'LatestRelease'},
'commit_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'deploy_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'environment_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'release_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'repository_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.lostpasswordhash': {
'Meta': {'object_name': 'LostPasswordHash'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'unique': 'True'})
},
'sentry.option': {
'Meta': {'object_name': 'Option'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'value': ('sentry.db.models.fields.encrypted.EncryptedPickledObjectField', [], {})
},
'sentry.organization': {
'Meta': {'object_name': 'Organization'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'default_role': ('django.db.models.fields.CharField', [], {'default': "'member'", 'max_length': '32'}),
'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '1'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'org_memberships'", 'symmetrical': 'False', 'through': "orm['sentry.OrganizationMember']", 'to': "orm['sentry.User']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.organizationaccessrequest': {
'Meta': {'unique_together': "(('team', 'member'),)", 'object_name': 'OrganizationAccessRequest'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'member': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.OrganizationMember']"}),
'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"})
},
'sentry.organizationavatar': {
'Meta': {'object_name': 'OrganizationAvatar'},
'avatar_type': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']", 'unique': 'True', 'null': 'True', 'on_delete': 'models.SET_NULL'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'avatar'", 'unique': 'True', 'to': "orm['sentry.Organization']"})
},
'sentry.organizationintegration': {
'Meta': {'unique_together': "(('organization', 'integration'),)", 'object_name': 'OrganizationIntegration'},
'config': ('sentry.db.models.fields.encrypted.EncryptedJsonField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'default_auth_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'integration': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Integration']"}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.organizationmember': {
'Meta': {'unique_together': "(('organization', 'user'), ('organization', 'email'))", 'object_name': 'OrganizationMember'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '0'}),
'has_global_access': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'member_set'", 'to': "orm['sentry.Organization']"}),
'role': ('django.db.models.fields.CharField', [], {'default': "'member'", 'max_length': '32'}),
'teams': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Team']", 'symmetrical': 'False', 'through': "orm['sentry.OrganizationMemberTeam']", 'blank': 'True'}),
'token': ('django.db.models.fields.CharField', [], {'max_length': '64', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'token_expires_at': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '50', 'blank': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'sentry_orgmember_set'", 'null': 'True', 'to': "orm['sentry.User']"})
},
'sentry.organizationmemberteam': {
'Meta': {'unique_together': "(('team', 'organizationmember'),)", 'object_name': 'OrganizationMemberTeam', 'db_table': "'sentry_organizationmember_teams'"},
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'organizationmember': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.OrganizationMember']"}),
'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"})
},
'sentry.organizationonboardingtask': {
'Meta': {'unique_together': "(('organization', 'task'),)", 'object_name': 'OrganizationOnboardingTask'},
'data': ('sentry.db.models.fields.jsonfield.JSONField', [], {'default': '{}'}),
'date_completed': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'task': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'})
},
'sentry.organizationoption': {
'Meta': {'unique_together': "(('organization', 'key'),)", 'object_name': 'OrganizationOption', 'db_table': "'sentry_organizationoptions'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'value': ('sentry.db.models.fields.encrypted.EncryptedPickledObjectField', [], {})
},
'sentry.processingissue': {
'Meta': {'unique_together': "(('project', 'checksum', 'type'),)", 'object_name': 'ProcessingIssue'},
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'}),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '30'})
},
'sentry.project': {
'Meta': {'unique_together': "(('organization', 'slug'),)", 'object_name': 'Project'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'first_event': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '0', 'null': 'True'}),
'forced_color': ('django.db.models.fields.CharField', [], {'max_length': '6', 'null': 'True', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'null': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'teams': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'teams'", 'symmetrical': 'False', 'through': "orm['sentry.ProjectTeam']", 'to': "orm['sentry.Team']"})
},
'sentry.projectavatar': {
'Meta': {'object_name': 'ProjectAvatar'},
'avatar_type': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']", 'unique': 'True', 'null': 'True', 'on_delete': 'models.SET_NULL'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'avatar'", 'unique': 'True', 'to': "orm['sentry.Project']"})
},
'sentry.projectbookmark': {
'Meta': {'unique_together': "(('project', 'user'),)", 'object_name': 'ProjectBookmark'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True', 'blank': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.projectcficachefile': {
'Meta': {'unique_together': "(('project', 'debug_file'),)", 'object_name': 'ProjectCfiCacheFile'},
'cache_file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']"}),
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'debug_file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ProjectDebugFile']", 'on_delete': 'models.DO_NOTHING', 'db_column': "'dsym_file_id'"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'version': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {})
},
'sentry.projectdebugfile': {
'Meta': {'object_name': 'ProjectDebugFile', 'db_table': "'sentry_projectdsymfile'", 'index_together': "(('project', 'debug_id'),)"},
'cpu_name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'data': ('sentry.db.models.fields.jsonfield.JSONField', [], {'null': 'True'}),
'debug_id': ('django.db.models.fields.CharField', [], {'max_length': '64', 'db_column': "'uuid'"}),
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'object_name': ('django.db.models.fields.TextField', [], {}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'})
},
'sentry.projectintegration': {
'Meta': {'unique_together': "(('project', 'integration'),)", 'object_name': 'ProjectIntegration'},
'config': ('sentry.db.models.fields.encrypted.EncryptedJsonField', [], {'default': '{}'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'integration': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Integration']"}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.projectkey': {
'Meta': {'object_name': 'ProjectKey'},
'data': ('sentry.db.models.fields.jsonfield.JSONField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'key_set'", 'to': "orm['sentry.Project']"}),
'public_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'rate_limit_count': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'rate_limit_window': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'roles': ('django.db.models.fields.BigIntegerField', [], {'default': '1'}),
'secret_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
},
'sentry.projectoption': {
'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'ProjectOption', 'db_table': "'sentry_projectoptions'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'value': ('sentry.db.models.fields.encrypted.EncryptedPickledObjectField', [], {})
},
'sentry.projectownership': {
'Meta': {'object_name': 'ProjectOwnership'},
'date_created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'fallthrough': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'unique': 'True'}),
'raw': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'schema': ('sentry.db.models.fields.jsonfield.JSONField', [], {'null': 'True'})
},
'sentry.projectplatform': {
'Meta': {'unique_together': "(('project_id', 'platform'),)", 'object_name': 'ProjectPlatform'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.projectredirect': {
'Meta': {'unique_together': "(('organization', 'redirect_slug'),)", 'object_name': 'ProjectRedirect'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'redirect_slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'})
},
'sentry.projectsymcachefile': {
'Meta': {'unique_together': "(('project', 'debug_file'),)", 'object_name': 'ProjectSymCacheFile'},
'cache_file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']"}),
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'debug_file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ProjectDebugFile']", 'on_delete': 'models.DO_NOTHING', 'db_column': "'dsym_file_id'"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'version': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {})
},
'sentry.projectteam': {
'Meta': {'unique_together': "(('project', 'team'),)", 'object_name': 'ProjectTeam'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"})
},
'sentry.promptsactivity': {
'Meta': {'unique_together': "(('user', 'feature', 'organization_id', 'project_id'),)", 'object_name': 'PromptsActivity'},
'data': ('sentry.db.models.fields.jsonfield.JSONField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'feature': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.pullrequest': {
'Meta': {'unique_together': "(('repository_id', 'key'),)", 'object_name': 'PullRequest', 'db_table': "'sentry_pull_request'", 'index_together': "(('repository_id', 'date_added'), ('organization_id', 'merge_commit_sha'))"},
'author': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.CommitAuthor']", 'null': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'merge_commit_sha': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'message': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'repository_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'title': ('django.db.models.fields.TextField', [], {'null': 'True'})
},
'sentry.pullrequestcommit': {
'Meta': {'unique_together': "(('pull_request', 'commit'),)", 'object_name': 'PullRequestCommit', 'db_table': "'sentry_pullrequest_commit'"},
'commit': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Commit']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'pull_request': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.PullRequest']"})
},
'sentry.rawevent': {
'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'RawEvent'},
'data': ('sentry.db.models.fields.node.NodeField', [], {'null': 'True', 'blank': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.relay': {
'Meta': {'object_name': 'Relay'},
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_internal': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'public_key': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'relay_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'})
},
'sentry.release': {
'Meta': {'unique_together': "(('organization', 'version'),)", 'object_name': 'Release'},
'authors': ('sentry.db.models.fields.array.ArrayField', [], {'of': (u'django.db.models.fields.TextField', [], {})}),
'commit_count': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'null': 'True'}),
'data': ('sentry.db.models.fields.jsonfield.JSONField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_released': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_started': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_commit_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'last_deploy_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'new_groups': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'owner': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'projects': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'releases'", 'symmetrical': 'False', 'through': "orm['sentry.ReleaseProject']", 'to': "orm['sentry.Project']"}),
'ref': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'}),
'total_deploys': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'null': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'version': ('django.db.models.fields.CharField', [], {'max_length': '250'})
},
'sentry.releasecommit': {
'Meta': {'unique_together': "(('release', 'commit'), ('release', 'order'))", 'object_name': 'ReleaseCommit'},
'commit': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Commit']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'order': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"})
},
'sentry.releaseenvironment': {
'Meta': {'unique_together': "(('organization', 'release', 'environment'),)", 'object_name': 'ReleaseEnvironment', 'db_table': "'sentry_environmentrelease'"},
'environment': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Environment']"}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"})
},
'sentry.releasefile': {
'Meta': {'unique_together': "(('release', 'ident'),)", 'object_name': 'ReleaseFile', 'index_together': "(('release', 'name'),)"},
'dist': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Distribution']", 'null': 'True'}),
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'name': ('django.db.models.fields.TextField', [], {}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"})
},
'sentry.releaseheadcommit': {
'Meta': {'unique_together': "(('repository_id', 'release'),)", 'object_name': 'ReleaseHeadCommit'},
'commit': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Commit']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"}),
'repository_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {})
},
'sentry.releaseproject': {
'Meta': {'unique_together': "(('project', 'release'),)", 'object_name': 'ReleaseProject', 'db_table': "'sentry_release_project'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'new_groups': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"})
},
'sentry.releaseprojectenvironment': {
'Meta': {'unique_together': "(('project', 'release', 'environment'),)", 'object_name': 'ReleaseProjectEnvironment'},
'environment': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Environment']"}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_deploy_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True', 'db_index': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'new_issues_count': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"})
},
'sentry.repository': {
'Meta': {'unique_together': "(('organization_id', 'name'), ('organization_id', 'provider', 'external_id'))", 'object_name': 'Repository'},
'config': ('sentry.db.models.fields.jsonfield.JSONField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'external_id': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'integration_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True', 'db_index': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'provider': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True'})
},
'sentry.reprocessingreport': {
'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'ReprocessingReport'},
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.rule': {
'Meta': {'object_name': 'Rule'},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'environment_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
},
'sentry.savedsearch': {
'Meta': {'unique_together': "(('project', 'name'),)", 'object_name': 'SavedSearch'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_default': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_global': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'owner': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'query': ('django.db.models.fields.TextField', [], {})
},
'sentry.savedsearchuserdefault': {
'Meta': {'unique_together': "(('project', 'user'),)", 'object_name': 'SavedSearchUserDefault', 'db_table': "'sentry_savedsearch_userdefault'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'savedsearch': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.SavedSearch']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.scheduleddeletion': {
'Meta': {'unique_together': "(('app_label', 'model_name', 'object_id'),)", 'object_name': 'ScheduledDeletion'},
'aborted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'actor_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'data': ('sentry.db.models.fields.jsonfield.JSONField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_scheduled': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2019, 2, 17, 0, 0)'}),
'guid': ('django.db.models.fields.CharField', [], {'default': "'872e507076244783876be9f74901e6ae'", 'unique': 'True', 'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'in_progress': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'model_name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'object_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.scheduledjob': {
'Meta': {'object_name': 'ScheduledJob'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_scheduled': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'payload': ('sentry.db.models.fields.jsonfield.JSONField', [], {'default': '{}'})
},
'sentry.sentryapp': {
'Meta': {'object_name': 'SentryApp'},
'application': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'sentry_app'", 'unique': 'True', 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['sentry.ApiApplication']"}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_deleted': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'events': ('sentry.db.models.fields.array.ArrayField', [], {'of': (u'django.db.models.fields.TextField', [], {})}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_alertable': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.TextField', [], {}),
'overview': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'owner': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'owned_sentry_apps'", 'to': "orm['sentry.Organization']"}),
'proxy_user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'sentry_app'", 'unique': 'True', 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['sentry.User']"}),
'redirect_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True'}),
'scope_list': ('sentry.db.models.fields.array.ArrayField', [], {'of': (u'django.db.models.fields.TextField', [], {})}),
'scopes': ('django.db.models.fields.BigIntegerField', [], {'default': 'None'}),
'slug': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'default': "'3e3a2f6f-9136-4a26-8155-31829e66ecc4'", 'max_length': '64'}),
'webhook_url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
'sentry.sentryappavatar': {
'Meta': {'object_name': 'SentryAppAvatar'},
'avatar_type': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']", 'unique': 'True', 'null': 'True', 'on_delete': 'models.SET_NULL'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
'sentry_app': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'avatar'", 'unique': 'True', 'to': "orm['sentry.SentryApp']"})
},
'sentry.sentryappinstallation': {
'Meta': {'object_name': 'SentryAppInstallation'},
'api_grant': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'sentry_app_installation'", 'unique': 'True', 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['sentry.ApiGrant']"}),
'authorization': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'sentry_app_installation'", 'unique': 'True', 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['sentry.ApiAuthorization']"}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_deleted': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'sentry_app_installations'", 'to': "orm['sentry.Organization']"}),
'sentry_app': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'installations'", 'to': "orm['sentry.SentryApp']"}),
'uuid': ('django.db.models.fields.CharField', [], {'default': "'227713f5-78ab-401b-a90d-80720b8ed80e'", 'max_length': '64'})
},
'sentry.servicehook': {
'Meta': {'object_name': 'ServiceHook'},
'actor_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'application': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ApiApplication']", 'null': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'events': ('sentry.db.models.fields.array.ArrayField', [], {'of': (u'django.db.models.fields.TextField', [], {})}),
'guid': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'secret': ('sentry.db.models.fields.encrypted.EncryptedTextField', [], {'default': "'f9f6f76988ea45d5b1029e2e604e95ace68b3d9e8c2743b9a7755a347e488f74'"}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '512'}),
'version': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.tagkey': {
'Meta': {'unique_together': "(('project_id', 'key'),)", 'object_name': 'TagKey', 'db_table': "'sentry_filterkey'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'values_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.tagvalue': {
'Meta': {'unique_together': "(('project_id', 'key', 'value'),)", 'object_name': 'TagValue', 'db_table': "'sentry_filtervalue'", 'index_together': "(('project_id', 'key', 'last_seen'),)"},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True', 'blank': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True', 'db_index': 'True'}),
'times_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.team': {
'Meta': {'unique_together': "(('organization', 'slug'),)", 'object_name': 'Team'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.teamavatar': {
'Meta': {'object_name': 'TeamAvatar'},
'avatar_type': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']", 'unique': 'True', 'null': 'True', 'on_delete': 'models.SET_NULL'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'avatar'", 'unique': 'True', 'to': "orm['sentry.Team']"})
},
'sentry.user': {
'Meta': {'object_name': 'User', 'db_table': "'auth_user'"},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '0', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_managed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_password_expired': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_sentry_app': ('django.db.models.fields.NullBooleanField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_active': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'last_password_change': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'db_column': "'first_name'", 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'session_nonce': ('django.db.models.fields.CharField', [], {'max_length': '12', 'null': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'})
},
'sentry.useravatar': {
'Meta': {'object_name': 'UserAvatar'},
'avatar_type': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']", 'unique': 'True', 'null': 'True', 'on_delete': 'models.SET_NULL'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'avatar'", 'unique': 'True', 'to': "orm['sentry.User']"})
},
'sentry.useremail': {
'Meta': {'unique_together': "(('user', 'email'),)", 'object_name': 'UserEmail'},
'date_hash_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_verified': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'emails'", 'to': "orm['sentry.User']"}),
'validation_hash': ('django.db.models.fields.CharField', [], {'default': "u'flfk8HwwLBodD7uLZ8Jy6PhjawDBeBKT'", 'max_length': '32'})
},
'sentry.userip': {
'Meta': {'unique_together': "(('user', 'ip_address'),)", 'object_name': 'UserIP'},
'country_code': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ip_address': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'region_code': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.useroption': {
'Meta': {'unique_together': "(('user', 'project', 'key'), ('user', 'organization', 'key'))", 'object_name': 'UserOption'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']", 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}),
'value': ('sentry.db.models.fields.encrypted.EncryptedPickledObjectField', [], {})
},
'sentry.userpermission': {
'Meta': {'unique_together': "(('user', 'permission'),)", 'object_name': 'UserPermission'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'permission': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.userreport': {
'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'UserReport', 'index_together': "(('project', 'event_id'), ('project', 'date_added'))"},
'comments': ('django.db.models.fields.TextField', [], {}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'environment': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Environment']", 'null': 'True'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'event_user_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.widget': {
'Meta': {'unique_together': "(('dashboard', 'order'), ('dashboard', 'title'))", 'object_name': 'Widget'},
'dashboard': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Dashboard']"}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'display_options': ('sentry.db.models.fields.jsonfield.JSONField', [], {'default': '{}'}),
'display_type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'order': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'sentry.widgetdatasource': {
'Meta': {'unique_together': "(('widget', 'name'), ('widget', 'order'))", 'object_name': 'WidgetDataSource'},
'data': ('sentry.db.models.fields.jsonfield.JSONField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'order': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'widget': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Widget']"})
}
}
complete_apps = ['sentry']
symmetrical = True
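    # A minimal, hypothetical sketch of how a South SchemaMigration consumes a
    # frozen ``models`` dict like the one above. It is kept as comments because
    # the real Migration class for this file precedes the freeze, and the
    # column added here is invented purely for illustration:
    #
    #     from south.db import db
    #     from south.v2 import SchemaMigration
    #
    #     class Migration(SchemaMigration):
    #         def forwards(self, orm):
    #             # ``orm`` is rebuilt from the frozen ``models`` dict, so the
    #             # historical model state stays available even after the live
    #             # models have changed.
    #             db.add_column(
    #                 'sentry_projectoptions', 'example_flag',
    #                 self.gf('django.db.models.fields.BooleanField')(default=False),
    #                 keep_default=False)
    #
    #         def backwards(self, orm):
    #             db.delete_column('sentry_projectoptions', 'example_flag')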
"""Tests suite for MaskedArray & subclassing.
:author: Pierre Gerard-Marchant
:contact: pierregm_at_uga_dot_edu
"""
from __future__ import division, absolute_import, print_function
__author__ = "Pierre GF Gerard-Marchant"
import warnings
import pickle
import operator
from functools import reduce
import numpy as np
import numpy.ma.core
import numpy.core.fromnumeric as fromnumeric
import numpy.core.umath as umath
from numpy.testing import TestCase, run_module_suite, assert_raises
from numpy import ndarray
from numpy.compat import asbytes, asbytes_nested
from numpy.ma.testutils import (
assert_, assert_array_equal, assert_equal, assert_almost_equal,
assert_equal_records, fail_if_equal, assert_not_equal,
assert_mask_equal,
)
from numpy.ma.core import (
MAError, MaskError, MaskType, MaskedArray, abs, absolute, add, all,
allclose, allequal, alltrue, angle, anom, arange, arccos, arctan2,
arcsin, arctan, argsort, array, asarray, choose, concatenate,
conjugate, cos, cosh, count, default_fill_value, diag, divide, empty,
empty_like, equal, exp, flatten_mask, filled, fix_invalid,
flatten_structured_array, fromflex, getmask, getmaskarray, greater,
greater_equal, identity, inner, isMaskedArray, less, less_equal, log,
log10, make_mask, make_mask_descr, mask_or, masked, masked_array,
masked_equal, masked_greater, masked_greater_equal, masked_inside,
masked_less, masked_less_equal, masked_not_equal, masked_outside,
masked_print_option, masked_values, masked_where, max, maximum,
maximum_fill_value, min, minimum, minimum_fill_value, mod, multiply,
mvoid, nomask, not_equal, ones, outer, power, product, put, putmask,
ravel, repeat, reshape, resize, shape, sin, sinh, sometrue, sort, sqrt,
subtract, sum, take, tan, tanh, transpose, where, zeros,
)
pi = np.pi
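# A quick orientation sketch (illustrative, not part of the assertions below):
# operations on masked arrays propagate the mask, which is what most of these
# tests exercise. Using only names imported above:
#
#     m = masked_array([1., 2., 3.], mask=[0, 1, 0])
#     (m + 1).mask.tolist()    # [False, True, False]
#     filled(m, 0).tolist()    # [1.0, 0.0, 3.0]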
class TestMaskedArray(TestCase):
# Base test class for MaskedArrays.
def setUp(self):
# Base data definition.
x = np.array([1., 1., 1., -2., pi/2.0, 4., 5., -10., 10., 1., 2., 3.])
y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.])
a10 = 10.
m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]
m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1]
xm = masked_array(x, mask=m1)
ym = masked_array(y, mask=m2)
z = np.array([-.5, 0., .5, .8])
zm = masked_array(z, mask=[0, 1, 0, 0])
xf = np.where(m1, 1e+20, x)
xm.set_fill_value(1e+20)
self.d = (x, y, a10, m1, m2, xm, ym, z, zm, xf)
def test_basicattributes(self):
# Tests some basic array attributes.
a = array([1, 3, 2])
b = array([1, 3, 2], mask=[1, 0, 1])
assert_equal(a.ndim, 1)
assert_equal(b.ndim, 1)
assert_equal(a.size, 3)
assert_equal(b.size, 3)
assert_equal(a.shape, (3,))
assert_equal(b.shape, (3,))
def test_basic0d(self):
# Checks masking a scalar
x = masked_array(0)
assert_equal(str(x), '0')
x = masked_array(0, mask=True)
assert_equal(str(x), str(masked_print_option))
x = masked_array(0, mask=False)
assert_equal(str(x), '0')
x = array(0, mask=1)
self.assertTrue(x.filled().dtype is x._data.dtype)
def test_basic1d(self):
# Test of basic array creation and properties in 1 dimension.
(x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d
self.assertTrue(not isMaskedArray(x))
self.assertTrue(isMaskedArray(xm))
self.assertTrue((xm - ym).filled(0).any())
fail_if_equal(xm.mask.astype(int), ym.mask.astype(int))
s = x.shape
assert_equal(np.shape(xm), s)
assert_equal(xm.shape, s)
assert_equal(xm.dtype, x.dtype)
assert_equal(zm.dtype, z.dtype)
assert_equal(xm.size, reduce(lambda x, y:x * y, s))
assert_equal(count(xm), len(m1) - reduce(lambda x, y:x + y, m1))
assert_array_equal(xm, xf)
assert_array_equal(filled(xm, 1.e20), xf)
assert_array_equal(x, xm)
def test_basic2d(self):
# Test of basic array creation and properties in 2 dimensions.
(x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d
for s in [(4, 3), (6, 2)]:
x.shape = s
y.shape = s
xm.shape = s
ym.shape = s
xf.shape = s
self.assertTrue(not isMaskedArray(x))
self.assertTrue(isMaskedArray(xm))
assert_equal(shape(xm), s)
assert_equal(xm.shape, s)
assert_equal(xm.size, reduce(lambda x, y:x * y, s))
assert_equal(count(xm), len(m1) - reduce(lambda x, y:x + y, m1))
assert_equal(xm, xf)
assert_equal(filled(xm, 1.e20), xf)
assert_equal(x, xm)
def test_concatenate_basic(self):
# Tests concatenations.
(x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d
# basic concatenation
assert_equal(np.concatenate((x, y)), concatenate((xm, ym)))
assert_equal(np.concatenate((x, y)), concatenate((x, y)))
assert_equal(np.concatenate((x, y)), concatenate((xm, y)))
assert_equal(np.concatenate((x, y, x)), concatenate((x, ym, x)))
def test_concatenate_alongaxis(self):
# Tests concatenations.
(x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d
# Concatenation along an axis
s = (3, 4)
x.shape = y.shape = xm.shape = ym.shape = s
assert_equal(xm.mask, np.reshape(m1, s))
assert_equal(ym.mask, np.reshape(m2, s))
xmym = concatenate((xm, ym), 1)
assert_equal(np.concatenate((x, y), 1), xmym)
assert_equal(np.concatenate((xm.mask, ym.mask), 1), xmym._mask)
x = zeros(2)
y = array(ones(2), mask=[False, True])
z = concatenate((x, y))
assert_array_equal(z, [0, 0, 1, 1])
assert_array_equal(z.mask, [False, False, False, True])
z = concatenate((y, x))
assert_array_equal(z, [1, 1, 0, 0])
assert_array_equal(z.mask, [False, True, False, False])
def test_concatenate_flexible(self):
# Tests the concatenation on flexible arrays.
data = masked_array(list(zip(np.random.rand(10),
np.arange(10))),
dtype=[('a', float), ('b', int)])
test = concatenate([data[:5], data[5:]])
assert_equal_records(test, data)
def test_creation_ndmin(self):
# Check the use of ndmin
x = array([1, 2, 3], mask=[1, 0, 0], ndmin=2)
assert_equal(x.shape, (1, 3))
assert_equal(x._data, [[1, 2, 3]])
assert_equal(x._mask, [[1, 0, 0]])
def test_creation_ndmin_from_maskedarray(self):
# Make sure we're not losing the original mask w/ ndmin
x = array([1, 2, 3])
x[-1] = masked
xx = array(x, ndmin=2, dtype=float)
assert_equal(x.shape, x._mask.shape)
assert_equal(xx.shape, xx._mask.shape)
def test_creation_maskcreation(self):
# Tests how masks are initialized at the creation of Maskedarrays.
data = arange(24, dtype=float)
data[[3, 6, 15]] = masked
dma_1 = MaskedArray(data)
assert_equal(dma_1.mask, data.mask)
dma_2 = MaskedArray(dma_1)
assert_equal(dma_2.mask, dma_1.mask)
dma_3 = MaskedArray(dma_1, mask=[1, 0, 0, 0] * 6)
fail_if_equal(dma_3.mask, dma_1.mask)
def test_creation_with_list_of_maskedarrays(self):
        # Tests creating a masked array from a list of masked arrays.
x = array(np.arange(5), mask=[1, 0, 0, 0, 0])
data = array((x, x[::-1]))
assert_equal(data, [[0, 1, 2, 3, 4], [4, 3, 2, 1, 0]])
assert_equal(data._mask, [[1, 0, 0, 0, 0], [0, 0, 0, 0, 1]])
x.mask = nomask
data = array((x, x[::-1]))
assert_equal(data, [[0, 1, 2, 3, 4], [4, 3, 2, 1, 0]])
self.assertTrue(data.mask is nomask)
def test_asarray(self):
(x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d
xm.fill_value = -9999
xm._hardmask = True
xmm = asarray(xm)
assert_equal(xmm._data, xm._data)
assert_equal(xmm._mask, xm._mask)
assert_equal(xmm.fill_value, xm.fill_value)
assert_equal(xmm._hardmask, xm._hardmask)
def test_fix_invalid(self):
# Checks fix_invalid.
with np.errstate(invalid='ignore'):
data = masked_array([np.nan, 0., 1.], mask=[0, 0, 1])
data_fixed = fix_invalid(data)
assert_equal(data_fixed._data, [data.fill_value, 0., 1.])
assert_equal(data_fixed._mask, [1., 0., 1.])
def test_maskedelement(self):
# Test of masked element
x = arange(6)
x[1] = masked
self.assertTrue(str(masked) == '--')
self.assertTrue(x[1] is masked)
assert_equal(filled(x[1], 0), 0)
# don't know why these should raise an exception...
#self.assertRaises(Exception, lambda x,y: x+y, masked, masked)
#self.assertRaises(Exception, lambda x,y: x+y, masked, 2)
#self.assertRaises(Exception, lambda x,y: x+y, masked, xx)
#self.assertRaises(Exception, lambda x,y: x+y, xx, masked)
def test_set_element_as_object(self):
# Tests setting elements with object
a = empty(1, dtype=object)
x = (1, 2, 3, 4, 5)
a[0] = x
assert_equal(a[0], x)
self.assertTrue(a[0] is x)
import datetime
dt = datetime.datetime.now()
a[0] = dt
self.assertTrue(a[0] is dt)
def test_indexing(self):
# Tests conversions and indexing
x1 = np.array([1, 2, 4, 3])
x2 = array(x1, mask=[1, 0, 0, 0])
x3 = array(x1, mask=[0, 1, 0, 1])
x4 = array(x1)
# test conversion to strings
str(x2) # raises?
repr(x2) # raises?
assert_equal(np.sort(x1), sort(x2, endwith=False))
# tests of indexing
assert_(type(x2[1]) is type(x1[1]))
assert_(x1[1] == x2[1])
assert_(x2[0] is masked)
assert_equal(x1[2], x2[2])
assert_equal(x1[2:5], x2[2:5])
assert_equal(x1[:], x2[:])
assert_equal(x1[1:], x3[1:])
x1[2] = 9
x2[2] = 9
assert_equal(x1, x2)
x1[1:3] = 99
x2[1:3] = 99
assert_equal(x1, x2)
x2[1] = masked
assert_equal(x1, x2)
x2[1:3] = masked
assert_equal(x1, x2)
x2[:] = x1
x2[1] = masked
assert_(allequal(getmask(x2), array([0, 1, 0, 0])))
x3[:] = masked_array([1, 2, 3, 4], [0, 1, 1, 0])
assert_(allequal(getmask(x3), array([0, 1, 1, 0])))
x4[:] = masked_array([1, 2, 3, 4], [0, 1, 1, 0])
assert_(allequal(getmask(x4), array([0, 1, 1, 0])))
assert_(allequal(x4, array([1, 2, 3, 4])))
x1 = np.arange(5) * 1.0
x2 = masked_values(x1, 3.0)
assert_equal(x1, x2)
assert_(allequal(array([0, 0, 0, 1, 0], MaskType), x2.mask))
assert_equal(3.0, x2.fill_value)
x1 = array([1, 'hello', 2, 3], object)
x2 = np.array([1, 'hello', 2, 3], object)
s1 = x1[1]
s2 = x2[1]
assert_equal(type(s2), str)
assert_equal(type(s1), str)
assert_equal(s1, s2)
assert_(x1[1:1].shape == (0,))
def test_matrix_indexing(self):
# Tests conversions and indexing
x1 = np.matrix([[1, 2, 3], [4, 3, 2]])
x2 = array(x1, mask=[[1, 0, 0], [0, 1, 0]])
x3 = array(x1, mask=[[0, 1, 0], [1, 0, 0]])
x4 = array(x1)
# test conversion to strings
str(x2) # raises?
repr(x2) # raises?
# tests of indexing
assert_(type(x2[1, 0]) is type(x1[1, 0]))
assert_(x1[1, 0] == x2[1, 0])
assert_(x2[1, 1] is masked)
assert_equal(x1[0, 2], x2[0, 2])
assert_equal(x1[0, 1:], x2[0, 1:])
assert_equal(x1[:, 2], x2[:, 2])
assert_equal(x1[:], x2[:])
assert_equal(x1[1:], x3[1:])
x1[0, 2] = 9
x2[0, 2] = 9
assert_equal(x1, x2)
x1[0, 1:] = 99
x2[0, 1:] = 99
assert_equal(x1, x2)
x2[0, 1] = masked
assert_equal(x1, x2)
x2[0, 1:] = masked
assert_equal(x1, x2)
x2[0, :] = x1[0, :]
x2[0, 1] = masked
assert_(allequal(getmask(x2), np.array([[0, 1, 0], [0, 1, 0]])))
x3[1, :] = masked_array([1, 2, 3], [1, 1, 0])
assert_(allequal(getmask(x3)[1], array([1, 1, 0])))
assert_(allequal(getmask(x3[1]), array([1, 1, 0])))
x4[1, :] = masked_array([1, 2, 3], [1, 1, 0])
assert_(allequal(getmask(x4[1]), array([1, 1, 0])))
assert_(allequal(x4[1], array([1, 2, 3])))
x1 = np.matrix(np.arange(5) * 1.0)
x2 = masked_values(x1, 3.0)
assert_equal(x1, x2)
assert_(allequal(array([0, 0, 0, 1, 0], MaskType), x2.mask))
assert_equal(3.0, x2.fill_value)
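        # Note: np.matrix is deprecated in later NumPy releases; the matrix
        # checks above target the era in which numpy.ma still supported it.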
def test_copy(self):
# Tests of some subtle points of copying and sizing.
n = [0, 0, 1, 0, 0]
m = make_mask(n)
m2 = make_mask(m)
self.assertTrue(m is m2)
m3 = make_mask(m, copy=1)
self.assertTrue(m is not m3)
x1 = np.arange(5)
y1 = array(x1, mask=m)
#self.assertTrue( y1._data is x1)
assert_equal(y1._data.__array_interface__, x1.__array_interface__)
self.assertTrue(allequal(x1, y1.data))
#self.assertTrue( y1.mask is m)
assert_equal(y1._mask.__array_interface__, m.__array_interface__)
y1a = array(y1)
self.assertTrue(y1a._data.__array_interface__ ==
y1._data.__array_interface__)
self.assertTrue(y1a.mask is y1.mask)
y2 = array(x1, mask=m)
self.assertTrue(y2._data.__array_interface__ == x1.__array_interface__)
#self.assertTrue( y2.mask is m)
self.assertTrue(y2._mask.__array_interface__ == m.__array_interface__)
self.assertTrue(y2[2] is masked)
y2[2] = 9
self.assertTrue(y2[2] is not masked)
#self.assertTrue( y2.mask is not m)
self.assertTrue(y2._mask.__array_interface__ != m.__array_interface__)
self.assertTrue(allequal(y2.mask, 0))
y3 = array(x1 * 1.0, mask=m)
self.assertTrue(filled(y3).dtype is (x1 * 1.0).dtype)
x4 = arange(4)
x4[2] = masked
y4 = resize(x4, (8,))
assert_equal(concatenate([x4, x4]), y4)
assert_equal(getmask(y4), [0, 0, 1, 0, 0, 0, 1, 0])
y5 = repeat(x4, (2, 2, 2, 2), axis=0)
assert_equal(y5, [0, 0, 1, 1, 2, 2, 3, 3])
y6 = repeat(x4, 2, axis=0)
assert_equal(y5, y6)
y7 = x4.repeat((2, 2, 2, 2), axis=0)
assert_equal(y5, y7)
y8 = x4.repeat(2, 0)
assert_equal(y5, y8)
y9 = x4.copy()
assert_equal(y9._data, x4._data)
assert_equal(y9._mask, x4._mask)
x = masked_array([1, 2, 3], mask=[0, 1, 0])
# Copy is False by default
y = masked_array(x)
assert_equal(y._data.ctypes.data, x._data.ctypes.data)
assert_equal(y._mask.ctypes.data, x._mask.ctypes.data)
y = masked_array(x, copy=True)
assert_not_equal(y._data.ctypes.data, x._data.ctypes.data)
assert_not_equal(y._mask.ctypes.data, x._mask.ctypes.data)
def test_deepcopy(self):
from copy import deepcopy
a = array([0, 1, 2], mask=[False, True, False])
copied = deepcopy(a)
assert_equal(copied.mask, a.mask)
assert_not_equal(id(a._mask), id(copied._mask))
copied[1] = 1
assert_equal(copied.mask, [0, 0, 0])
assert_equal(a.mask, [0, 1, 0])
copied = deepcopy(a)
assert_equal(copied.mask, a.mask)
copied.mask[1] = False
assert_equal(copied.mask, [0, 0, 0])
assert_equal(a.mask, [0, 1, 0])
def test_str_repr(self):
a = array([0, 1, 2], mask=[False, True, False])
assert_equal(str(a), '[0 -- 2]')
assert_equal(repr(a), 'masked_array(data = [0 -- 2],\n'
' mask = [False True False],\n'
' fill_value = 999999)\n')
def test_pickling(self):
# Tests pickling
a = arange(10)
a[::3] = masked
a.fill_value = 999
a_pickled = pickle.loads(a.dumps())
assert_equal(a_pickled._mask, a._mask)
assert_equal(a_pickled._data, a._data)
assert_equal(a_pickled.fill_value, 999)
def test_pickling_subbaseclass(self):
# Test pickling w/ a subclass of ndarray
a = array(np.matrix(list(range(10))), mask=[1, 0, 1, 0, 0] * 2)
a_pickled = pickle.loads(a.dumps())
assert_equal(a_pickled._mask, a._mask)
assert_equal(a_pickled, a)
self.assertTrue(isinstance(a_pickled._data, np.matrix))
def test_pickling_maskedconstant(self):
# Test pickling MaskedConstant
mc = np.ma.masked
mc_pickled = pickle.loads(mc.dumps())
assert_equal(mc_pickled._baseclass, mc._baseclass)
assert_equal(mc_pickled._mask, mc._mask)
assert_equal(mc_pickled._data, mc._data)
def test_pickling_wstructured(self):
# Tests pickling w/ structured array
a = array([(1, 1.), (2, 2.)], mask=[(0, 0), (0, 1)],
dtype=[('a', int), ('b', float)])
a_pickled = pickle.loads(a.dumps())
assert_equal(a_pickled._mask, a._mask)
assert_equal(a_pickled, a)
def test_pickling_keepalignment(self):
# Tests pickling w/ F_CONTIGUOUS arrays
a = arange(10)
a.shape = (-1, 2)
b = a.T
test = pickle.loads(pickle.dumps(b))
assert_equal(test, b)
def test_single_element_subscript(self):
# Tests single element subscripts of Maskedarrays.
a = array([1, 3, 2])
b = array([1, 3, 2], mask=[1, 0, 1])
assert_equal(a[0].shape, ())
assert_equal(b[0].shape, ())
assert_equal(b[1].shape, ())
def test_topython(self):
# Tests some communication issues with Python.
assert_equal(1, int(array(1)))
assert_equal(1.0, float(array(1)))
assert_equal(1, int(array([[[1]]])))
assert_equal(1.0, float(array([[1]])))
self.assertRaises(TypeError, float, array([1, 1]))
with warnings.catch_warnings():
warnings.simplefilter('ignore', UserWarning)
assert_(np.isnan(float(array([1], mask=[1]))))
a = array([1, 2, 3], mask=[1, 0, 0])
self.assertRaises(TypeError, lambda:float(a))
assert_equal(float(a[-1]), 3.)
self.assertTrue(np.isnan(float(a[0])))
self.assertRaises(TypeError, int, a)
assert_equal(int(a[-1]), 3)
self.assertRaises(MAError, lambda:int(a[0]))
def test_oddfeatures_1(self):
# Test of other odd features
x = arange(20)
x = x.reshape(4, 5)
x.flat[5] = 12
assert_(x[1, 0] == 12)
z = x + 10j * x
assert_equal(z.real, x)
assert_equal(z.imag, 10 * x)
assert_equal((z * conjugate(z)).real, 101 * x * x)
z.imag[...] = 0.0
x = arange(10)
x[3] = masked
assert_(str(x[3]) == str(masked))
c = x >= 8
assert_(count(where(c, masked, masked)) == 0)
assert_(shape(where(c, masked, masked)) == c.shape)
z = masked_where(c, x)
assert_(z.dtype is x.dtype)
assert_(z[3] is masked)
assert_(z[4] is not masked)
assert_(z[7] is not masked)
assert_(z[8] is masked)
assert_(z[9] is masked)
assert_equal(x, z)
def test_oddfeatures_2(self):
# Tests some more features.
x = array([1., 2., 3., 4., 5.])
c = array([1, 1, 1, 0, 0])
x[2] = masked
z = where(c, x, -x)
assert_equal(z, [1., 2., 0., -4., -5])
c[0] = masked
z = where(c, x, -x)
assert_equal(z, [1., 2., 0., -4., -5])
assert_(z[0] is masked)
assert_(z[1] is not masked)
assert_(z[2] is masked)
def test_oddfeatures_3(self):
# Tests some generic features
atest = array([10], mask=True)
btest = array([20])
idx = atest.mask
atest[idx] = btest[idx]
assert_equal(atest, [20])
def test_filled_w_object_dtype(self):
a = np.ma.masked_all(1, dtype='O')
assert_equal(a.filled('x')[0], 'x')
def test_filled_w_flexible_dtype(self):
# Test filled w/ flexible dtype
flexi = array([(1, 1, 1)],
dtype=[('i', int), ('s', '|S8'), ('f', float)])
flexi[0] = masked
assert_equal(flexi.filled(),
np.array([(default_fill_value(0),
default_fill_value('0'),
default_fill_value(0.),)], dtype=flexi.dtype))
flexi[0] = masked
assert_equal(flexi.filled(1),
np.array([(1, '1', 1.)], dtype=flexi.dtype))
def test_filled_w_mvoid(self):
# Test filled w/ mvoid
ndtype = [('a', int), ('b', float)]
a = mvoid((1, 2.), mask=[(0, 1)], dtype=ndtype)
# Filled using default
test = a.filled()
assert_equal(tuple(test), (1, default_fill_value(1.)))
# Explicit fill_value
test = a.filled((-1, -1))
assert_equal(tuple(test), (1, -1))
# Using predefined filling values
a.fill_value = (-999, -999)
assert_equal(tuple(a.filled()), (1, -999))
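        # Resolution order exercised here: an explicit argument to filled()
        # wins, then the instance fill_value, then the per-dtype default.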
def test_filled_w_nested_dtype(self):
# Test filled w/ nested dtype
ndtype = [('A', int), ('B', [('BA', int), ('BB', int)])]
a = array([(1, (1, 1)), (2, (2, 2))],
mask=[(0, (1, 0)), (0, (0, 1))], dtype=ndtype)
test = a.filled(0)
control = np.array([(1, (0, 1)), (2, (2, 0))], dtype=ndtype)
assert_equal(test, control)
test = a['B'].filled(0)
control = np.array([(0, 1), (2, 0)], dtype=a['B'].dtype)
assert_equal(test, control)
def test_filled_w_f_order(self):
# Test filled w/ F-contiguous array
a = array(np.array([(0, 1, 2), (4, 5, 6)], order='F'),
mask=np.array([(0, 0, 1), (1, 0, 0)], order='F'),
order='F') # this is currently ignored
self.assertTrue(a.flags['F_CONTIGUOUS'])
self.assertTrue(a.filled(0).flags['F_CONTIGUOUS'])
def test_optinfo_propagation(self):
# Checks that _optinfo dictionary isn't back-propagated
x = array([1, 2, 3, ], dtype=float)
x._optinfo['info'] = '???'
y = x.copy()
assert_equal(y._optinfo['info'], '???')
y._optinfo['info'] = '!!!'
assert_equal(x._optinfo['info'], '???')
def test_fancy_printoptions(self):
# Test printing a masked array w/ fancy dtype.
fancydtype = np.dtype([('x', int), ('y', [('t', int), ('s', float)])])
test = array([(1, (2, 3.0)), (4, (5, 6.0))],
mask=[(1, (0, 1)), (0, (1, 0))],
dtype=fancydtype)
control = "[(--, (2, --)) (4, (--, 6.0))]"
assert_equal(str(test), control)
def test_flatten_structured_array(self):
# Test flatten_structured_array on arrays
# On ndarray
ndtype = [('a', int), ('b', float)]
a = np.array([(1, 1), (2, 2)], dtype=ndtype)
test = flatten_structured_array(a)
control = np.array([[1., 1.], [2., 2.]], dtype=np.float)
assert_equal(test, control)
assert_equal(test.dtype, control.dtype)
# On masked_array
a = array([(1, 1), (2, 2)], mask=[(0, 1), (1, 0)], dtype=ndtype)
test = flatten_structured_array(a)
control = array([[1., 1.], [2., 2.]],
mask=[[0, 1], [1, 0]], dtype=np.float)
assert_equal(test, control)
assert_equal(test.dtype, control.dtype)
assert_equal(test.mask, control.mask)
# On masked array with nested structure
ndtype = [('a', int), ('b', [('ba', int), ('bb', float)])]
a = array([(1, (1, 1.1)), (2, (2, 2.2))],
mask=[(0, (1, 0)), (1, (0, 1))], dtype=ndtype)
test = flatten_structured_array(a)
control = array([[1., 1., 1.1], [2., 2., 2.2]],
mask=[[0, 1, 0], [1, 0, 1]], dtype=np.float)
assert_equal(test, control)
assert_equal(test.dtype, control.dtype)
assert_equal(test.mask, control.mask)
# Keeping the initial shape
ndtype = [('a', int), ('b', float)]
a = np.array([[(1, 1), ], [(2, 2), ]], dtype=ndtype)
test = flatten_structured_array(a)
        control = np.array([[[1., 1.], ], [[2., 2.], ]], dtype=float)
assert_equal(test, control)
assert_equal(test.dtype, control.dtype)
def test_void0d(self):
# Test creating a mvoid object
ndtype = [('a', int), ('b', int)]
a = np.array([(1, 2,)], dtype=ndtype)[0]
f = mvoid(a)
assert_(isinstance(f, mvoid))
a = masked_array([(1, 2)], mask=[(1, 0)], dtype=ndtype)[0]
assert_(isinstance(a, mvoid))
a = masked_array([(1, 2), (1, 2)], mask=[(1, 0), (0, 0)], dtype=ndtype)
f = mvoid(a._data[0], a._mask[0])
assert_(isinstance(f, mvoid))
def test_mvoid_getitem(self):
# Test mvoid.__getitem__
ndtype = [('a', int), ('b', int)]
a = masked_array([(1, 2,), (3, 4)], mask=[(0, 0), (1, 0)],
dtype=ndtype)
# w/o mask
f = a[0]
self.assertTrue(isinstance(f, mvoid))
assert_equal((f[0], f['a']), (1, 1))
assert_equal(f['b'], 2)
# w/ mask
f = a[1]
self.assertTrue(isinstance(f, mvoid))
self.assertTrue(f[0] is masked)
self.assertTrue(f['a'] is masked)
assert_equal(f[1], 4)
def test_mvoid_iter(self):
# Test iteration on __getitem__
ndtype = [('a', int), ('b', int)]
a = masked_array([(1, 2,), (3, 4)], mask=[(0, 0), (1, 0)],
dtype=ndtype)
# w/o mask
assert_equal(list(a[0]), [1, 2])
# w/ mask
assert_equal(list(a[1]), [masked, 4])
def test_mvoid_print(self):
# Test printing a mvoid
mx = array([(1, 1), (2, 2)], dtype=[('a', int), ('b', int)])
assert_equal(str(mx[0]), "(1, 1)")
mx['b'][0] = masked
ini_display = masked_print_option._display
masked_print_option.set_display("-X-")
try:
assert_equal(str(mx[0]), "(1, -X-)")
assert_equal(repr(mx[0]), "(1, -X-)")
finally:
masked_print_option.set_display(ini_display)
def test_mvoid_multidim_print(self):
# regression test for gh-6019
t_ma = masked_array(data = [([1, 2, 3],)],
mask = [([False, True, False],)],
fill_value = ([999999, 999999, 999999],),
dtype = [('a', '<i8', (3,))])
assert str(t_ma[0]) == "([1, --, 3],)"
assert repr(t_ma[0]) == "([1, --, 3],)"
        # additional tests with structured arrays
t_2d = masked_array(data = [([[1, 2], [3,4]],)],
mask = [([[False, True], [True, False]],)],
dtype = [('a', '<i8', (2,2))])
assert str(t_2d[0]) == "([[1, --], [--, 4]],)"
assert repr(t_2d[0]) == "([[1, --], [--, 4]],)"
t_0d = masked_array(data = [(1,2)],
mask = [(True,False)],
dtype = [('a', '<i8'), ('b', '<i8')])
assert str(t_0d[0]) == "(--, 2)"
assert repr(t_0d[0]) == "(--, 2)"
t_2d = masked_array(data = [([[1, 2], [3,4]], 1)],
mask = [([[False, True], [True, False]], False)],
dtype = [('a', '<i8', (2,2)), ('b', float)])
assert str(t_2d[0]) == "([[1, --], [--, 4]], 1.0)"
assert repr(t_2d[0]) == "([[1, --], [--, 4]], 1.0)"
t_ne = masked_array(data=[(1, (1, 1))],
mask=[(True, (True, False))],
dtype = [('a', '<i8'), ('b', 'i4,i4')])
assert str(t_ne[0]) == "(--, (--, 1))"
assert repr(t_ne[0]) == "(--, (--, 1))"
def test_object_with_array(self):
mx1 = masked_array([1.], mask=[True])
mx2 = masked_array([1., 2.])
mx = masked_array([mx1, mx2], mask=[False, True])
assert mx[0] is mx1
assert mx[1] is not mx2
assert np.all(mx[1].data == mx2.data)
assert np.all(mx[1].mask)
# check that we return a view.
mx[1].data[0] = 0.
assert mx2[0] == 0.
class TestMaskedArrayArithmetic(TestCase):
# Base test class for MaskedArrays.
def setUp(self):
# Base data definition.
x = np.array([1., 1., 1., -2., pi/2.0, 4., 5., -10., 10., 1., 2., 3.])
y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.])
a10 = 10.
m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]
m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1]
xm = masked_array(x, mask=m1)
ym = masked_array(y, mask=m2)
z = np.array([-.5, 0., .5, .8])
zm = masked_array(z, mask=[0, 1, 0, 0])
xf = np.where(m1, 1e+20, x)
xm.set_fill_value(1e+20)
self.d = (x, y, a10, m1, m2, xm, ym, z, zm, xf)
self.err_status = np.geterr()
np.seterr(divide='ignore', invalid='ignore')
def tearDown(self):
np.seterr(**self.err_status)
def test_basic_arithmetic(self):
# Test of basic arithmetic.
(x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d
a2d = array([[1, 2], [0, 4]])
a2dm = masked_array(a2d, [[0, 0], [1, 0]])
assert_equal(a2d * a2d, a2d * a2dm)
assert_equal(a2d + a2d, a2d + a2dm)
assert_equal(a2d - a2d, a2d - a2dm)
for s in [(12,), (4, 3), (2, 6)]:
x = x.reshape(s)
y = y.reshape(s)
xm = xm.reshape(s)
ym = ym.reshape(s)
xf = xf.reshape(s)
assert_equal(-x, -xm)
assert_equal(x + y, xm + ym)
assert_equal(x - y, xm - ym)
assert_equal(x * y, xm * ym)
assert_equal(x / y, xm / ym)
assert_equal(a10 + y, a10 + ym)
assert_equal(a10 - y, a10 - ym)
assert_equal(a10 * y, a10 * ym)
assert_equal(a10 / y, a10 / ym)
assert_equal(x + a10, xm + a10)
assert_equal(x - a10, xm - a10)
assert_equal(x * a10, xm * a10)
assert_equal(x / a10, xm / a10)
assert_equal(x ** 2, xm ** 2)
assert_equal(abs(x) ** 2.5, abs(xm) ** 2.5)
assert_equal(x ** y, xm ** ym)
assert_equal(np.add(x, y), add(xm, ym))
assert_equal(np.subtract(x, y), subtract(xm, ym))
assert_equal(np.multiply(x, y), multiply(xm, ym))
assert_equal(np.divide(x, y), divide(xm, ym))
def test_divide_on_different_shapes(self):
x = arange(6, dtype=float)
x.shape = (2, 3)
y = arange(3, dtype=float)
z = x / y
assert_equal(z, [[-1., 1., 1.], [-1., 4., 2.5]])
assert_equal(z.mask, [[1, 0, 0], [1, 0, 0]])
z = x / y[None,:]
assert_equal(z, [[-1., 1., 1.], [-1., 4., 2.5]])
assert_equal(z.mask, [[1, 0, 0], [1, 0, 0]])
y = arange(2, dtype=float)
z = x / y[:, None]
assert_equal(z, [[-1., -1., -1.], [3., 4., 5.]])
assert_equal(z.mask, [[1, 1, 1], [0, 0, 0]])
def test_mixed_arithmetic(self):
        # Tests mixed arithmetic.
na = np.array([1])
ma = array([1])
self.assertTrue(isinstance(na + ma, MaskedArray))
self.assertTrue(isinstance(ma + na, MaskedArray))
def test_limits_arithmetic(self):
tiny = np.finfo(float).tiny
a = array([tiny, 1. / tiny, 0.])
assert_equal(getmaskarray(a / 2), [0, 0, 0])
assert_equal(getmaskarray(2 / a), [1, 0, 1])
def test_masked_singleton_arithmetic(self):
        # Tests some scalar arithmetic on MaskedArrays.
# Masked singleton should remain masked no matter what
xm = array(0, mask=1)
self.assertTrue((1 / array(0)).mask)
self.assertTrue((1 + xm).mask)
self.assertTrue((-xm).mask)
self.assertTrue(maximum(xm, xm).mask)
self.assertTrue(minimum(xm, xm).mask)
def test_masked_singleton_equality(self):
        # Tests (in)equality on the masked singleton
a = array([1, 2, 3], mask=[1, 1, 0])
assert_((a[0] == 0) is masked)
assert_((a[0] != 0) is masked)
assert_equal((a[-1] == 0), False)
assert_equal((a[-1] != 0), True)
def test_arithmetic_with_masked_singleton(self):
# Checks that there's no collapsing to masked
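        # Multiplying by the masked singleton should give a fully masked
        # array of the same shape, not collapse to the scalar `masked`.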
x = masked_array([1, 2])
y = x * masked
assert_equal(y.shape, x.shape)
assert_equal(y._mask, [True, True])
y = x[0] * masked
assert_(y is masked)
y = x + masked
assert_equal(y.shape, x.shape)
assert_equal(y._mask, [True, True])
def test_arithmetic_with_masked_singleton_on_1d_singleton(self):
# Check that we're not losing the shape of a singleton
x = masked_array([1, ])
y = x + masked
assert_equal(y.shape, x.shape)
assert_equal(y.mask, [True, ])
def test_scalar_arithmetic(self):
x = array(0, mask=0)
assert_equal(x.filled().ctypes.data, x.ctypes.data)
# Make sure we don't lose the shape in some circumstances
xm = array((0, 0)) / 0.
assert_equal(xm.shape, (2,))
assert_equal(xm.mask, [1, 1])
def test_basic_ufuncs(self):
# Test various functions such as sin, cos.
(x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d
assert_equal(np.cos(x), cos(xm))
assert_equal(np.cosh(x), cosh(xm))
assert_equal(np.sin(x), sin(xm))
assert_equal(np.sinh(x), sinh(xm))
assert_equal(np.tan(x), tan(xm))
assert_equal(np.tanh(x), tanh(xm))
assert_equal(np.sqrt(abs(x)), sqrt(xm))
assert_equal(np.log(abs(x)), log(xm))
assert_equal(np.log10(abs(x)), log10(xm))
assert_equal(np.exp(x), exp(xm))
assert_equal(np.arcsin(z), arcsin(zm))
assert_equal(np.arccos(z), arccos(zm))
assert_equal(np.arctan(z), arctan(zm))
assert_equal(np.arctan2(x, y), arctan2(xm, ym))
assert_equal(np.absolute(x), absolute(xm))
assert_equal(np.angle(x + 1j*y), angle(xm + 1j*ym))
assert_equal(np.angle(x + 1j*y, deg=True), angle(xm + 1j*ym, deg=True))
assert_equal(np.equal(x, y), equal(xm, ym))
assert_equal(np.not_equal(x, y), not_equal(xm, ym))
assert_equal(np.less(x, y), less(xm, ym))
assert_equal(np.greater(x, y), greater(xm, ym))
assert_equal(np.less_equal(x, y), less_equal(xm, ym))
assert_equal(np.greater_equal(x, y), greater_equal(xm, ym))
assert_equal(np.conjugate(x), conjugate(xm))
def test_count_func(self):
# Tests count
assert_equal(1, count(1))
        assert_equal(0, count(array(1, mask=[1])))
ott = array([0., 1., 2., 3.], mask=[1, 0, 0, 0])
res = count(ott)
self.assertTrue(res.dtype.type is np.intp)
assert_equal(3, res)
ott = ott.reshape((2, 2))
res = count(ott)
assert_(res.dtype.type is np.intp)
assert_equal(3, res)
res = count(ott, 0)
assert_(isinstance(res, ndarray))
assert_equal([1, 2], res)
assert_(getmask(res) is nomask)
ott = array([0., 1., 2., 3.])
res = count(ott, 0)
assert_(isinstance(res, ndarray))
assert_(res.dtype.type is np.intp)
assert_raises(IndexError, ott.count, 1)
def test_minmax_func(self):
# Tests minimum and maximum.
(x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d
        # max/min don't work on shaped arrays, so compare on raveled copies
xr = np.ravel(x)
xmr = ravel(xm)
# following are true because of careful selection of data
assert_equal(max(xr), maximum(xmr))
assert_equal(min(xr), minimum(xmr))
assert_equal(minimum([1, 2, 3], [4, 0, 9]), [1, 0, 3])
assert_equal(maximum([1, 2, 3], [4, 0, 9]), [4, 2, 9])
x = arange(5)
y = arange(5) - 2
x[3] = masked
y[0] = masked
assert_equal(minimum(x, y), where(less(x, y), x, y))
assert_equal(maximum(x, y), where(greater(x, y), x, y))
assert_(minimum(x) == 0)
assert_(maximum(x) == 4)
x = arange(4).reshape(2, 2)
x[-1, -1] = masked
assert_equal(maximum(x), 2)
def test_minimummaximum_func(self):
a = np.ones((2, 2))
aminimum = minimum(a, a)
self.assertTrue(isinstance(aminimum, MaskedArray))
assert_equal(aminimum, np.minimum(a, a))
aminimum = minimum.outer(a, a)
self.assertTrue(isinstance(aminimum, MaskedArray))
assert_equal(aminimum, np.minimum.outer(a, a))
amaximum = maximum(a, a)
self.assertTrue(isinstance(amaximum, MaskedArray))
assert_equal(amaximum, np.maximum(a, a))
amaximum = maximum.outer(a, a)
self.assertTrue(isinstance(amaximum, MaskedArray))
assert_equal(amaximum, np.maximum.outer(a, a))
def test_minmax_reduce(self):
        # Test np.maximum.reduce on an array w/ an all-False mask
a = array([1, 2, 3], mask=[False, False, False])
b = np.maximum.reduce(a)
assert_equal(b, 3)
def test_minmax_funcs_with_output(self):
# Tests the min/max functions with explicit outputs
mask = np.random.rand(12).round()
xm = array(np.random.uniform(0, 10, 12), mask=mask)
xm.shape = (3, 4)
for funcname in ('min', 'max'):
# Initialize
npfunc = getattr(np, funcname)
mafunc = getattr(numpy.ma.core, funcname)
# Use the np version
nout = np.empty((4,), dtype=int)
try:
result = npfunc(xm, axis=0, out=nout)
except MaskError:
pass
nout = np.empty((4,), dtype=float)
result = npfunc(xm, axis=0, out=nout)
self.assertTrue(result is nout)
# Use the ma version
nout.fill(-999)
result = mafunc(xm, axis=0, out=nout)
self.assertTrue(result is nout)
def test_minmax_methods(self):
# Additional tests on max/min
(_, _, _, _, _, xm, _, _, _, _) = self.d
xm.shape = (xm.size,)
assert_equal(xm.max(), 10)
self.assertTrue(xm[0].max() is masked)
self.assertTrue(xm[0].max(0) is masked)
self.assertTrue(xm[0].max(-1) is masked)
assert_equal(xm.min(), -10.)
self.assertTrue(xm[0].min() is masked)
self.assertTrue(xm[0].min(0) is masked)
self.assertTrue(xm[0].min(-1) is masked)
assert_equal(xm.ptp(), 20.)
self.assertTrue(xm[0].ptp() is masked)
self.assertTrue(xm[0].ptp(0) is masked)
self.assertTrue(xm[0].ptp(-1) is masked)
x = array([1, 2, 3], mask=True)
self.assertTrue(x.min() is masked)
self.assertTrue(x.max() is masked)
self.assertTrue(x.ptp() is masked)
def test_addsumprod(self):
# Tests add, sum, product.
(x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d
assert_equal(np.add.reduce(x), add.reduce(x))
assert_equal(np.add.accumulate(x), add.accumulate(x))
assert_equal(4, sum(array(4), axis=0))
assert_equal(np.sum(x, axis=0), sum(x, axis=0))
assert_equal(np.sum(filled(xm, 0), axis=0), sum(xm, axis=0))
assert_equal(np.sum(x, 0), sum(x, 0))
        assert_equal(np.prod(x, axis=0), product(x, axis=0))
        assert_equal(np.prod(x, 0), product(x, 0))
        assert_equal(np.prod(filled(xm, 1), axis=0), product(xm, axis=0))
s = (3, 4)
x.shape = y.shape = xm.shape = ym.shape = s
if len(s) > 1:
assert_equal(np.concatenate((x, y), 1), concatenate((xm, ym), 1))
assert_equal(np.add.reduce(x, 1), add.reduce(x, 1))
assert_equal(np.sum(x, 1), sum(x, 1))
            assert_equal(np.prod(x, 1), product(x, 1))
def test_binops_d2D(self):
# Test binary operations on 2D data
a = array([[1.], [2.], [3.]], mask=[[False], [True], [True]])
b = array([[2., 3.], [4., 5.], [6., 7.]])
test = a * b
control = array([[2., 3.], [2., 2.], [3., 3.]],
mask=[[0, 0], [1, 1], [1, 1]])
assert_equal(test, control)
assert_equal(test.data, control.data)
assert_equal(test.mask, control.mask)
test = b * a
control = array([[2., 3.], [4., 5.], [6., 7.]],
mask=[[0, 0], [1, 1], [1, 1]])
assert_equal(test, control)
assert_equal(test.data, control.data)
assert_equal(test.mask, control.mask)
a = array([[1.], [2.], [3.]])
b = array([[2., 3.], [4., 5.], [6., 7.]],
mask=[[0, 0], [0, 0], [0, 1]])
test = a * b
control = array([[2, 3], [8, 10], [18, 3]],
mask=[[0, 0], [0, 0], [0, 1]])
assert_equal(test, control)
assert_equal(test.data, control.data)
assert_equal(test.mask, control.mask)
test = b * a
control = array([[2, 3], [8, 10], [18, 7]],
mask=[[0, 0], [0, 0], [0, 1]])
assert_equal(test, control)
assert_equal(test.data, control.data)
assert_equal(test.mask, control.mask)
def test_domained_binops_d2D(self):
# Test domained binary operations on 2D data
a = array([[1.], [2.], [3.]], mask=[[False], [True], [True]])
b = array([[2., 3.], [4., 5.], [6., 7.]])
test = a / b
control = array([[1. / 2., 1. / 3.], [2., 2.], [3., 3.]],
mask=[[0, 0], [1, 1], [1, 1]])
assert_equal(test, control)
assert_equal(test.data, control.data)
assert_equal(test.mask, control.mask)
test = b / a
control = array([[2. / 1., 3. / 1.], [4., 5.], [6., 7.]],
mask=[[0, 0], [1, 1], [1, 1]])
assert_equal(test, control)
assert_equal(test.data, control.data)
assert_equal(test.mask, control.mask)
a = array([[1.], [2.], [3.]])
b = array([[2., 3.], [4., 5.], [6., 7.]],
mask=[[0, 0], [0, 0], [0, 1]])
test = a / b
control = array([[1. / 2, 1. / 3], [2. / 4, 2. / 5], [3. / 6, 3]],
mask=[[0, 0], [0, 0], [0, 1]])
assert_equal(test, control)
assert_equal(test.data, control.data)
assert_equal(test.mask, control.mask)
test = b / a
control = array([[2 / 1., 3 / 1.], [4 / 2., 5 / 2.], [6 / 3., 7]],
mask=[[0, 0], [0, 0], [0, 1]])
assert_equal(test, control)
assert_equal(test.data, control.data)
assert_equal(test.mask, control.mask)
def test_noshrinking(self):
# Check that we don't shrink a mask when not wanted
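        # With shrink=False, an all-False mask must stay a full boolean
        # array instead of collapsing to nomask.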
# Binary operations
a = masked_array([1., 2., 3.], mask=[False, False, False],
shrink=False)
b = a + 1
assert_equal(b.mask, [0, 0, 0])
# In place binary operation
a += 1
assert_equal(a.mask, [0, 0, 0])
# Domained binary operation
b = a / 1.
assert_equal(b.mask, [0, 0, 0])
# In place binary operation
a /= 1.
assert_equal(a.mask, [0, 0, 0])
    def test_noshrink_on_creation(self):
# Check that the mask is not shrunk on array creation when not wanted
a = np.ma.masked_values([1., 2.5, 3.1], 1.5, shrink=False)
assert_equal(a.mask, [0, 0, 0])
def test_mod(self):
# Tests mod
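        # The result's mask is the union of the operands' masks, plus the
        # positions where the divisor is zero (the domain of mod).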
(x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d
assert_equal(mod(x, y), mod(xm, ym))
test = mod(ym, xm)
assert_equal(test, np.mod(ym, xm))
assert_equal(test.mask, mask_or(xm.mask, ym.mask))
test = mod(xm, ym)
assert_equal(test, np.mod(xm, ym))
assert_equal(test.mask, mask_or(mask_or(xm.mask, ym.mask), (ym == 0)))
def test_TakeTransposeInnerOuter(self):
# Test of take, transpose, inner, outer products
x = arange(24)
y = np.arange(24)
x[5:6] = masked
x = x.reshape(2, 3, 4)
y = y.reshape(2, 3, 4)
assert_equal(np.transpose(y, (2, 0, 1)), transpose(x, (2, 0, 1)))
assert_equal(np.take(y, (2, 0, 1), 1), take(x, (2, 0, 1), 1))
assert_equal(np.inner(filled(x, 0), filled(y, 0)),
inner(x, y))
assert_equal(np.outer(filled(x, 0), filled(y, 0)),
outer(x, y))
y = array(['abc', 1, 'def', 2, 3], object)
y[2] = masked
t = take(y, [0, 3, 4])
assert_(t[0] == 'abc')
assert_(t[1] == 2)
assert_(t[2] == 3)
def test_imag_real(self):
# Check complex
xx = array([1 + 10j, 20 + 2j], mask=[1, 0])
assert_equal(xx.imag, [10, 2])
assert_equal(xx.imag.filled(), [1e+20, 2])
assert_equal(xx.imag.dtype, xx._data.imag.dtype)
assert_equal(xx.real, [1, 20])
assert_equal(xx.real.filled(), [1e+20, 20])
assert_equal(xx.real.dtype, xx._data.real.dtype)
def test_methods_with_output(self):
xm = array(np.random.uniform(0, 10, 12)).reshape(3, 4)
xm[:, 0] = xm[0] = xm[-1, -1] = masked
funclist = ('sum', 'prod', 'var', 'std', 'max', 'min', 'ptp', 'mean',)
for funcname in funclist:
npfunc = getattr(np, funcname)
xmmeth = getattr(xm, funcname)
# A ndarray as explicit input
output = np.empty(4, dtype=float)
output.fill(-9999)
result = npfunc(xm, axis=0, out=output)
# ... the result should be the given output
assert_(result is output)
assert_equal(result, xmmeth(axis=0, out=output))
output = empty(4, dtype=int)
result = xmmeth(axis=0, out=output)
assert_(result is output)
assert_(output[0] is masked)
def test_eq_on_structured(self):
# Test the equality of structured arrays
ndtype = [('A', int), ('B', int)]
a = array([(1, 1), (2, 2)], mask=[(0, 1), (0, 0)], dtype=ndtype)
test = (a == a)
assert_equal(test, [True, True])
assert_equal(test.mask, [False, False])
b = array([(1, 1), (2, 2)], mask=[(1, 0), (0, 0)], dtype=ndtype)
test = (a == b)
assert_equal(test, [False, True])
assert_equal(test.mask, [True, False])
b = array([(1, 1), (2, 2)], mask=[(0, 1), (1, 0)], dtype=ndtype)
test = (a == b)
assert_equal(test, [True, False])
assert_equal(test.mask, [False, False])
def test_ne_on_structured(self):
# Test the equality of structured arrays
ndtype = [('A', int), ('B', int)]
a = array([(1, 1), (2, 2)], mask=[(0, 1), (0, 0)], dtype=ndtype)
test = (a != a)
assert_equal(test, [False, False])
assert_equal(test.mask, [False, False])
b = array([(1, 1), (2, 2)], mask=[(1, 0), (0, 0)], dtype=ndtype)
test = (a != b)
assert_equal(test, [True, False])
assert_equal(test.mask, [True, False])
b = array([(1, 1), (2, 2)], mask=[(0, 1), (1, 0)], dtype=ndtype)
test = (a != b)
assert_equal(test, [False, True])
assert_equal(test.mask, [False, False])
def test_eq_w_None(self):
# Really, comparisons with None should not be done, but check them
# anyway. Note that pep8 will flag these tests.
# With partial mask
a = array([1, 2], mask=[0, 1])
assert_equal(a == None, False)
assert_equal(a.data == None, False)
assert_equal(a.mask == None, False)
assert_equal(a != None, True)
# With nomask
a = array([1, 2], mask=False)
assert_equal(a == None, False)
assert_equal(a != None, True)
# With complete mask
a = array([1, 2], mask=True)
assert_equal(a == None, False)
assert_equal(a != None, True)
# Fully masked, even comparison to None should return "masked"
a = masked
assert_equal(a == None, masked)
def test_eq_w_scalar(self):
a = array(1)
assert_equal(a == 1, True)
assert_equal(a == 0, False)
assert_equal(a != 1, False)
assert_equal(a != 0, True)
def test_numpyarithmetics(self):
# Check that the mask is not back-propagated when using numpy functions
a = masked_array([-1, 0, 1, 2, 3], mask=[0, 0, 0, 0, 1])
control = masked_array([np.nan, np.nan, 0, np.log(2), -1],
mask=[1, 1, 0, 0, 1])
test = log(a)
assert_equal(test, control)
assert_equal(test.mask, control.mask)
assert_equal(a.mask, [0, 0, 0, 0, 1])
test = np.log(a)
assert_equal(test, control)
assert_equal(test.mask, control.mask)
assert_equal(a.mask, [0, 0, 0, 0, 1])
class TestMaskedArrayAttributes(TestCase):
def test_keepmask(self):
# Tests the keep mask flag
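        # With keep_mask=True (the default), a new mask is OR-ed with the
        # input's existing mask; keep_mask=False replaces it outright.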
x = masked_array([1, 2, 3], mask=[1, 0, 0])
mx = masked_array(x)
assert_equal(mx.mask, x.mask)
mx = masked_array(x, mask=[0, 1, 0], keep_mask=False)
assert_equal(mx.mask, [0, 1, 0])
mx = masked_array(x, mask=[0, 1, 0], keep_mask=True)
assert_equal(mx.mask, [1, 1, 0])
        # keep_mask defaults to True
mx = masked_array(x, mask=[0, 1, 0])
assert_equal(mx.mask, [1, 1, 0])
def test_hardmask(self):
# Test hard_mask
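        # A hard mask protects masked entries: assignments can neither
        # overwrite nor unmask them. A soft mask lets assignment update
        # the data and clear the mask.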
d = arange(5)
n = [0, 0, 0, 1, 1]
m = make_mask(n)
xh = array(d, mask=m, hard_mask=True)
# We need to copy, to avoid updating d in xh !
xs = array(d, mask=m, hard_mask=False, copy=True)
xh[[1, 4]] = [10, 40]
xs[[1, 4]] = [10, 40]
assert_equal(xh._data, [0, 10, 2, 3, 4])
assert_equal(xs._data, [0, 10, 2, 3, 40])
#assert_equal(xh.mask.ctypes._data, m.ctypes._data)
assert_equal(xs.mask, [0, 0, 0, 1, 0])
self.assertTrue(xh._hardmask)
self.assertTrue(not xs._hardmask)
xh[1:4] = [10, 20, 30]
xs[1:4] = [10, 20, 30]
assert_equal(xh._data, [0, 10, 20, 3, 4])
assert_equal(xs._data, [0, 10, 20, 30, 40])
#assert_equal(xh.mask.ctypes._data, m.ctypes._data)
assert_equal(xs.mask, nomask)
xh[0] = masked
xs[0] = masked
assert_equal(xh.mask, [1, 0, 0, 1, 1])
assert_equal(xs.mask, [1, 0, 0, 0, 0])
xh[:] = 1
xs[:] = 1
assert_equal(xh._data, [0, 1, 1, 3, 4])
assert_equal(xs._data, [1, 1, 1, 1, 1])
assert_equal(xh.mask, [1, 0, 0, 1, 1])
assert_equal(xs.mask, nomask)
# Switch to soft mask
xh.soften_mask()
xh[:] = arange(5)
assert_equal(xh._data, [0, 1, 2, 3, 4])
assert_equal(xh.mask, nomask)
# Switch back to hard mask
xh.harden_mask()
xh[xh < 3] = masked
assert_equal(xh._data, [0, 1, 2, 3, 4])
assert_equal(xh._mask, [1, 1, 1, 0, 0])
xh[filled(xh > 1, False)] = 5
assert_equal(xh._data, [0, 1, 2, 5, 5])
assert_equal(xh._mask, [1, 1, 1, 0, 0])
xh = array([[1, 2], [3, 4]], mask=[[1, 0], [0, 0]], hard_mask=True)
xh[0] = 0
assert_equal(xh._data, [[1, 0], [3, 4]])
assert_equal(xh._mask, [[1, 0], [0, 0]])
xh[-1, -1] = 5
assert_equal(xh._data, [[1, 0], [3, 5]])
assert_equal(xh._mask, [[1, 0], [0, 0]])
xh[filled(xh < 5, False)] = 2
assert_equal(xh._data, [[1, 2], [2, 5]])
assert_equal(xh._mask, [[1, 0], [0, 0]])
def test_hardmask_again(self):
# Another test of hardmask
d = arange(5)
n = [0, 0, 0, 1, 1]
m = make_mask(n)
xh = array(d, mask=m, hard_mask=True)
xh[4:5] = 999
#assert_equal(xh.mask.ctypes._data, m.ctypes._data)
xh[0:1] = 999
assert_equal(xh._data, [999, 1, 2, 3, 4])
def test_hardmask_oncemore_yay(self):
# OK, yet another test of hardmask
        # Make sure that harden_mask/soften_mask/unshare_mask return self
a = array([1, 2, 3], mask=[1, 0, 0])
b = a.harden_mask()
assert_equal(a, b)
b[0] = 0
assert_equal(a, b)
assert_equal(b, array([1, 2, 3], mask=[1, 0, 0]))
a = b.soften_mask()
a[0] = 0
assert_equal(a, b)
assert_equal(b, array([0, 2, 3], mask=[0, 0, 0]))
def test_smallmask(self):
# Checks the behaviour of _smallmask
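        # With _smallmask True (the default), a mask that becomes all-False
        # collapses to nomask; with _smallmask False it stays a full array.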
a = arange(10)
a[1] = masked
a[1] = 1
assert_equal(a._mask, nomask)
a = arange(10)
a._smallmask = False
a[1] = masked
a[1] = 1
assert_equal(a._mask, zeros(10))
def test_shrink_mask(self):
# Tests .shrink_mask()
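        # shrink_mask() collapses an all-False mask to nomask in place
        # and returns self.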
a = array([1, 2, 3], mask=[0, 0, 0])
b = a.shrink_mask()
assert_equal(a, b)
assert_equal(a.mask, nomask)
def test_flat(self):
# Test that flat can return all types of items [#4585, #4615]
# test simple access
test = masked_array(np.matrix([[1, 2, 3]]), mask=[0, 0, 1])
assert_equal(test.flat[1], 2)
assert_equal(test.flat[2], masked)
self.assertTrue(np.all(test.flat[0:2] == test[0, 0:2]))
# Test flat on masked_matrices
test = masked_array(np.matrix([[1, 2, 3]]), mask=[0, 0, 1])
test.flat = masked_array([3, 2, 1], mask=[1, 0, 0])
control = masked_array(np.matrix([[3, 2, 1]]), mask=[1, 0, 0])
assert_equal(test, control)
# Test setting
test = masked_array(np.matrix([[1, 2, 3]]), mask=[0, 0, 1])
testflat = test.flat
testflat[:] = testflat[[2, 1, 0]]
assert_equal(test, control)
testflat[0] = 9
assert_equal(test[0, 0], 9)
# test 2-D record array
# ... on structured array w/ masked records
x = array([[(1, 1.1, 'one'), (2, 2.2, 'two'), (3, 3.3, 'thr')],
[(4, 4.4, 'fou'), (5, 5.5, 'fiv'), (6, 6.6, 'six')]],
dtype=[('a', int), ('b', float), ('c', '|S8')])
x['a'][0, 1] = masked
x['b'][1, 0] = masked
x['c'][0, 2] = masked
x[-1, -1] = masked
xflat = x.flat
assert_equal(xflat[0], x[0, 0])
assert_equal(xflat[1], x[0, 1])
assert_equal(xflat[2], x[0, 2])
assert_equal(xflat[:3], x[0])
assert_equal(xflat[3], x[1, 0])
assert_equal(xflat[4], x[1, 1])
assert_equal(xflat[5], x[1, 2])
assert_equal(xflat[3:], x[1])
assert_equal(xflat[-1], x[-1, -1])
i = 0
j = 0
for xf in xflat:
assert_equal(xf, x[j, i])
i += 1
if i >= x.shape[-1]:
i = 0
j += 1
# test that matrices keep the correct shape (#4615)
a = masked_array(np.matrix(np.eye(2)), mask=0)
b = a.flat
b01 = b[:2]
assert_equal(b01.data, array([[1., 0.]]))
assert_equal(b01.mask, array([[False, False]]))
def test_assign_dtype(self):
# check that the mask's dtype is updated when dtype is changed
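        # Reassigning .dtype must reshape the mask to match; a change that
        # would alter the number of mask elements per item is rejected.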
a = np.zeros(4, dtype='f4,i4')
m = np.ma.array(a)
m.dtype = np.dtype('f4')
repr(m) # raises?
assert_equal(m.dtype, np.dtype('f4'))
# check that dtype changes that change shape of mask too much
# are not allowed
def assign():
m = np.ma.array(a)
m.dtype = np.dtype('f8')
assert_raises(ValueError, assign)
b = a.view(dtype='f4', type=np.ma.MaskedArray) # raises?
assert_equal(b.dtype, np.dtype('f4'))
# check that nomask is preserved
a = np.zeros(4, dtype='f4')
m = np.ma.array(a)
m.dtype = np.dtype('f4,i4')
assert_equal(m.dtype, np.dtype('f4,i4'))
assert_equal(m._mask, np.ma.nomask)
class TestFillingValues(TestCase):
def test_check_on_scalar(self):
# Test _check_fill_value set to valid and invalid values
_check_fill_value = np.ma.core._check_fill_value
fval = _check_fill_value(0, int)
assert_equal(fval, 0)
fval = _check_fill_value(None, int)
assert_equal(fval, default_fill_value(0))
fval = _check_fill_value(0, "|S3")
assert_equal(fval, asbytes("0"))
fval = _check_fill_value(None, "|S3")
assert_equal(fval, default_fill_value(b"camelot!"))
self.assertRaises(TypeError, _check_fill_value, 1e+20, int)
self.assertRaises(TypeError, _check_fill_value, 'stuff', int)
def test_check_on_fields(self):
# Tests _check_fill_value with records
_check_fill_value = np.ma.core._check_fill_value
ndtype = [('a', int), ('b', float), ('c', "|S3")]
# A check on a list should return a single record
fval = _check_fill_value([-999, -12345678.9, "???"], ndtype)
self.assertTrue(isinstance(fval, ndarray))
assert_equal(fval.item(), [-999, -12345678.9, asbytes("???")])
# A check on None should output the defaults
fval = _check_fill_value(None, ndtype)
self.assertTrue(isinstance(fval, ndarray))
assert_equal(fval.item(), [default_fill_value(0),
default_fill_value(0.),
asbytes(default_fill_value("0"))])
#.....Using a structured type as fill_value should work
fill_val = np.array((-999, -12345678.9, "???"), dtype=ndtype)
fval = _check_fill_value(fill_val, ndtype)
self.assertTrue(isinstance(fval, ndarray))
assert_equal(fval.item(), [-999, -12345678.9, asbytes("???")])
#.....Using a flexible type w/ a different type shouldn't matter
# BEHAVIOR in 1.5 and earlier: match structured types by position
#fill_val = np.array((-999, -12345678.9, "???"),
# dtype=[("A", int), ("B", float), ("C", "|S3")])
# BEHAVIOR in 1.6 and later: match structured types by name
fill_val = np.array(("???", -999, -12345678.9),
dtype=[("c", "|S3"), ("a", int), ("b", float), ])
fval = _check_fill_value(fill_val, ndtype)
self.assertTrue(isinstance(fval, ndarray))
assert_equal(fval.item(), [-999, -12345678.9, asbytes("???")])
#.....Using an object-array shouldn't matter either
fill_val = np.ndarray(shape=(1,), dtype=object)
fill_val[0] = (-999, -12345678.9, asbytes("???"))
fval = _check_fill_value(fill_val, object)
self.assertTrue(isinstance(fval, ndarray))
assert_equal(fval.item(), [-999, -12345678.9, asbytes("???")])
# NOTE: This test was never run properly as "fill_value" rather than
# "fill_val" was assigned. Written properly, it fails.
#fill_val = np.array((-999, -12345678.9, "???"))
#fval = _check_fill_value(fill_val, ndtype)
#self.assertTrue(isinstance(fval, ndarray))
#assert_equal(fval.item(), [-999, -12345678.9, asbytes("???")])
#.....One-field-only flexible type should work as well
ndtype = [("a", int)]
fval = _check_fill_value(-999999999, ndtype)
self.assertTrue(isinstance(fval, ndarray))
assert_equal(fval.item(), (-999999999,))
def test_fillvalue_conversion(self):
# Tests the behavior of fill_value during conversion
# We had a tailored comment to make sure special attributes are
# properly dealt with
a = array(asbytes_nested(['3', '4', '5']))
a._optinfo.update({'comment':"updated!"})
b = array(a, dtype=int)
assert_equal(b._data, [3, 4, 5])
assert_equal(b.fill_value, default_fill_value(0))
b = array(a, dtype=float)
assert_equal(b._data, [3, 4, 5])
assert_equal(b.fill_value, default_fill_value(0.))
b = a.astype(int)
assert_equal(b._data, [3, 4, 5])
assert_equal(b.fill_value, default_fill_value(0))
assert_equal(b._optinfo['comment'], "updated!")
b = a.astype([('a', '|S3')])
assert_equal(b['a']._data, a._data)
assert_equal(b['a'].fill_value, a.fill_value)
def test_fillvalue(self):
# Yet more fun with the fill_value
data = masked_array([1, 2, 3], fill_value=-999)
series = data[[0, 2, 1]]
assert_equal(series._fill_value, data._fill_value)
mtype = [('f', float), ('s', '|S3')]
x = array([(1, 'a'), (2, 'b'), (pi, 'pi')], dtype=mtype)
x.fill_value = 999
assert_equal(x.fill_value.item(), [999., asbytes('999')])
assert_equal(x['f'].fill_value, 999)
assert_equal(x['s'].fill_value, asbytes('999'))
x.fill_value = (9, '???')
assert_equal(x.fill_value.item(), (9, asbytes('???')))
assert_equal(x['f'].fill_value, 9)
assert_equal(x['s'].fill_value, asbytes('???'))
x = array([1, 2, 3.1])
x.fill_value = 999
assert_equal(np.asarray(x.fill_value).dtype, float)
assert_equal(x.fill_value, 999.)
assert_equal(x._fill_value, np.array(999.))
def test_fillvalue_exotic_dtype(self):
# Tests yet more exotic flexible dtypes
_check_fill_value = np.ma.core._check_fill_value
ndtype = [('i', int), ('s', '|S8'), ('f', float)]
control = np.array((default_fill_value(0),
default_fill_value('0'),
default_fill_value(0.),),
dtype=ndtype)
assert_equal(_check_fill_value(None, ndtype), control)
# The shape shouldn't matter
ndtype = [('f0', float, (2, 2))]
control = np.array((default_fill_value(0.),),
dtype=[('f0', float)]).astype(ndtype)
assert_equal(_check_fill_value(None, ndtype), control)
control = np.array((0,), dtype=[('f0', float)]).astype(ndtype)
assert_equal(_check_fill_value(0, ndtype), control)
ndtype = np.dtype("int, (2,3)float, float")
control = np.array((default_fill_value(0),
default_fill_value(0.),
default_fill_value(0.),),
dtype="int, float, float").astype(ndtype)
test = _check_fill_value(None, ndtype)
assert_equal(test, control)
control = np.array((0, 0, 0), dtype="int, float, float").astype(ndtype)
assert_equal(_check_fill_value(0, ndtype), control)
def test_fillvalue_datetime_timedelta(self):
# Test default fillvalue for datetime64 and timedelta64 types.
# See issue #4476, this would return '?' which would cause errors
# elsewhere
for timecode in ("as", "fs", "ps", "ns", "us", "ms", "s", "m",
"h", "D", "W", "M", "Y"):
control = numpy.datetime64("NaT", timecode)
test = default_fill_value(numpy.dtype("<M8[" + timecode + "]"))
assert_equal(test, control)
control = numpy.timedelta64("NaT", timecode)
test = default_fill_value(numpy.dtype("<m8[" + timecode + "]"))
assert_equal(test, control)
def test_extremum_fill_value(self):
# Tests extremum fill values for flexible type.
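        # minimum_fill_value/maximum_fill_value recurse into structured
        # dtypes, returning the appropriate extreme for each field.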
a = array([(1, (2, 3)), (4, (5, 6))],
dtype=[('A', int), ('B', [('BA', int), ('BB', int)])])
test = a.fill_value
assert_equal(test['A'], default_fill_value(a['A']))
assert_equal(test['B']['BA'], default_fill_value(a['B']['BA']))
assert_equal(test['B']['BB'], default_fill_value(a['B']['BB']))
test = minimum_fill_value(a)
assert_equal(test[0], minimum_fill_value(a['A']))
assert_equal(test[1][0], minimum_fill_value(a['B']['BA']))
assert_equal(test[1][1], minimum_fill_value(a['B']['BB']))
assert_equal(test[1], minimum_fill_value(a['B']))
test = maximum_fill_value(a)
assert_equal(test[0], maximum_fill_value(a['A']))
assert_equal(test[1][0], maximum_fill_value(a['B']['BA']))
assert_equal(test[1][1], maximum_fill_value(a['B']['BB']))
assert_equal(test[1], maximum_fill_value(a['B']))
def test_fillvalue_individual_fields(self):
# Test setting fill_value on individual fields
ndtype = [('a', int), ('b', int)]
# Explicit fill_value
a = array(list(zip([1, 2, 3], [4, 5, 6])),
fill_value=(-999, -999), dtype=ndtype)
aa = a['a']
aa.set_fill_value(10)
assert_equal(aa._fill_value, np.array(10))
assert_equal(tuple(a.fill_value), (10, -999))
a.fill_value['b'] = -10
assert_equal(tuple(a.fill_value), (10, -10))
# Implicit fill_value
t = array(list(zip([1, 2, 3], [4, 5, 6])), dtype=ndtype)
tt = t['a']
tt.set_fill_value(10)
assert_equal(tt._fill_value, np.array(10))
assert_equal(tuple(t.fill_value), (10, default_fill_value(0)))
def test_fillvalue_implicit_structured_array(self):
# Check that fill_value is always defined for structured arrays
ndtype = ('b', float)
adtype = ('a', float)
a = array([(1.,), (2.,)], mask=[(False,), (False,)],
fill_value=(np.nan,), dtype=np.dtype([adtype]))
b = empty(a.shape, dtype=[adtype, ndtype])
b['a'] = a['a']
b['a'].set_fill_value(a['a'].fill_value)
f = b._fill_value[()]
assert_(np.isnan(f[0]))
assert_equal(f[-1], default_fill_value(1.))
def test_fillvalue_as_arguments(self):
# Test adding a fill_value parameter to empty/ones/zeros
a = empty(3, fill_value=999.)
assert_equal(a.fill_value, 999.)
a = ones(3, fill_value=999., dtype=float)
assert_equal(a.fill_value, 999.)
a = zeros(3, fill_value=0., dtype=complex)
assert_equal(a.fill_value, 0.)
a = identity(3, fill_value=0., dtype=complex)
assert_equal(a.fill_value, 0.)
def test_shape_argument(self):
        # Test that shape can be provided as an argument
# GH issue 6106
a = empty(shape=(3, ))
assert_equal(a.shape, (3, ))
a = ones(shape=(3, ), dtype=float)
assert_equal(a.shape, (3, ))
a = zeros(shape=(3, ), dtype=complex)
assert_equal(a.shape, (3, ))
def test_fillvalue_in_view(self):
# Test the behavior of fill_value in view
# Create initial masked array
x = array([1, 2, 3], fill_value=1, dtype=np.int64)
# Check that fill_value is preserved by default
y = x.view()
assert_(y.fill_value == 1)
# Check that fill_value is preserved if dtype is specified and the
# dtype is an ndarray sub-class and has a _fill_value attribute
y = x.view(MaskedArray)
assert_(y.fill_value == 1)
# Check that fill_value is preserved if type is specified and the
# dtype is an ndarray sub-class and has a _fill_value attribute (by
# default, the first argument is dtype, not type)
y = x.view(type=MaskedArray)
assert_(y.fill_value == 1)
# Check that code does not crash if passed an ndarray sub-class that
# does not have a _fill_value attribute
y = x.view(np.ndarray)
y = x.view(type=np.ndarray)
        # Check that fill_value can be overridden with view
y = x.view(MaskedArray, fill_value=2)
assert_(y.fill_value == 2)
        # Check that fill_value can be overridden with view (using type=)
y = x.view(type=MaskedArray, fill_value=2)
assert_(y.fill_value == 2)
# Check that fill_value gets reset if passed a dtype but not a
# fill_value. This is because even though in some cases one can safely
# cast the fill_value, e.g. if taking an int64 view of an int32 array,
# in other cases, this cannot be done (e.g. int32 view of an int64
# array with a large fill_value).
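        # The reset falls back to the default fill_value of the new dtype
        # (999999 for integer types).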
y = x.view(dtype=np.int32)
assert_(y.fill_value == 999999)
class TestUfuncs(TestCase):
# Test class for the application of ufuncs on MaskedArrays.
def setUp(self):
# Base data definition.
self.d = (array([1.0, 0, -1, pi / 2] * 2, mask=[0, 1] + [0] * 6),
array([1.0, 0, -1, pi / 2] * 2, mask=[1, 0] + [0] * 6),)
self.err_status = np.geterr()
np.seterr(divide='ignore', invalid='ignore')
def tearDown(self):
np.seterr(**self.err_status)
def test_testUfuncRegression(self):
# Tests new ufuncs on MaskedArrays.
for f in ['sqrt', 'log', 'log10', 'exp', 'conjugate',
'sin', 'cos', 'tan',
'arcsin', 'arccos', 'arctan',
'sinh', 'cosh', 'tanh',
'arcsinh',
'arccosh',
'arctanh',
'absolute', 'fabs', 'negative',
# 'nonzero', 'around',
'floor', 'ceil',
# 'sometrue', 'alltrue',
'logical_not',
'add', 'subtract', 'multiply',
'divide', 'true_divide', 'floor_divide',
'remainder', 'fmod', 'hypot', 'arctan2',
'equal', 'not_equal', 'less_equal', 'greater_equal',
'less', 'greater',
'logical_and', 'logical_or', 'logical_xor',
]:
try:
uf = getattr(umath, f)
except AttributeError:
uf = getattr(fromnumeric, f)
mf = getattr(numpy.ma.core, f)
args = self.d[:uf.nin]
ur = uf(*args)
mr = mf(*args)
assert_equal(ur.filled(0), mr.filled(0), f)
assert_mask_equal(ur.mask, mr.mask, err_msg=f)
def test_reduce(self):
# Tests reduce on MaskedArrays.
a = self.d[0]
self.assertTrue(not alltrue(a, axis=0))
self.assertTrue(sometrue(a, axis=0))
assert_equal(sum(a[:3], axis=0), 0)
assert_equal(product(a, axis=0), 0)
assert_equal(add.reduce(a), pi)
def test_minmax(self):
# Tests extrema on MaskedArrays.
a = arange(1, 13).reshape(3, 4)
amask = masked_where(a < 5, a)
assert_equal(amask.max(), a.max())
assert_equal(amask.min(), 5)
assert_equal(amask.max(0), a.max(0))
assert_equal(amask.min(0), [5, 6, 7, 8])
self.assertTrue(amask.max(1)[0].mask)
self.assertTrue(amask.min(1)[0].mask)
def test_ndarray_mask(self):
        # Check that the mask of the result is an ndarray (not a MaskedArray)
a = masked_array([-1, 0, 1, 2, 3], mask=[0, 0, 0, 0, 1])
test = np.sqrt(a)
control = masked_array([-1, 0, 1, np.sqrt(2), -1],
mask=[1, 0, 0, 0, 1])
assert_equal(test, control)
assert_equal(test.mask, control.mask)
self.assertTrue(not isinstance(test.mask, MaskedArray))
def test_treatment_of_NotImplemented(self):
# Check that NotImplemented is returned at appropriate places
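        # Returning NotImplemented lets Python defer to the reflected
        # method of the operand with the higher __array_priority__.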
a = masked_array([1., 2.], mask=[1, 0])
self.assertRaises(TypeError, operator.mul, a, "abc")
self.assertRaises(TypeError, operator.truediv, a, "abc")
class MyClass(object):
__array_priority__ = a.__array_priority__ + 1
def __mul__(self, other):
return "My mul"
def __rmul__(self, other):
return "My rmul"
me = MyClass()
assert_(me * a == "My mul")
assert_(a * me == "My rmul")
# and that __array_priority__ is respected
class MyClass2(object):
__array_priority__ = 100
def __mul__(self, other):
return "Me2mul"
def __rmul__(self, other):
return "Me2rmul"
def __rdiv__(self, other):
return "Me2rdiv"
__rtruediv__ = __rdiv__
me_too = MyClass2()
assert_(a.__mul__(me_too) is NotImplemented)
assert_(all(multiply.outer(a, me_too) == "Me2rmul"))
assert_(a.__truediv__(me_too) is NotImplemented)
assert_(me_too * a == "Me2mul")
assert_(a * me_too == "Me2rmul")
assert_(a / me_too == "Me2rdiv")
class TestMaskedArrayInPlaceArithmetics(TestCase):
    # Test MaskedArray in-place arithmetic
def setUp(self):
x = arange(10)
y = arange(10)
xm = arange(10)
xm[2] = masked
self.intdata = (x, y, xm)
self.floatdata = (x.astype(float), y.astype(float), xm.astype(float))
self.othertypes = np.typecodes['AllInteger'] + np.typecodes['AllFloat']
self.othertypes = [np.dtype(_).type for _ in self.othertypes]
self.uint8data = (
x.astype(np.uint8),
y.astype(np.uint8),
xm.astype(np.uint8)
)
def test_inplace_addition_scalar(self):
# Test of inplace additions
(x, y, xm) = self.intdata
xm[2] = masked
x += 1
assert_equal(x, y + 1)
xm += 1
assert_equal(xm, y + 1)
(x, _, xm) = self.floatdata
id1 = x.data.ctypes._data
x += 1.
assert_(id1 == x.data.ctypes._data)
assert_equal(x, y + 1.)
def test_inplace_addition_array(self):
# Test of inplace additions
(x, y, xm) = self.intdata
m = xm.mask
a = arange(10, dtype=np.int16)
a[-1] = masked
x += a
xm += a
assert_equal(x, y + a)
assert_equal(xm, y + a)
assert_equal(xm.mask, mask_or(m, a.mask))
def test_inplace_subtraction_scalar(self):
# Test of inplace subtractions
(x, y, xm) = self.intdata
x -= 1
assert_equal(x, y - 1)
xm -= 1
assert_equal(xm, y - 1)
def test_inplace_subtraction_array(self):
# Test of inplace subtractions
(x, y, xm) = self.floatdata
m = xm.mask
a = arange(10, dtype=float)
a[-1] = masked
x -= a
xm -= a
assert_equal(x, y - a)
assert_equal(xm, y - a)
assert_equal(xm.mask, mask_or(m, a.mask))
def test_inplace_multiplication_scalar(self):
# Test of inplace multiplication
(x, y, xm) = self.floatdata
x *= 2.0
assert_equal(x, y * 2)
xm *= 2.0
assert_equal(xm, y * 2)
def test_inplace_multiplication_array(self):
# Test of inplace multiplication
(x, y, xm) = self.floatdata
m = xm.mask
a = arange(10, dtype=float)
a[-1] = masked
x *= a
xm *= a
assert_equal(x, y * a)
assert_equal(xm, y * a)
assert_equal(xm.mask, mask_or(m, a.mask))
def test_inplace_division_scalar_int(self):
# Test of inplace division
(x, y, xm) = self.intdata
x = arange(10) * 2
xm = arange(10) * 2
xm[2] = masked
x //= 2
assert_equal(x, y)
xm //= 2
assert_equal(xm, y)
def test_inplace_division_scalar_float(self):
# Test of inplace division
(x, y, xm) = self.floatdata
x /= 2.0
assert_equal(x, y / 2.0)
xm /= arange(10)
assert_equal(xm, ones((10,)))
def test_inplace_division_array_float(self):
# Test of inplace division
(x, y, xm) = self.floatdata
m = xm.mask
a = arange(10, dtype=float)
a[-1] = masked
x /= a
xm /= a
assert_equal(x, y / a)
assert_equal(xm, y / a)
assert_equal(xm.mask, mask_or(mask_or(m, a.mask), (a == 0)))
def test_inplace_division_misc(self):
x = [1., 1., 1., -2., pi / 2., 4., 5., -10., 10., 1., 2., 3.]
y = [5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.]
m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]
m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1]
xm = masked_array(x, mask=m1)
ym = masked_array(y, mask=m2)
z = xm / ym
assert_equal(z._mask, [1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1])
assert_equal(z._data,
[1., 1., 1., -1., -pi / 2., 4., 5., 1., 1., 1., 2., 3.])
#assert_equal(z._data, [0.2,1.,1./3.,-1.,-pi/2.,-1.,5.,1.,1.,1.,2.,1.])
xm = xm.copy()
xm /= ym
assert_equal(xm._mask, [1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1])
        assert_equal(xm._data,
                     [1., 1., 1., -1., -pi / 2., 4., 5., 1., 1., 1., 2., 3.])
#assert_equal(xm._data,
# [1/5.,1.,1./3.,-1.,-pi/2.,-1.,5.,1.,1.,1.,2.,1.])
def test_datafriendly_add(self):
# Test keeping data w/ (inplace) addition
x = array([1, 2, 3], mask=[0, 0, 1])
# Test add w/ scalar
xx = x + 1
assert_equal(xx.data, [2, 3, 3])
assert_equal(xx.mask, [0, 0, 1])
# Test iadd w/ scalar
x += 1
assert_equal(x.data, [2, 3, 3])
assert_equal(x.mask, [0, 0, 1])
# Test add w/ array
x = array([1, 2, 3], mask=[0, 0, 1])
xx = x + array([1, 2, 3], mask=[1, 0, 0])
assert_equal(xx.data, [1, 4, 3])
assert_equal(xx.mask, [1, 0, 1])
# Test iadd w/ array
x = array([1, 2, 3], mask=[0, 0, 1])
x += array([1, 2, 3], mask=[1, 0, 0])
assert_equal(x.data, [1, 4, 3])
assert_equal(x.mask, [1, 0, 1])
def test_datafriendly_sub(self):
# Test keeping data w/ (inplace) subtraction
# Test sub w/ scalar
x = array([1, 2, 3], mask=[0, 0, 1])
xx = x - 1
assert_equal(xx.data, [0, 1, 3])
assert_equal(xx.mask, [0, 0, 1])
# Test isub w/ scalar
x = array([1, 2, 3], mask=[0, 0, 1])
x -= 1
assert_equal(x.data, [0, 1, 3])
assert_equal(x.mask, [0, 0, 1])
# Test sub w/ array
x = array([1, 2, 3], mask=[0, 0, 1])
xx = x - array([1, 2, 3], mask=[1, 0, 0])
assert_equal(xx.data, [1, 0, 3])
assert_equal(xx.mask, [1, 0, 1])
# Test isub w/ array
x = array([1, 2, 3], mask=[0, 0, 1])
x -= array([1, 2, 3], mask=[1, 0, 0])
assert_equal(x.data, [1, 0, 3])
assert_equal(x.mask, [1, 0, 1])
def test_datafriendly_mul(self):
# Test keeping data w/ (inplace) multiplication
# Test mul w/ scalar
x = array([1, 2, 3], mask=[0, 0, 1])
xx = x * 2
assert_equal(xx.data, [2, 4, 3])
assert_equal(xx.mask, [0, 0, 1])
# Test imul w/ scalar
x = array([1, 2, 3], mask=[0, 0, 1])
x *= 2
assert_equal(x.data, [2, 4, 3])
assert_equal(x.mask, [0, 0, 1])
# Test mul w/ array
x = array([1, 2, 3], mask=[0, 0, 1])
xx = x * array([10, 20, 30], mask=[1, 0, 0])
assert_equal(xx.data, [1, 40, 3])
assert_equal(xx.mask, [1, 0, 1])
# Test imul w/ array
x = array([1, 2, 3], mask=[0, 0, 1])
x *= array([10, 20, 30], mask=[1, 0, 0])
assert_equal(x.data, [1, 40, 3])
assert_equal(x.mask, [1, 0, 1])
def test_datafriendly_div(self):
# Test keeping data w/ (inplace) division
# Test div on scalar
x = array([1, 2, 3], mask=[0, 0, 1])
xx = x / 2.
assert_equal(xx.data, [1 / 2., 2 / 2., 3])
assert_equal(xx.mask, [0, 0, 1])
# Test idiv on scalar
x = array([1., 2., 3.], mask=[0, 0, 1])
x /= 2.
assert_equal(x.data, [1 / 2., 2 / 2., 3])
assert_equal(x.mask, [0, 0, 1])
# Test div on array
x = array([1., 2., 3.], mask=[0, 0, 1])
xx = x / array([10., 20., 30.], mask=[1, 0, 0])
assert_equal(xx.data, [1., 2. / 20., 3.])
assert_equal(xx.mask, [1, 0, 1])
# Test idiv on array
x = array([1., 2., 3.], mask=[0, 0, 1])
x /= array([10., 20., 30.], mask=[1, 0, 0])
assert_equal(x.data, [1., 2 / 20., 3.])
assert_equal(x.mask, [1, 0, 1])
def test_datafriendly_pow(self):
# Test keeping data w/ (inplace) power
# Test pow on scalar
x = array([1., 2., 3.], mask=[0, 0, 1])
xx = x ** 2.5
assert_equal(xx.data, [1., 2. ** 2.5, 3.])
assert_equal(xx.mask, [0, 0, 1])
# Test ipow on scalar
x **= 2.5
assert_equal(x.data, [1., 2. ** 2.5, 3])
assert_equal(x.mask, [0, 0, 1])
def test_datafriendly_add_arrays(self):
a = array([[1, 1], [3, 3]])
b = array([1, 1], mask=[0, 0])
a += b
assert_equal(a, [[2, 2], [4, 4]])
if a.mask is not nomask:
assert_equal(a.mask, [[0, 0], [0, 0]])
a = array([[1, 1], [3, 3]])
b = array([1, 1], mask=[0, 1])
a += b
assert_equal(a, [[2, 2], [4, 4]])
assert_equal(a.mask, [[0, 1], [0, 1]])
def test_datafriendly_sub_arrays(self):
a = array([[1, 1], [3, 3]])
b = array([1, 1], mask=[0, 0])
a -= b
assert_equal(a, [[0, 0], [2, 2]])
if a.mask is not nomask:
assert_equal(a.mask, [[0, 0], [0, 0]])
a = array([[1, 1], [3, 3]])
b = array([1, 1], mask=[0, 1])
a -= b
assert_equal(a, [[0, 0], [2, 2]])
assert_equal(a.mask, [[0, 1], [0, 1]])
def test_datafriendly_mul_arrays(self):
a = array([[1, 1], [3, 3]])
b = array([1, 1], mask=[0, 0])
a *= b
assert_equal(a, [[1, 1], [3, 3]])
if a.mask is not nomask:
assert_equal(a.mask, [[0, 0], [0, 0]])
a = array([[1, 1], [3, 3]])
b = array([1, 1], mask=[0, 1])
a *= b
assert_equal(a, [[1, 1], [3, 3]])
assert_equal(a.mask, [[0, 1], [0, 1]])
def test_inplace_addition_scalar_type(self):
# Test of inplace additions
for t in self.othertypes:
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings("always")
(x, y, xm) = (_.astype(t) for _ in self.uint8data)
xm[2] = masked
x += t(1)
assert_equal(x, y + t(1))
xm += t(1)
assert_equal(xm, y + t(1))
assert_equal(len(w), 0, "Failed on type=%s." % t)
def test_inplace_addition_array_type(self):
# Test of inplace additions
for t in self.othertypes:
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings("always")
(x, y, xm) = (_.astype(t) for _ in self.uint8data)
m = xm.mask
a = arange(10, dtype=t)
a[-1] = masked
x += a
xm += a
assert_equal(x, y + a)
assert_equal(xm, y + a)
assert_equal(xm.mask, mask_or(m, a.mask))
assert_equal(len(w), 0, "Failed on type=%s." % t)
def test_inplace_subtraction_scalar_type(self):
# Test of inplace subtractions
for t in self.othertypes:
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings("always")
(x, y, xm) = (_.astype(t) for _ in self.uint8data)
x -= t(1)
assert_equal(x, y - t(1))
xm -= t(1)
assert_equal(xm, y - t(1))
assert_equal(len(w), 0, "Failed on type=%s." % t)
def test_inplace_subtraction_array_type(self):
# Test of inplace subtractions
for t in self.othertypes:
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings("always")
(x, y, xm) = (_.astype(t) for _ in self.uint8data)
m = xm.mask
a = arange(10, dtype=t)
a[-1] = masked
x -= a
xm -= a
assert_equal(x, y - a)
assert_equal(xm, y - a)
assert_equal(xm.mask, mask_or(m, a.mask))
assert_equal(len(w), 0, "Failed on type=%s." % t)
def test_inplace_multiplication_scalar_type(self):
# Test of inplace multiplication
for t in self.othertypes:
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings("always")
(x, y, xm) = (_.astype(t) for _ in self.uint8data)
x *= t(2)
assert_equal(x, y * t(2))
xm *= t(2)
assert_equal(xm, y * t(2))
assert_equal(len(w), 0, "Failed on type=%s." % t)
def test_inplace_multiplication_array_type(self):
# Test of inplace multiplication
for t in self.othertypes:
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings("always")
(x, y, xm) = (_.astype(t) for _ in self.uint8data)
m = xm.mask
a = arange(10, dtype=t)
a[-1] = masked
x *= a
xm *= a
assert_equal(x, y * a)
assert_equal(xm, y * a)
assert_equal(xm.mask, mask_or(m, a.mask))
assert_equal(len(w), 0, "Failed on type=%s." % t)
def test_inplace_floor_division_scalar_type(self):
# Test of inplace division
for t in self.othertypes:
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings("always")
(x, y, xm) = (_.astype(t) for _ in self.uint8data)
x = arange(10, dtype=t) * t(2)
xm = arange(10, dtype=t) * t(2)
xm[2] = masked
x //= t(2)
xm //= t(2)
assert_equal(x, y)
assert_equal(xm, y)
assert_equal(len(w), 0, "Failed on type=%s." % t)
def test_inplace_floor_division_array_type(self):
# Test of inplace division
for t in self.othertypes:
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings("always")
(x, y, xm) = (_.astype(t) for _ in self.uint8data)
m = xm.mask
a = arange(10, dtype=t)
a[-1] = masked
x //= a
xm //= a
assert_equal(x, y // a)
assert_equal(xm, y // a)
assert_equal(
xm.mask,
mask_or(mask_or(m, a.mask), (a == t(0)))
)
assert_equal(len(w), 0, "Failed on type=%s." % t)
def test_inplace_division_scalar_type(self):
# Test of inplace division
for t in self.othertypes:
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings("always")
(x, y, xm) = (_.astype(t) for _ in self.uint8data)
x = arange(10, dtype=t) * t(2)
xm = arange(10, dtype=t) * t(2)
xm[2] = masked
# May get a DeprecationWarning or a TypeError.
#
# This is a consequence of the fact that this is true divide
# and will require casting to float for calculation and
# casting back to the original type. This will only be raised
# with integers. Whether it is an error or warning is only
# dependent on how stringent the casting rules are.
#
                # Both cases are handled the same way below.
try:
x /= t(2)
assert_equal(x, y)
except (DeprecationWarning, TypeError) as e:
warnings.warn(str(e))
try:
xm /= t(2)
assert_equal(xm, y)
except (DeprecationWarning, TypeError) as e:
warnings.warn(str(e))
if issubclass(t, np.integer):
assert_equal(len(w), 2, "Failed on type=%s." % t)
else:
assert_equal(len(w), 0, "Failed on type=%s." % t)
def test_inplace_division_array_type(self):
# Test of inplace division
for t in self.othertypes:
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings("always")
(x, y, xm) = (_.astype(t) for _ in self.uint8data)
m = xm.mask
a = arange(10, dtype=t)
a[-1] = masked
# May get a DeprecationWarning or a TypeError.
#
# This is a consequence of the fact that this is true divide
# and will require casting to float for calculation and
# casting back to the original type. This will only be raised
# with integers. Whether it is an error or warning is only
# dependent on how stringent the casting rules are.
#
                # Both cases are handled the same way below.
try:
x /= a
assert_equal(x, y / a)
except (DeprecationWarning, TypeError) as e:
warnings.warn(str(e))
try:
xm /= a
assert_equal(xm, y / a)
assert_equal(
xm.mask,
mask_or(mask_or(m, a.mask), (a == t(0)))
)
except (DeprecationWarning, TypeError) as e:
warnings.warn(str(e))
if issubclass(t, np.integer):
assert_equal(len(w), 2, "Failed on type=%s." % t)
else:
assert_equal(len(w), 0, "Failed on type=%s." % t)
def test_inplace_pow_type(self):
# Test keeping data w/ (inplace) power
for t in self.othertypes:
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings("always")
# Test pow on scalar
x = array([1, 2, 3], mask=[0, 0, 1], dtype=t)
xx = x ** t(2)
xx_r = array([1, 2 ** 2, 3], mask=[0, 0, 1], dtype=t)
assert_equal(xx.data, xx_r.data)
assert_equal(xx.mask, xx_r.mask)
# Test ipow on scalar
x **= t(2)
assert_equal(x.data, xx_r.data)
assert_equal(x.mask, xx_r.mask)
assert_equal(len(w), 0, "Failed on type=%s." % t)
class TestMaskedArrayMethods(TestCase):
    # Test class for miscellaneous MaskedArray methods.
def setUp(self):
# Base data definition.
x = np.array([8.375, 7.545, 8.828, 8.5, 1.757, 5.928,
8.43, 7.78, 9.865, 5.878, 8.979, 4.732,
3.012, 6.022, 5.095, 3.116, 5.238, 3.957,
6.04, 9.63, 7.712, 3.382, 4.489, 6.479,
7.189, 9.645, 5.395, 4.961, 9.894, 2.893,
7.357, 9.828, 6.272, 3.758, 6.693, 0.993])
X = x.reshape(6, 6)
XX = x.reshape(3, 2, 2, 3)
m = np.array([0, 1, 0, 1, 0, 0,
1, 0, 1, 1, 0, 1,
0, 0, 0, 1, 0, 1,
0, 0, 0, 1, 1, 1,
1, 0, 0, 1, 0, 0,
0, 0, 1, 0, 1, 0])
mx = array(data=x, mask=m)
mX = array(data=X, mask=m.reshape(X.shape))
mXX = array(data=XX, mask=m.reshape(XX.shape))
m2 = np.array([1, 1, 0, 1, 0, 0,
1, 1, 1, 1, 0, 1,
0, 0, 1, 1, 0, 1,
0, 0, 0, 1, 1, 1,
1, 0, 0, 1, 1, 0,
0, 0, 1, 0, 1, 1])
m2x = array(data=x, mask=m2)
m2X = array(data=X, mask=m2.reshape(X.shape))
m2XX = array(data=XX, mask=m2.reshape(XX.shape))
self.d = (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX)
def test_generic_methods(self):
# Tests some MaskedArray methods.
a = array([1, 3, 2])
assert_equal(a.any(), a._data.any())
assert_equal(a.all(), a._data.all())
assert_equal(a.argmax(), a._data.argmax())
assert_equal(a.argmin(), a._data.argmin())
assert_equal(a.choose(0, 1, 2, 3, 4), a._data.choose(0, 1, 2, 3, 4))
assert_equal(a.compress([1, 0, 1]), a._data.compress([1, 0, 1]))
assert_equal(a.conj(), a._data.conj())
assert_equal(a.conjugate(), a._data.conjugate())
m = array([[1, 2], [3, 4]])
assert_equal(m.diagonal(), m._data.diagonal())
assert_equal(a.sum(), a._data.sum())
assert_equal(a.take([1, 2]), a._data.take([1, 2]))
assert_equal(m.transpose(), m._data.transpose())
def test_allclose(self):
# Tests allclose on arrays
a = np.random.rand(10)
b = a + np.random.rand(10) * 1e-8
self.assertTrue(allclose(a, b))
# Test allclose w/ infs
a[0] = np.inf
self.assertTrue(not allclose(a, b))
b[0] = np.inf
self.assertTrue(allclose(a, b))
# Test all close w/ masked
a = masked_array(a)
a[-1] = masked
self.assertTrue(allclose(a, b, masked_equal=True))
self.assertTrue(not allclose(a, b, masked_equal=False))
# Test comparison w/ scalar
a *= 1e-8
a[0] = 0
self.assertTrue(allclose(a, 0, masked_equal=True))
# Test that the function works for MIN_INT integer typed arrays
a = masked_array([np.iinfo(np.int_).min], dtype=np.int_)
self.assertTrue(allclose(a, a))
def test_allany(self):
# Checks the any/all methods/functions.
x = np.array([[0.13, 0.26, 0.90],
[0.28, 0.33, 0.63],
[0.31, 0.87, 0.70]])
m = np.array([[True, False, False],
[False, False, False],
[True, True, False]], dtype=np.bool_)
mx = masked_array(x, mask=m)
mxbig = (mx > 0.5)
mxsmall = (mx < 0.5)
self.assertFalse(mxbig.all())
self.assertTrue(mxbig.any())
assert_equal(mxbig.all(0), [False, False, True])
assert_equal(mxbig.all(1), [False, False, True])
assert_equal(mxbig.any(0), [False, False, True])
assert_equal(mxbig.any(1), [True, True, True])
self.assertFalse(mxsmall.all())
self.assertTrue(mxsmall.any())
assert_equal(mxsmall.all(0), [True, True, False])
assert_equal(mxsmall.all(1), [False, False, False])
assert_equal(mxsmall.any(0), [True, True, False])
assert_equal(mxsmall.any(1), [True, True, False])
def test_allany_onmatrices(self):
x = np.array([[0.13, 0.26, 0.90],
[0.28, 0.33, 0.63],
[0.31, 0.87, 0.70]])
X = np.matrix(x)
m = np.array([[True, False, False],
[False, False, False],
[True, True, False]], dtype=np.bool_)
mX = masked_array(X, mask=m)
mXbig = (mX > 0.5)
mXsmall = (mX < 0.5)
self.assertFalse(mXbig.all())
self.assertTrue(mXbig.any())
assert_equal(mXbig.all(0), np.matrix([False, False, True]))
assert_equal(mXbig.all(1), np.matrix([False, False, True]).T)
assert_equal(mXbig.any(0), np.matrix([False, False, True]))
assert_equal(mXbig.any(1), np.matrix([True, True, True]).T)
self.assertFalse(mXsmall.all())
self.assertTrue(mXsmall.any())
assert_equal(mXsmall.all(0), np.matrix([True, True, False]))
assert_equal(mXsmall.all(1), np.matrix([False, False, False]).T)
assert_equal(mXsmall.any(0), np.matrix([True, True, False]))
assert_equal(mXsmall.any(1), np.matrix([True, True, False]).T)
def test_allany_oddities(self):
# Some fun with all and any
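        # On a fully masked array, all()/any() return the masked singleton;
        # with an explicit out= argument the result is written into (and
        # returned as) out instead.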
store = empty((), dtype=bool)
full = array([1, 2, 3], mask=True)
self.assertTrue(full.all() is masked)
full.all(out=store)
self.assertTrue(store)
self.assertTrue(store._mask, True)
self.assertTrue(store is not masked)
store = empty((), dtype=bool)
self.assertTrue(full.any() is masked)
full.any(out=store)
self.assertTrue(not store)
self.assertTrue(store._mask, True)
self.assertTrue(store is not masked)
def test_argmax_argmin(self):
# Tests argmin & argmax on MaskedArrays.
(x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d
assert_equal(mx.argmin(), 35)
assert_equal(mX.argmin(), 35)
assert_equal(m2x.argmin(), 4)
assert_equal(m2X.argmin(), 4)
assert_equal(mx.argmax(), 28)
assert_equal(mX.argmax(), 28)
assert_equal(m2x.argmax(), 31)
assert_equal(m2X.argmax(), 31)
assert_equal(mX.argmin(0), [2, 2, 2, 5, 0, 5])
assert_equal(m2X.argmin(0), [2, 2, 4, 5, 0, 4])
assert_equal(mX.argmax(0), [0, 5, 0, 5, 4, 0])
assert_equal(m2X.argmax(0), [5, 5, 0, 5, 1, 0])
assert_equal(mX.argmin(1), [4, 1, 0, 0, 5, 5, ])
assert_equal(m2X.argmin(1), [4, 4, 0, 0, 5, 3])
assert_equal(mX.argmax(1), [2, 4, 1, 1, 4, 1])
assert_equal(m2X.argmax(1), [2, 4, 1, 1, 1, 1])
def test_clip(self):
# Tests clip on MaskedArrays.
x = np.array([8.375, 7.545, 8.828, 8.5, 1.757, 5.928,
8.43, 7.78, 9.865, 5.878, 8.979, 4.732,
3.012, 6.022, 5.095, 3.116, 5.238, 3.957,
6.04, 9.63, 7.712, 3.382, 4.489, 6.479,
7.189, 9.645, 5.395, 4.961, 9.894, 2.893,
7.357, 9.828, 6.272, 3.758, 6.693, 0.993])
m = np.array([0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1,
0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1,
1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0])
mx = array(x, mask=m)
clipped = mx.clip(2, 8)
assert_equal(clipped.mask, mx.mask)
assert_equal(clipped._data, x.clip(2, 8))
assert_equal(clipped._data, mx._data.clip(2, 8))
def test_compress(self):
# test compress
a = masked_array([1., 2., 3., 4., 5.], fill_value=9999)
condition = (a > 1.5) & (a < 3.5)
assert_equal(a.compress(condition), [2., 3.])
a[[2, 3]] = masked
b = a.compress(condition)
assert_equal(b._data, [2., 3.])
assert_equal(b._mask, [0, 1])
assert_equal(b.fill_value, 9999)
assert_equal(b, a[condition])
condition = (a < 4.)
b = a.compress(condition)
assert_equal(b._data, [1., 2., 3.])
assert_equal(b._mask, [0, 0, 1])
assert_equal(b.fill_value, 9999)
assert_equal(b, a[condition])
a = masked_array([[10, 20, 30], [40, 50, 60]],
mask=[[0, 0, 1], [1, 0, 0]])
b = a.compress(a.ravel() >= 22)
assert_equal(b._data, [30, 40, 50, 60])
assert_equal(b._mask, [1, 1, 0, 0])
x = np.array([3, 1, 2])
b = a.compress(x >= 2, axis=1)
assert_equal(b._data, [[10, 30], [40, 60]])
assert_equal(b._mask, [[0, 1], [1, 0]])
def test_compressed(self):
# Tests compressed
a = array([1, 2, 3, 4], mask=[0, 0, 0, 0])
b = a.compressed()
assert_equal(b, a)
a[0] = masked
b = a.compressed()
assert_equal(b, [2, 3, 4])
a = array(np.matrix([1, 2, 3, 4]), mask=[0, 0, 0, 0])
b = a.compressed()
assert_equal(b, a)
self.assertTrue(isinstance(b, np.matrix))
a[0, 0] = masked
b = a.compressed()
assert_equal(b, [[2, 3, 4]])
def test_empty(self):
# Tests empty/like
datatype = [('a', int), ('b', float), ('c', '|S8')]
a = masked_array([(1, 1.1, '1.1'), (2, 2.2, '2.2'), (3, 3.3, '3.3')],
dtype=datatype)
assert_equal(len(a.fill_value.item()), len(datatype))
b = empty_like(a)
assert_equal(b.shape, a.shape)
assert_equal(b.fill_value, a.fill_value)
b = empty(len(a), dtype=datatype)
assert_equal(b.shape, a.shape)
assert_equal(b.fill_value, a.fill_value)
# check empty_like mask handling
a = masked_array([1, 2, 3], mask=[False, True, False])
b = empty_like(a)
assert_(not np.may_share_memory(a.mask, b.mask))
b = a.view(masked_array)
assert_(np.may_share_memory(a.mask, b.mask))
def test_put(self):
# Tests put.
d = arange(5)
n = [0, 0, 0, 1, 1]
m = make_mask(n)
x = array(d, mask=m)
self.assertTrue(x[3] is masked)
self.assertTrue(x[4] is masked)
x[[1, 4]] = [10, 40]
#self.assertTrue(x.mask is not m)
self.assertTrue(x[3] is masked)
self.assertTrue(x[4] is not masked)
assert_equal(x, [0, 10, 2, -1, 40])
x = masked_array(arange(10), mask=[1, 0, 0, 0, 0] * 2)
i = [0, 2, 4, 6]
x.put(i, [6, 4, 2, 0])
assert_equal(x, asarray([6, 1, 4, 3, 2, 5, 0, 7, 8, 9, ]))
assert_equal(x.mask, [0, 0, 0, 0, 0, 1, 0, 0, 0, 0])
x.put(i, masked_array([0, 2, 4, 6], [1, 0, 1, 0]))
assert_array_equal(x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, ])
assert_equal(x.mask, [1, 0, 0, 0, 1, 1, 0, 0, 0, 0])
x = masked_array(arange(10), mask=[1, 0, 0, 0, 0] * 2)
put(x, i, [6, 4, 2, 0])
assert_equal(x, asarray([6, 1, 4, 3, 2, 5, 0, 7, 8, 9, ]))
assert_equal(x.mask, [0, 0, 0, 0, 0, 1, 0, 0, 0, 0])
put(x, i, masked_array([0, 2, 4, 6], [1, 0, 1, 0]))
assert_array_equal(x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, ])
assert_equal(x.mask, [1, 0, 0, 0, 1, 1, 0, 0, 0, 0])
def test_put_nomask(self):
# GitHub issue 6425
x = zeros(10)
z = array([3., -1.], mask=[False, True])
x.put([1, 2], z)
self.assertTrue(x[0] is not masked)
assert_equal(x[0], 0)
self.assertTrue(x[1] is not masked)
assert_equal(x[1], 3)
self.assertTrue(x[2] is masked)
self.assertTrue(x[3] is not masked)
assert_equal(x[3], 0)
def test_put_hardmask(self):
# Tests put on hardmask
d = arange(5)
n = [0, 0, 0, 1, 1]
m = make_mask(n)
xh = array(d + 1, mask=m, hard_mask=True, copy=True)
xh.put([4, 2, 0, 1, 3], [1, 2, 3, 4, 5])
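        # indices 3 and 4 are hard-masked, so the values routed to them
        # (1 and 5) are discarded and the underlying data is left untouched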
assert_equal(xh._data, [3, 4, 2, 4, 5])
def test_putmask(self):
x = arange(6) + 1
mx = array(x, mask=[0, 0, 0, 1, 1, 1])
mask = [0, 0, 1, 0, 0, 1]
# w/o mask, w/o masked values
xx = x.copy()
putmask(xx, mask, 99)
assert_equal(xx, [1, 2, 99, 4, 5, 99])
# w/ mask, w/o masked values
mxx = mx.copy()
putmask(mxx, mask, 99)
assert_equal(mxx._data, [1, 2, 99, 4, 5, 99])
assert_equal(mxx._mask, [0, 0, 0, 1, 1, 0])
# w/o mask, w/ masked values
values = array([10, 20, 30, 40, 50, 60], mask=[1, 1, 1, 0, 0, 0])
xx = x.copy()
putmask(xx, mask, values)
assert_equal(xx._data, [1, 2, 30, 4, 5, 60])
assert_equal(xx._mask, [0, 0, 1, 0, 0, 0])
# w/ mask, w/ masked values
mxx = mx.copy()
putmask(mxx, mask, values)
assert_equal(mxx._data, [1, 2, 30, 4, 5, 60])
assert_equal(mxx._mask, [0, 0, 1, 1, 1, 0])
# w/ mask, w/ masked values + hardmask
mxx = mx.copy()
mxx.harden_mask()
putmask(mxx, mask, values)
assert_equal(mxx, [1, 2, 30, 4, 5, 60])
def test_ravel(self):
# Tests ravel
a = array([[1, 2, 3, 4, 5]], mask=[[0, 1, 0, 0, 0]])
aravel = a.ravel()
assert_equal(aravel._mask.shape, aravel.shape)
a = array([0, 0], mask=[1, 1])
aravel = a.ravel()
assert_equal(aravel._mask.shape, a.shape)
a = array(np.matrix([1, 2, 3, 4, 5]), mask=[[0, 1, 0, 0, 0]])
aravel = a.ravel()
assert_equal(aravel.shape, (1, 5))
assert_equal(aravel._mask.shape, a.shape)
# Checks that small_mask is preserved
a = array([1, 2, 3, 4], mask=[0, 0, 0, 0], shrink=False)
assert_equal(a.ravel()._mask, [0, 0, 0, 0])
# Test that the fill_value is preserved
a.fill_value = -99
a.shape = (2, 2)
ar = a.ravel()
assert_equal(ar._mask, [0, 0, 0, 0])
assert_equal(ar._data, [1, 2, 3, 4])
assert_equal(ar.fill_value, -99)
# Test index ordering
assert_equal(a.ravel(order='C'), [1, 2, 3, 4])
assert_equal(a.ravel(order='F'), [1, 3, 2, 4])
def test_reshape(self):
# Tests reshape
x = arange(4)
x[0] = masked
y = x.reshape(2, 2)
assert_equal(y.shape, (2, 2,))
assert_equal(y._mask.shape, (2, 2,))
assert_equal(x.shape, (4,))
assert_equal(x._mask.shape, (4,))
def test_sort(self):
# Test sort
x = array([1, 4, 2, 3], mask=[0, 1, 0, 0], dtype=np.uint8)
sortedx = sort(x)
assert_equal(sortedx._data, [1, 2, 3, 4])
assert_equal(sortedx._mask, [0, 0, 0, 1])
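        # endwith=False places masked entries at the beginning of the sorted
        # array instead of at the end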
sortedx = sort(x, endwith=False)
assert_equal(sortedx._data, [4, 1, 2, 3])
assert_equal(sortedx._mask, [1, 0, 0, 0])
x.sort()
assert_equal(x._data, [1, 2, 3, 4])
assert_equal(x._mask, [0, 0, 0, 1])
x = array([1, 4, 2, 3], mask=[0, 1, 0, 0], dtype=np.uint8)
x.sort(endwith=False)
assert_equal(x._data, [4, 1, 2, 3])
assert_equal(x._mask, [1, 0, 0, 0])
x = [1, 4, 2, 3]
sortedx = sort(x)
        self.assertTrue(not isinstance(sortedx, MaskedArray))
x = array([0, 1, -1, -2, 2], mask=nomask, dtype=np.int8)
sortedx = sort(x, endwith=False)
assert_equal(sortedx._data, [-2, -1, 0, 1, 2])
x = array([0, 1, -1, -2, 2], mask=[0, 1, 0, 0, 1], dtype=np.int8)
sortedx = sort(x, endwith=False)
assert_equal(sortedx._data, [1, 2, -2, -1, 0])
assert_equal(sortedx._mask, [1, 1, 0, 0, 0])
def test_sort_2d(self):
# Check sort of 2D array.
# 2D array w/o mask
a = masked_array([[8, 4, 1], [2, 0, 9]])
a.sort(0)
assert_equal(a, [[2, 0, 1], [8, 4, 9]])
a = masked_array([[8, 4, 1], [2, 0, 9]])
a.sort(1)
assert_equal(a, [[1, 4, 8], [0, 2, 9]])
# 2D array w/mask
a = masked_array([[8, 4, 1], [2, 0, 9]], mask=[[1, 0, 0], [0, 0, 1]])
a.sort(0)
assert_equal(a, [[2, 0, 1], [8, 4, 9]])
assert_equal(a._mask, [[0, 0, 0], [1, 0, 1]])
a = masked_array([[8, 4, 1], [2, 0, 9]], mask=[[1, 0, 0], [0, 0, 1]])
a.sort(1)
assert_equal(a, [[1, 4, 8], [0, 2, 9]])
assert_equal(a._mask, [[0, 0, 1], [0, 0, 1]])
# 3D
a = masked_array([[[7, 8, 9], [4, 5, 6], [1, 2, 3]],
[[1, 2, 3], [7, 8, 9], [4, 5, 6]],
[[7, 8, 9], [1, 2, 3], [4, 5, 6]],
[[4, 5, 6], [1, 2, 3], [7, 8, 9]]])
a[a % 4 == 0] = masked
am = a.copy()
an = a.filled(99)
am.sort(0)
an.sort(0)
assert_equal(am, an)
am = a.copy()
an = a.filled(99)
am.sort(1)
an.sort(1)
assert_equal(am, an)
am = a.copy()
an = a.filled(99)
am.sort(2)
an.sort(2)
assert_equal(am, an)
def test_sort_flexible(self):
# Test sort on flexible dtype.
a = array(
data=[(3, 3), (3, 2), (2, 2), (2, 1), (1, 0), (1, 1), (1, 2)],
mask=[(0, 0), (0, 1), (0, 0), (0, 0), (1, 0), (0, 0), (0, 0)],
dtype=[('A', int), ('B', int)])
test = sort(a)
b = array(
data=[(1, 1), (1, 2), (2, 1), (2, 2), (3, 3), (3, 2), (1, 0)],
mask=[(0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0, 1), (1, 0)],
dtype=[('A', int), ('B', int)])
assert_equal(test, b)
assert_equal(test.mask, b.mask)
test = sort(a, endwith=False)
b = array(
data=[(1, 0), (1, 1), (1, 2), (2, 1), (2, 2), (3, 2), (3, 3), ],
mask=[(1, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0, 1), (0, 0), ],
dtype=[('A', int), ('B', int)])
assert_equal(test, b)
assert_equal(test.mask, b.mask)
def test_argsort(self):
# Test argsort
a = array([1, 5, 2, 4, 3], mask=[1, 0, 0, 1, 0])
assert_equal(np.argsort(a), argsort(a))
def test_squeeze(self):
# Check squeeze
data = masked_array([[1, 2, 3]])
assert_equal(data.squeeze(), [1, 2, 3])
data = masked_array([[1, 2, 3]], mask=[[1, 1, 1]])
assert_equal(data.squeeze(), [1, 2, 3])
assert_equal(data.squeeze()._mask, [1, 1, 1])
data = masked_array([[1]], mask=True)
self.assertTrue(data.squeeze() is masked)
def test_swapaxes(self):
# Tests swapaxes on MaskedArrays.
x = np.array([8.375, 7.545, 8.828, 8.5, 1.757, 5.928,
8.43, 7.78, 9.865, 5.878, 8.979, 4.732,
3.012, 6.022, 5.095, 3.116, 5.238, 3.957,
6.04, 9.63, 7.712, 3.382, 4.489, 6.479,
7.189, 9.645, 5.395, 4.961, 9.894, 2.893,
7.357, 9.828, 6.272, 3.758, 6.693, 0.993])
m = np.array([0, 1, 0, 1, 0, 0,
1, 0, 1, 1, 0, 1,
0, 0, 0, 1, 0, 1,
0, 0, 0, 1, 1, 1,
1, 0, 0, 1, 0, 0,
0, 0, 1, 0, 1, 0])
mX = array(x, mask=m).reshape(6, 6)
mXX = mX.reshape(3, 2, 2, 3)
mXswapped = mX.swapaxes(0, 1)
assert_equal(mXswapped[-1], mX[:, -1])
mXXswapped = mXX.swapaxes(0, 2)
assert_equal(mXXswapped.shape, (2, 2, 3, 3))
def test_take(self):
# Tests take
x = masked_array([10, 20, 30, 40], [0, 1, 0, 1])
assert_equal(x.take([0, 0, 3]), masked_array([10, 10, 40], [0, 0, 1]))
assert_equal(x.take([0, 0, 3]), x[[0, 0, 3]])
assert_equal(x.take([[0, 1], [0, 1]]),
masked_array([[10, 20], [10, 20]], [[0, 1], [0, 1]]))
x = array([[10, 20, 30], [40, 50, 60]], mask=[[0, 0, 1], [1, 0, 0, ]])
assert_equal(x.take([0, 2], axis=1),
array([[10, 30], [40, 60]], mask=[[0, 1], [1, 0]]))
assert_equal(take(x, [0, 2], axis=1),
array([[10, 30], [40, 60]], mask=[[0, 1], [1, 0]]))
def test_take_masked_indices(self):
# Test take w/ masked indices
a = np.array((40, 18, 37, 9, 22))
indices = np.arange(3)[None,:] + np.arange(5)[:, None]
mindices = array(indices, mask=(indices >= len(a)))
# No mask
test = take(a, mindices, mode='clip')
ctrl = array([[40, 18, 37],
[18, 37, 9],
[37, 9, 22],
[9, 22, 22],
[22, 22, 22]])
assert_equal(test, ctrl)
# Masked indices
test = take(a, mindices)
ctrl = array([[40, 18, 37],
[18, 37, 9],
[37, 9, 22],
[9, 22, 40],
[22, 40, 40]])
ctrl[3, 2] = ctrl[4, 1] = ctrl[4, 2] = masked
assert_equal(test, ctrl)
assert_equal(test.mask, ctrl.mask)
# Masked input + masked indices
a = array((40, 18, 37, 9, 22), mask=(0, 1, 0, 0, 0))
test = take(a, mindices)
ctrl[0, 1] = ctrl[1, 0] = masked
assert_equal(test, ctrl)
assert_equal(test.mask, ctrl.mask)
def test_tolist(self):
# Tests to list
# ... on 1D
x = array(np.arange(12))
x[[1, -2]] = masked
xlist = x.tolist()
self.assertTrue(xlist[1] is None)
self.assertTrue(xlist[-2] is None)
# ... on 2D
x.shape = (3, 4)
xlist = x.tolist()
ctrl = [[0, None, 2, 3], [4, 5, 6, 7], [8, 9, None, 11]]
assert_equal(xlist[0], [0, None, 2, 3])
assert_equal(xlist[1], [4, 5, 6, 7])
assert_equal(xlist[2], [8, 9, None, 11])
assert_equal(xlist, ctrl)
# ... on structured array w/ masked records
x = array(list(zip([1, 2, 3],
[1.1, 2.2, 3.3],
['one', 'two', 'thr'])),
dtype=[('a', int), ('b', float), ('c', '|S8')])
x[-1] = masked
assert_equal(x.tolist(),
[(1, 1.1, asbytes('one')),
(2, 2.2, asbytes('two')),
(None, None, None)])
# ... on structured array w/ masked fields
a = array([(1, 2,), (3, 4)], mask=[(0, 1), (0, 0)],
dtype=[('a', int), ('b', int)])
test = a.tolist()
assert_equal(test, [[1, None], [3, 4]])
# ... on mvoid
a = a[0]
test = a.tolist()
assert_equal(test, [1, None])
def test_tolist_specialcase(self):
# Test mvoid.tolist: make sure we return a standard Python object
a = array([(0, 1), (2, 3)], dtype=[('a', int), ('b', int)])
# w/o mask: each entry is a np.void whose elements are standard Python
for entry in a:
for item in entry.tolist():
assert_(not isinstance(item, np.generic))
# w/ mask: each entry is a ma.void whose elements should be
# standard Python
a.mask[0] = (0, 1)
for entry in a:
for item in entry.tolist():
assert_(not isinstance(item, np.generic))
def test_toflex(self):
# Test the conversion to records
data = arange(10)
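        # toflex() packs the data and mask into a single flexible-dtype
        # ndarray with '_data' and '_mask' fields, queried below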
record = data.toflex()
assert_equal(record['_data'], data._data)
assert_equal(record['_mask'], data._mask)
data[[0, 1, 2, -1]] = masked
record = data.toflex()
assert_equal(record['_data'], data._data)
assert_equal(record['_mask'], data._mask)
ndtype = [('i', int), ('s', '|S3'), ('f', float)]
data = array([(i, s, f) for (i, s, f) in zip(np.arange(10),
'ABCDEFGHIJKLM',
np.random.rand(10))],
dtype=ndtype)
data[[0, 1, 2, -1]] = masked
record = data.toflex()
assert_equal(record['_data'], data._data)
assert_equal(record['_mask'], data._mask)
ndtype = np.dtype("int, (2,3)float, float")
data = array([(i, f, ff) for (i, f, ff) in zip(np.arange(10),
np.random.rand(10),
np.random.rand(10))],
dtype=ndtype)
data[[0, 1, 2, -1]] = masked
record = data.toflex()
assert_equal_records(record['_data'], data._data)
assert_equal_records(record['_mask'], data._mask)
def test_fromflex(self):
# Test the reconstruction of a masked_array from a record
a = array([1, 2, 3])
test = fromflex(a.toflex())
assert_equal(test, a)
assert_equal(test.mask, a.mask)
a = array([1, 2, 3], mask=[0, 0, 1])
test = fromflex(a.toflex())
assert_equal(test, a)
assert_equal(test.mask, a.mask)
a = array([(1, 1.), (2, 2.), (3, 3.)], mask=[(1, 0), (0, 0), (0, 1)],
dtype=[('A', int), ('B', float)])
test = fromflex(a.toflex())
assert_equal(test, a)
assert_equal(test.data, a.data)
def test_arraymethod(self):
# Test a _arraymethod w/ n argument
marray = masked_array([[1, 2, 3, 4, 5]], mask=[0, 0, 1, 0, 0])
control = masked_array([[1], [2], [3], [4], [5]],
mask=[0, 0, 1, 0, 0])
assert_equal(marray.T, control)
assert_equal(marray.transpose(), control)
assert_equal(MaskedArray.cumsum(marray.T, 0), control.cumsum(0))
class TestMaskedArrayMathMethods(TestCase):
def setUp(self):
# Base data definition.
x = np.array([8.375, 7.545, 8.828, 8.5, 1.757, 5.928,
8.43, 7.78, 9.865, 5.878, 8.979, 4.732,
3.012, 6.022, 5.095, 3.116, 5.238, 3.957,
6.04, 9.63, 7.712, 3.382, 4.489, 6.479,
7.189, 9.645, 5.395, 4.961, 9.894, 2.893,
7.357, 9.828, 6.272, 3.758, 6.693, 0.993])
X = x.reshape(6, 6)
XX = x.reshape(3, 2, 2, 3)
m = np.array([0, 1, 0, 1, 0, 0,
1, 0, 1, 1, 0, 1,
0, 0, 0, 1, 0, 1,
0, 0, 0, 1, 1, 1,
1, 0, 0, 1, 0, 0,
0, 0, 1, 0, 1, 0])
mx = array(data=x, mask=m)
mX = array(data=X, mask=m.reshape(X.shape))
mXX = array(data=XX, mask=m.reshape(XX.shape))
m2 = np.array([1, 1, 0, 1, 0, 0,
1, 1, 1, 1, 0, 1,
0, 0, 1, 1, 0, 1,
0, 0, 0, 1, 1, 1,
1, 0, 0, 1, 1, 0,
0, 0, 1, 0, 1, 1])
m2x = array(data=x, mask=m2)
m2X = array(data=X, mask=m2.reshape(X.shape))
m2XX = array(data=XX, mask=m2.reshape(XX.shape))
self.d = (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX)
def test_cumsumprod(self):
# Tests cumsum & cumprod on MaskedArrays.
(x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d
mXcp = mX.cumsum(0)
assert_equal(mXcp._data, mX.filled(0).cumsum(0))
mXcp = mX.cumsum(1)
assert_equal(mXcp._data, mX.filled(0).cumsum(1))
mXcp = mX.cumprod(0)
assert_equal(mXcp._data, mX.filled(1).cumprod(0))
mXcp = mX.cumprod(1)
assert_equal(mXcp._data, mX.filled(1).cumprod(1))
def test_cumsumprod_with_output(self):
# Tests cumsum/cumprod w/ output
xm = array(np.random.uniform(0, 10, 12)).reshape(3, 4)
xm[:, 0] = xm[0] = xm[-1, -1] = masked
for funcname in ('cumsum', 'cumprod'):
npfunc = getattr(np, funcname)
xmmeth = getattr(xm, funcname)
# A ndarray as explicit input
output = np.empty((3, 4), dtype=float)
output.fill(-9999)
result = npfunc(xm, axis=0, out=output)
# ... the result should be the given output
self.assertTrue(result is output)
assert_equal(result, xmmeth(axis=0, out=output))
output = empty((3, 4), dtype=int)
result = xmmeth(axis=0, out=output)
self.assertTrue(result is output)
def test_ptp(self):
# Tests ptp on MaskedArrays.
(x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d
(n, m) = X.shape
assert_equal(mx.ptp(), mx.compressed().ptp())
rows = np.zeros(n, np.float)
cols = np.zeros(m, np.float)
for k in range(m):
cols[k] = mX[:, k].compressed().ptp()
for k in range(n):
rows[k] = mX[k].compressed().ptp()
assert_equal(mX.ptp(0), cols)
assert_equal(mX.ptp(1), rows)
def test_add_object(self):
x = masked_array(['a', 'b'], mask=[1, 0], dtype=object)
y = x + 'x'
assert_equal(y[1], 'bx')
assert_(y.mask[0])
def test_sum_object(self):
# Test sum on object dtype
a = masked_array([1, 2, 3], mask=[1, 0, 0], dtype=np.object)
assert_equal(a.sum(), 5)
a = masked_array([[1, 2, 3], [4, 5, 6]], dtype=object)
assert_equal(a.sum(axis=0), [5, 7, 9])
def test_prod_object(self):
# Test prod on object dtype
a = masked_array([1, 2, 3], mask=[1, 0, 0], dtype=np.object)
assert_equal(a.prod(), 2 * 3)
a = masked_array([[1, 2, 3], [4, 5, 6]], dtype=object)
assert_equal(a.prod(axis=0), [4, 10, 18])
def test_meananom_object(self):
# Test mean/anom on object dtype
a = masked_array([1, 2, 3], dtype=np.object)
assert_equal(a.mean(), 2)
assert_equal(a.anom(), [-1, 0, 1])
def test_trace(self):
# Tests trace on MaskedArrays.
(x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d
mXdiag = mX.diagonal()
assert_equal(mX.trace(), mX.diagonal().compressed().sum())
assert_almost_equal(mX.trace(),
X.trace() - sum(mXdiag.mask * X.diagonal(),
axis=0))
def test_dot(self):
# Tests dot on MaskedArrays.
(x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d
fx = mx.filled(0)
r = mx.dot(mx)
assert_almost_equal(r.filled(0), fx.dot(fx))
assert_(r.mask is nomask)
fX = mX.filled(0)
r = mX.dot(mX)
assert_almost_equal(r.filled(0), fX.dot(fX))
assert_(r.mask[1,3])
r1 = empty_like(r)
mX.dot(mX, r1)
assert_almost_equal(r, r1)
mYY = mXX.swapaxes(-1, -2)
fXX, fYY = mXX.filled(0), mYY.filled(0)
r = mXX.dot(mYY)
assert_almost_equal(r.filled(0), fXX.dot(fYY))
r1 = empty_like(r)
mXX.dot(mYY, r1)
assert_almost_equal(r, r1)
def test_dot_shape_mismatch(self):
# regression test
x = masked_array([[1,2],[3,4]], mask=[[0,1],[0,0]])
y = masked_array([[1,2],[3,4]], mask=[[0,1],[0,0]])
z = masked_array([[0,1],[3,3]])
x.dot(y, out=z)
assert_almost_equal(z.filled(0), [[1, 0], [15, 16]])
assert_almost_equal(z.mask, [[0, 1], [0, 0]])
def test_varstd(self):
# Tests var & std on MaskedArrays.
(x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d
assert_almost_equal(mX.var(axis=None), mX.compressed().var())
assert_almost_equal(mX.std(axis=None), mX.compressed().std())
assert_almost_equal(mX.std(axis=None, ddof=1),
mX.compressed().std(ddof=1))
assert_almost_equal(mX.var(axis=None, ddof=1),
mX.compressed().var(ddof=1))
assert_equal(mXX.var(axis=3).shape, XX.var(axis=3).shape)
assert_equal(mX.var().shape, X.var().shape)
(mXvar0, mXvar1) = (mX.var(axis=0), mX.var(axis=1))
assert_almost_equal(mX.var(axis=None, ddof=2),
mX.compressed().var(ddof=2))
assert_almost_equal(mX.std(axis=None, ddof=2),
mX.compressed().std(ddof=2))
for k in range(6):
assert_almost_equal(mXvar1[k], mX[k].compressed().var())
assert_almost_equal(mXvar0[k], mX[:, k].compressed().var())
assert_almost_equal(np.sqrt(mXvar0[k]),
mX[:, k].compressed().std())
def test_varstd_specialcases(self):
# Test a special case for var
nout = np.array(-1, dtype=float)
mout = array(-1, dtype=float)
x = array(arange(10), mask=True)
for methodname in ('var', 'std'):
method = getattr(x, methodname)
self.assertTrue(method() is masked)
self.assertTrue(method(0) is masked)
self.assertTrue(method(-1) is masked)
# Using a masked array as explicit output
with warnings.catch_warnings():
warnings.simplefilter('ignore')
method(out=mout)
self.assertTrue(mout is not masked)
assert_equal(mout.mask, True)
# Using a ndarray as explicit output
with warnings.catch_warnings():
warnings.simplefilter('ignore')
method(out=nout)
self.assertTrue(np.isnan(nout))
x = array(arange(10), mask=True)
x[-1] = 9
for methodname in ('var', 'std'):
method = getattr(x, methodname)
self.assertTrue(method(ddof=1) is masked)
self.assertTrue(method(0, ddof=1) is masked)
self.assertTrue(method(-1, ddof=1) is masked)
# Using a masked array as explicit output
method(out=mout, ddof=1)
self.assertTrue(mout is not masked)
assert_equal(mout.mask, True)
# Using a ndarray as explicit output
method(out=nout, ddof=1)
self.assertTrue(np.isnan(nout))
def test_varstd_ddof(self):
a = array([[1, 1, 0], [1, 1, 0]], mask=[[0, 0, 1], [0, 0, 1]])
test = a.std(axis=0, ddof=0)
assert_equal(test.filled(0), [0, 0, 0])
assert_equal(test.mask, [0, 0, 1])
test = a.std(axis=0, ddof=1)
assert_equal(test.filled(0), [0, 0, 0])
assert_equal(test.mask, [0, 0, 1])
test = a.std(axis=0, ddof=2)
assert_equal(test.filled(0), [0, 0, 0])
assert_equal(test.mask, [1, 1, 1])
def test_diag(self):
# Test diag
x = arange(9).reshape((3, 3))
x[1, 1] = masked
out = np.diag(x)
assert_equal(out, [0, 4, 8])
out = diag(x)
assert_equal(out, [0, 4, 8])
assert_equal(out.mask, [0, 1, 0])
out = diag(out)
control = array([[0, 0, 0], [0, 4, 0], [0, 0, 8]],
mask=[[0, 0, 0], [0, 1, 0], [0, 0, 0]])
assert_equal(out, control)
def test_axis_methods_nomask(self):
# Test the combination nomask & methods w/ axis
a = array([[1, 2, 3], [4, 5, 6]])
assert_equal(a.sum(0), [5, 7, 9])
assert_equal(a.sum(-1), [6, 15])
assert_equal(a.sum(1), [6, 15])
assert_equal(a.prod(0), [4, 10, 18])
assert_equal(a.prod(-1), [6, 120])
assert_equal(a.prod(1), [6, 120])
assert_equal(a.min(0), [1, 2, 3])
assert_equal(a.min(-1), [1, 4])
assert_equal(a.min(1), [1, 4])
assert_equal(a.max(0), [4, 5, 6])
assert_equal(a.max(-1), [3, 6])
assert_equal(a.max(1), [3, 6])
class TestMaskedArrayMathMethodsComplex(TestCase):
# Test class for miscellaneous MaskedArrays methods.
def setUp(self):
# Base data definition.
x = np.array([8.375j, 7.545j, 8.828j, 8.5j, 1.757j, 5.928,
8.43, 7.78, 9.865, 5.878, 8.979, 4.732,
3.012, 6.022, 5.095, 3.116, 5.238, 3.957,
6.04, 9.63, 7.712, 3.382, 4.489, 6.479j,
7.189j, 9.645, 5.395, 4.961, 9.894, 2.893,
7.357, 9.828, 6.272, 3.758, 6.693, 0.993j])
X = x.reshape(6, 6)
XX = x.reshape(3, 2, 2, 3)
m = np.array([0, 1, 0, 1, 0, 0,
1, 0, 1, 1, 0, 1,
0, 0, 0, 1, 0, 1,
0, 0, 0, 1, 1, 1,
1, 0, 0, 1, 0, 0,
0, 0, 1, 0, 1, 0])
mx = array(data=x, mask=m)
mX = array(data=X, mask=m.reshape(X.shape))
mXX = array(data=XX, mask=m.reshape(XX.shape))
m2 = np.array([1, 1, 0, 1, 0, 0,
1, 1, 1, 1, 0, 1,
0, 0, 1, 1, 0, 1,
0, 0, 0, 1, 1, 1,
1, 0, 0, 1, 1, 0,
0, 0, 1, 0, 1, 1])
m2x = array(data=x, mask=m2)
m2X = array(data=X, mask=m2.reshape(X.shape))
m2XX = array(data=XX, mask=m2.reshape(XX.shape))
self.d = (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX)
def test_varstd(self):
# Tests var & std on MaskedArrays.
(x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d
assert_almost_equal(mX.var(axis=None), mX.compressed().var())
assert_almost_equal(mX.std(axis=None), mX.compressed().std())
assert_equal(mXX.var(axis=3).shape, XX.var(axis=3).shape)
assert_equal(mX.var().shape, X.var().shape)
(mXvar0, mXvar1) = (mX.var(axis=0), mX.var(axis=1))
assert_almost_equal(mX.var(axis=None, ddof=2),
mX.compressed().var(ddof=2))
assert_almost_equal(mX.std(axis=None, ddof=2),
mX.compressed().std(ddof=2))
for k in range(6):
assert_almost_equal(mXvar1[k], mX[k].compressed().var())
assert_almost_equal(mXvar0[k], mX[:, k].compressed().var())
assert_almost_equal(np.sqrt(mXvar0[k]),
mX[:, k].compressed().std())
class TestMaskedArrayFunctions(TestCase):
# Test class for miscellaneous functions.
def setUp(self):
x = np.array([1., 1., 1., -2., pi/2.0, 4., 5., -10., 10., 1., 2., 3.])
y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.])
m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]
m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1]
xm = masked_array(x, mask=m1)
ym = masked_array(y, mask=m2)
xm.set_fill_value(1e+20)
self.info = (xm, ym)
def test_masked_where_bool(self):
x = [1, 2]
y = masked_where(False, x)
assert_equal(y, [1, 2])
assert_equal(y[1], 2)
def test_masked_equal_wlist(self):
x = [1, 2, 3]
mx = masked_equal(x, 3)
assert_equal(mx, x)
assert_equal(mx._mask, [0, 0, 1])
mx = masked_not_equal(x, 3)
assert_equal(mx, x)
assert_equal(mx._mask, [1, 1, 0])
def test_masked_equal_fill_value(self):
x = [1, 2, 3]
mx = masked_equal(x, 3)
assert_equal(mx._mask, [0, 0, 1])
assert_equal(mx.fill_value, 3)
def test_masked_where_condition(self):
# Tests masking functions.
x = array([1., 2., 3., 4., 5.])
x[2] = masked
assert_equal(masked_where(greater(x, 2), x), masked_greater(x, 2))
assert_equal(masked_where(greater_equal(x, 2), x),
masked_greater_equal(x, 2))
assert_equal(masked_where(less(x, 2), x), masked_less(x, 2))
assert_equal(masked_where(less_equal(x, 2), x),
masked_less_equal(x, 2))
assert_equal(masked_where(not_equal(x, 2), x), masked_not_equal(x, 2))
assert_equal(masked_where(equal(x, 2), x), masked_equal(x, 2))
assert_equal(masked_where(not_equal(x, 2), x), masked_not_equal(x, 2))
assert_equal(masked_where([1, 1, 0, 0, 0], [1, 2, 3, 4, 5]),
[99, 99, 3, 4, 5])
def test_masked_where_oddities(self):
# Tests some generic features.
atest = ones((10, 10, 10), dtype=float)
btest = zeros(atest.shape, MaskType)
ctest = masked_where(btest, atest)
assert_equal(atest, ctest)
def test_masked_where_shape_constraint(self):
a = arange(10)
try:
test = masked_equal(1, a)
except IndexError:
pass
else:
raise AssertionError("Should have failed...")
test = masked_equal(a, 1)
assert_equal(test.mask, [0, 1, 0, 0, 0, 0, 0, 0, 0, 0])
def test_masked_where_structured(self):
# test that masked_where on a structured array sets a structured
# mask (see issue #2972)
a = np.zeros(10, dtype=[("A", "<f2"), ("B", "<f4")])
am = np.ma.masked_where(a["A"] < 5, a)
assert_equal(am.mask.dtype.names, am.dtype.names)
assert_equal(am["A"],
np.ma.masked_array(np.zeros(10), np.ones(10)))
def test_masked_otherfunctions(self):
assert_equal(masked_inside(list(range(5)), 1, 3),
[0, 199, 199, 199, 4])
assert_equal(masked_outside(list(range(5)), 1, 3), [199, 1, 2, 3, 199])
assert_equal(masked_inside(array(list(range(5)),
mask=[1, 0, 0, 0, 0]), 1, 3).mask,
[1, 1, 1, 1, 0])
assert_equal(masked_outside(array(list(range(5)),
mask=[0, 1, 0, 0, 0]), 1, 3).mask,
[1, 1, 0, 0, 1])
assert_equal(masked_equal(array(list(range(5)),
mask=[1, 0, 0, 0, 0]), 2).mask,
[1, 0, 1, 0, 0])
assert_equal(masked_not_equal(array([2, 2, 1, 2, 1],
mask=[1, 0, 0, 0, 0]), 2).mask,
[1, 0, 1, 0, 1])
def test_round(self):
a = array([1.23456, 2.34567, 3.45678, 4.56789, 5.67890],
mask=[0, 1, 0, 0, 0])
assert_equal(a.round(), [1., 2., 3., 5., 6.])
assert_equal(a.round(1), [1.2, 2.3, 3.5, 4.6, 5.7])
assert_equal(a.round(3), [1.235, 2.346, 3.457, 4.568, 5.679])
b = empty_like(a)
a.round(out=b)
assert_equal(b, [1., 2., 3., 5., 6.])
x = array([1., 2., 3., 4., 5.])
c = array([1, 1, 1, 0, 0])
x[2] = masked
z = where(c, x, -x)
assert_equal(z, [1., 2., 0., -4., -5])
c[0] = masked
z = where(c, x, -x)
assert_equal(z, [1., 2., 0., -4., -5])
assert_(z[0] is masked)
assert_(z[1] is not masked)
assert_(z[2] is masked)
def test_round_with_output(self):
# Testing round with an explicit output
xm = array(np.random.uniform(0, 10, 12)).reshape(3, 4)
xm[:, 0] = xm[0] = xm[-1, -1] = masked
# A ndarray as explicit input
output = np.empty((3, 4), dtype=float)
output.fill(-9999)
result = np.round(xm, decimals=2, out=output)
# ... the result should be the given output
self.assertTrue(result is output)
assert_equal(result, xm.round(decimals=2, out=output))
output = empty((3, 4), dtype=float)
result = xm.round(decimals=2, out=output)
self.assertTrue(result is output)
def test_round_with_scalar(self):
# Testing round with scalar/zero dimension input
# GH issue 2244
a = array(1.1, mask=[False])
assert_equal(a.round(), 1)
a = array(1.1, mask=[True])
assert_(a.round() is masked)
a = array(1.1, mask=[False])
output = np.empty(1, dtype=float)
output.fill(-9999)
a.round(out=output)
assert_equal(output, 1)
a = array(1.1, mask=[False])
output = array(-9999., mask=[True])
a.round(out=output)
assert_equal(output[()], 1)
a = array(1.1, mask=[True])
output = array(-9999., mask=[False])
a.round(out=output)
assert_(output[()] is masked)
def test_identity(self):
a = identity(5)
self.assertTrue(isinstance(a, MaskedArray))
assert_equal(a, np.identity(5))
def test_power(self):
x = -1.1
assert_almost_equal(power(x, 2.), 1.21)
self.assertTrue(power(x, masked) is masked)
x = array([-1.1, -1.1, 1.1, 1.1, 0.])
b = array([0.5, 2., 0.5, 2., -1.], mask=[0, 0, 0, 0, 1])
y = power(x, b)
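        # (-1.1) ** 0.5 is undefined over the reals, so position 0 comes out
        # masked; position 4 inherits the mask of b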
assert_almost_equal(y, [0, 1.21, 1.04880884817, 1.21, 0.])
assert_equal(y._mask, [1, 0, 0, 0, 1])
b.mask = nomask
y = power(x, b)
assert_equal(y._mask, [1, 0, 0, 0, 1])
z = x ** b
assert_equal(z._mask, y._mask)
assert_almost_equal(z, y)
assert_almost_equal(z._data, y._data)
x **= b
assert_equal(x._mask, y._mask)
assert_almost_equal(x, y)
assert_almost_equal(x._data, y._data)
def test_power_w_broadcasting(self):
# Test power w/ broadcasting
a2 = np.array([[1., 2., 3.], [4., 5., 6.]])
a2m = array(a2, mask=[[1, 0, 0], [0, 0, 1]])
b1 = np.array([2, 4, 3])
b2 = np.array([b1, b1])
b2m = array(b2, mask=[[0, 1, 0], [0, 1, 0]])
ctrl = array([[1 ** 2, 2 ** 4, 3 ** 3], [4 ** 2, 5 ** 4, 6 ** 3]],
mask=[[1, 1, 0], [0, 1, 1]])
# No broadcasting, base & exp w/ mask
test = a2m ** b2m
assert_equal(test, ctrl)
assert_equal(test.mask, ctrl.mask)
# No broadcasting, base w/ mask, exp w/o mask
test = a2m ** b2
assert_equal(test, ctrl)
assert_equal(test.mask, a2m.mask)
# No broadcasting, base w/o mask, exp w/ mask
test = a2 ** b2m
assert_equal(test, ctrl)
assert_equal(test.mask, b2m.mask)
ctrl = array([[2 ** 2, 4 ** 4, 3 ** 3], [2 ** 2, 4 ** 4, 3 ** 3]],
mask=[[0, 1, 0], [0, 1, 0]])
test = b1 ** b2m
assert_equal(test, ctrl)
assert_equal(test.mask, ctrl.mask)
test = b2m ** b1
assert_equal(test, ctrl)
assert_equal(test.mask, ctrl.mask)
def test_where(self):
# Test the where function
x = np.array([1., 1., 1., -2., pi/2.0, 4., 5., -10., 10., 1., 2., 3.])
y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.])
m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]
m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1]
xm = masked_array(x, mask=m1)
ym = masked_array(y, mask=m2)
xm.set_fill_value(1e+20)
d = where(xm > 2, xm, -9)
assert_equal(d, [-9., -9., -9., -9., -9., 4.,
-9., -9., 10., -9., -9., 3.])
assert_equal(d._mask, xm._mask)
d = where(xm > 2, -9, ym)
assert_equal(d, [5., 0., 3., 2., -1., -9.,
-9., -10., -9., 1., 0., -9.])
assert_equal(d._mask, [1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0])
d = where(xm > 2, xm, masked)
assert_equal(d, [-9., -9., -9., -9., -9., 4.,
-9., -9., 10., -9., -9., 3.])
tmp = xm._mask.copy()
tmp[(xm <= 2).filled(True)] = True
assert_equal(d._mask, tmp)
ixm = xm.astype(int)
d = where(ixm > 2, ixm, masked)
assert_equal(d, [-9, -9, -9, -9, -9, 4, -9, -9, 10, -9, -9, 3])
assert_equal(d.dtype, ixm.dtype)
def test_where_object(self):
a = np.array(None)
b = masked_array(None)
r = b.copy()
assert_equal(np.ma.where(True, a, a), r)
assert_equal(np.ma.where(True, b, b), r)
def test_where_with_masked_choice(self):
x = arange(10)
x[3] = masked
c = x >= 8
# Set False to masked
z = where(c, x, masked)
assert_(z.dtype is x.dtype)
assert_(z[3] is masked)
assert_(z[4] is masked)
assert_(z[7] is masked)
assert_(z[8] is not masked)
assert_(z[9] is not masked)
assert_equal(x, z)
# Set True to masked
z = where(c, masked, x)
assert_(z.dtype is x.dtype)
assert_(z[3] is masked)
assert_(z[4] is not masked)
assert_(z[7] is not masked)
assert_(z[8] is masked)
assert_(z[9] is masked)
def test_where_with_masked_condition(self):
x = array([1., 2., 3., 4., 5.])
c = array([1, 1, 1, 0, 0])
x[2] = masked
z = where(c, x, -x)
assert_equal(z, [1., 2., 0., -4., -5])
c[0] = masked
z = where(c, x, -x)
assert_equal(z, [1., 2., 0., -4., -5])
assert_(z[0] is masked)
assert_(z[1] is not masked)
assert_(z[2] is masked)
x = arange(1, 6)
x[-1] = masked
y = arange(1, 6) * 10
y[2] = masked
c = array([1, 1, 1, 0, 0], mask=[1, 0, 0, 0, 0])
cm = c.filled(1)
z = where(c, x, y)
zm = where(cm, x, y)
assert_equal(z, zm)
assert_(getmask(zm) is nomask)
assert_equal(zm, [1, 2, 3, 40, 50])
z = where(c, masked, 1)
assert_equal(z, [99, 99, 99, 1, 1])
z = where(c, 1, masked)
assert_equal(z, [99, 1, 1, 99, 99])
def test_where_type(self):
# Test the type conservation with where
x = np.arange(4, dtype=np.int32)
y = np.arange(4, dtype=np.float32) * 2.2
test = where(x > 1.5, y, x).dtype
control = np.find_common_type([np.int32, np.float32], [])
assert_equal(test, control)
def test_choose(self):
# Test choose
choices = [[0, 1, 2, 3], [10, 11, 12, 13],
[20, 21, 22, 23], [30, 31, 32, 33]]
chosen = choose([2, 3, 1, 0], choices)
assert_equal(chosen, array([20, 31, 12, 3]))
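        # mode='clip' clamps out-of-range indices to the nearest valid index,
        # while mode='wrap' reduces them modulo the number of choices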
chosen = choose([2, 4, 1, 0], choices, mode='clip')
assert_equal(chosen, array([20, 31, 12, 3]))
chosen = choose([2, 4, 1, 0], choices, mode='wrap')
assert_equal(chosen, array([20, 1, 12, 3]))
# Check with some masked indices
indices_ = array([2, 4, 1, 0], mask=[1, 0, 0, 1])
chosen = choose(indices_, choices, mode='wrap')
assert_equal(chosen, array([99, 1, 12, 99]))
assert_equal(chosen.mask, [1, 0, 0, 1])
# Check with some masked choices
choices = array(choices, mask=[[0, 0, 0, 1], [1, 1, 0, 1],
[1, 0, 0, 0], [0, 0, 0, 0]])
indices_ = [2, 3, 1, 0]
chosen = choose(indices_, choices, mode='wrap')
assert_equal(chosen, array([20, 31, 12, 3]))
assert_equal(chosen.mask, [1, 0, 0, 1])
def test_choose_with_out(self):
# Test choose with an explicit out keyword
choices = [[0, 1, 2, 3], [10, 11, 12, 13],
[20, 21, 22, 23], [30, 31, 32, 33]]
store = empty(4, dtype=int)
chosen = choose([2, 3, 1, 0], choices, out=store)
assert_equal(store, array([20, 31, 12, 3]))
self.assertTrue(store is chosen)
# Check with some masked indices + out
store = empty(4, dtype=int)
indices_ = array([2, 3, 1, 0], mask=[1, 0, 0, 1])
chosen = choose(indices_, choices, mode='wrap', out=store)
assert_equal(store, array([99, 31, 12, 99]))
assert_equal(store.mask, [1, 0, 0, 1])
        # Check with some masked choices + out in an ndarray
choices = array(choices, mask=[[0, 0, 0, 1], [1, 1, 0, 1],
[1, 0, 0, 0], [0, 0, 0, 0]])
indices_ = [2, 3, 1, 0]
store = empty(4, dtype=int).view(ndarray)
chosen = choose(indices_, choices, mode='wrap', out=store)
assert_equal(store, array([999999, 31, 12, 999999]))
def test_reshape(self):
a = arange(10)
a[0] = masked
# Try the default
b = a.reshape((5, 2))
assert_equal(b.shape, (5, 2))
self.assertTrue(b.flags['C'])
# Try w/ arguments as list instead of tuple
b = a.reshape(5, 2)
assert_equal(b.shape, (5, 2))
self.assertTrue(b.flags['C'])
# Try w/ order
b = a.reshape((5, 2), order='F')
assert_equal(b.shape, (5, 2))
self.assertTrue(b.flags['F'])
# Try w/ order
b = a.reshape(5, 2, order='F')
assert_equal(b.shape, (5, 2))
self.assertTrue(b.flags['F'])
c = np.reshape(a, (2, 5))
self.assertTrue(isinstance(c, MaskedArray))
assert_equal(c.shape, (2, 5))
self.assertTrue(c[0, 0] is masked)
self.assertTrue(c.flags['C'])
def test_make_mask_descr(self):
# Test make_mask_descr
# Flexible
ntype = [('a', np.float), ('b', np.float)]
test = make_mask_descr(ntype)
assert_equal(test, [('a', np.bool), ('b', np.bool)])
# Standard w/ shape
ntype = (np.float, 2)
test = make_mask_descr(ntype)
assert_equal(test, (np.bool, 2))
# Standard standard
ntype = np.float
test = make_mask_descr(ntype)
assert_equal(test, np.dtype(np.bool))
# Nested
ntype = [('a', np.float), ('b', [('ba', np.float), ('bb', np.float)])]
test = make_mask_descr(ntype)
control = np.dtype([('a', 'b1'), ('b', [('ba', 'b1'), ('bb', 'b1')])])
assert_equal(test, control)
        # Named + shape
ntype = [('a', (np.float, 2))]
test = make_mask_descr(ntype)
assert_equal(test, np.dtype([('a', (np.bool, 2))]))
# 2 names
ntype = [(('A', 'a'), float)]
test = make_mask_descr(ntype)
assert_equal(test, np.dtype([(('A', 'a'), bool)]))
def test_make_mask(self):
# Test make_mask
# w/ a list as an input
mask = [0, 1]
test = make_mask(mask)
assert_equal(test.dtype, MaskType)
assert_equal(test, [0, 1])
# w/ a ndarray as an input
mask = np.array([0, 1], dtype=np.bool)
test = make_mask(mask)
assert_equal(test.dtype, MaskType)
assert_equal(test, [0, 1])
# w/ a flexible-type ndarray as an input - use default
mdtype = [('a', np.bool), ('b', np.bool)]
mask = np.array([(0, 0), (0, 1)], dtype=mdtype)
test = make_mask(mask)
assert_equal(test.dtype, MaskType)
assert_equal(test, [1, 1])
# w/ a flexible-type ndarray as an input - use input dtype
mdtype = [('a', np.bool), ('b', np.bool)]
mask = np.array([(0, 0), (0, 1)], dtype=mdtype)
test = make_mask(mask, dtype=mask.dtype)
assert_equal(test.dtype, mdtype)
assert_equal(test, mask)
# w/ a flexible-type ndarray as an input - use input dtype
mdtype = [('a', np.float), ('b', np.float)]
bdtype = [('a', np.bool), ('b', np.bool)]
mask = np.array([(0, 0), (0, 1)], dtype=mdtype)
test = make_mask(mask, dtype=mask.dtype)
assert_equal(test.dtype, bdtype)
assert_equal(test, np.array([(0, 0), (0, 1)], dtype=bdtype))
def test_mask_or(self):
# Initialize
mtype = [('a', np.bool), ('b', np.bool)]
mask = np.array([(0, 0), (0, 1), (1, 0), (0, 0)], dtype=mtype)
# Test using nomask as input
test = mask_or(mask, nomask)
assert_equal(test, mask)
test = mask_or(nomask, mask)
assert_equal(test, mask)
# Using False as input
test = mask_or(mask, False)
assert_equal(test, mask)
# Using True as input. Won't work, but keep it for the kicks
# test = mask_or(mask, True)
# control = np.array([(1, 1), (1, 1), (1, 1), (1, 1)], dtype=mtype)
# assert_equal(test, control)
# Using another array w / the same dtype
other = np.array([(0, 1), (0, 1), (0, 1), (0, 1)], dtype=mtype)
test = mask_or(mask, other)
control = np.array([(0, 1), (0, 1), (1, 1), (0, 1)], dtype=mtype)
assert_equal(test, control)
# Using another array w / a different dtype
othertype = [('A', np.bool), ('B', np.bool)]
other = np.array([(0, 1), (0, 1), (0, 1), (0, 1)], dtype=othertype)
try:
test = mask_or(mask, other)
except ValueError:
pass
# Using nested arrays
dtype = [('a', np.bool), ('b', [('ba', np.bool), ('bb', np.bool)])]
amask = np.array([(0, (1, 0)), (0, (1, 0))], dtype=dtype)
bmask = np.array([(1, (0, 1)), (0, (0, 0))], dtype=dtype)
cntrl = np.array([(1, (1, 1)), (0, (1, 0))], dtype=dtype)
assert_equal(mask_or(amask, bmask), cntrl)
def test_flatten_mask(self):
# Tests flatten mask
        # Standard dtype
mask = np.array([0, 0, 1], dtype=np.bool)
assert_equal(flatten_mask(mask), mask)
# Flexible dtype
mask = np.array([(0, 0), (0, 1)], dtype=[('a', bool), ('b', bool)])
test = flatten_mask(mask)
control = np.array([0, 0, 0, 1], dtype=bool)
assert_equal(test, control)
mdtype = [('a', bool), ('b', [('ba', bool), ('bb', bool)])]
data = [(0, (0, 0)), (0, (0, 1))]
mask = np.array(data, dtype=mdtype)
test = flatten_mask(mask)
control = np.array([0, 0, 0, 0, 0, 1], dtype=bool)
assert_equal(test, control)
def test_on_ndarray(self):
# Test functions on ndarrays
a = np.array([1, 2, 3, 4])
m = array(a, mask=False)
test = anom(a)
assert_equal(test, m.anom())
test = reshape(a, (2, 2))
assert_equal(test, m.reshape(2, 2))
def test_compress(self):
# Test compress function on ndarray and masked array
# Address Github #2495.
arr = np.arange(8)
arr.shape = 4, 2
cond = np.array([True, False, True, True])
control = arr[[0, 2, 3]]
test = np.ma.compress(cond, arr, axis=0)
assert_equal(test, control)
marr = np.ma.array(arr)
test = np.ma.compress(cond, marr, axis=0)
assert_equal(test, control)
def test_compressed(self):
# Test ma.compressed function.
# Address gh-4026
a = np.ma.array([1, 2])
test = np.ma.compressed(a)
assert_(type(test) is np.ndarray)
# Test case when input data is ndarray subclass
class A(np.ndarray):
pass
a = np.ma.array(A(shape=0))
test = np.ma.compressed(a)
assert_(type(test) is A)
        # Test that compressed flattens
test = np.ma.compressed([[1],[2]])
assert_equal(test.ndim, 1)
test = np.ma.compressed([[[[[1]]]]])
assert_equal(test.ndim, 1)
# Test case when input is MaskedArray subclass
class M(MaskedArray):
pass
test = np.ma.compressed(M(shape=(0,1,2)))
assert_equal(test.ndim, 1)
        # with .compressed() overridden
class M(MaskedArray):
def compressed(self):
return 42
test = np.ma.compressed(M(shape=(0,1,2)))
assert_equal(test, 42)
class TestMaskedFields(TestCase):
def setUp(self):
ilist = [1, 2, 3, 4, 5]
flist = [1.1, 2.2, 3.3, 4.4, 5.5]
slist = ['one', 'two', 'three', 'four', 'five']
ddtype = [('a', int), ('b', float), ('c', '|S8')]
mdtype = [('a', bool), ('b', bool), ('c', bool)]
mask = [0, 1, 0, 0, 1]
base = array(list(zip(ilist, flist, slist)), mask=mask, dtype=ddtype)
self.data = dict(base=base, mask=mask, ddtype=ddtype, mdtype=mdtype)
def test_set_records_masks(self):
base = self.data['base']
mdtype = self.data['mdtype']
# Set w/ nomask or masked
base.mask = nomask
assert_equal_records(base._mask, np.zeros(base.shape, dtype=mdtype))
base.mask = masked
assert_equal_records(base._mask, np.ones(base.shape, dtype=mdtype))
# Set w/ simple boolean
base.mask = False
assert_equal_records(base._mask, np.zeros(base.shape, dtype=mdtype))
base.mask = True
assert_equal_records(base._mask, np.ones(base.shape, dtype=mdtype))
# Set w/ list
base.mask = [0, 0, 0, 1, 1]
assert_equal_records(base._mask,
np.array([(x, x, x) for x in [0, 0, 0, 1, 1]],
dtype=mdtype))
def test_set_record_element(self):
# Check setting an element of a record)
base = self.data['base']
(base_a, base_b, base_c) = (base['a'], base['b'], base['c'])
base[0] = (pi, pi, 'pi')
assert_equal(base_a.dtype, int)
assert_equal(base_a._data, [3, 2, 3, 4, 5])
assert_equal(base_b.dtype, float)
assert_equal(base_b._data, [pi, 2.2, 3.3, 4.4, 5.5])
assert_equal(base_c.dtype, '|S8')
assert_equal(base_c._data,
asbytes_nested(['pi', 'two', 'three', 'four', 'five']))
def test_set_record_slice(self):
base = self.data['base']
(base_a, base_b, base_c) = (base['a'], base['b'], base['c'])
base[:3] = (pi, pi, 'pi')
assert_equal(base_a.dtype, int)
assert_equal(base_a._data, [3, 3, 3, 4, 5])
assert_equal(base_b.dtype, float)
assert_equal(base_b._data, [pi, pi, pi, 4.4, 5.5])
assert_equal(base_c.dtype, '|S8')
assert_equal(base_c._data,
asbytes_nested(['pi', 'pi', 'pi', 'four', 'five']))
def test_mask_element(self):
"Check record access"
base = self.data['base']
base[0] = masked
for n in ('a', 'b', 'c'):
assert_equal(base[n].mask, [1, 1, 0, 0, 1])
assert_equal(base[n]._data, base._data[n])
def test_getmaskarray(self):
# Test getmaskarray on flexible dtype
ndtype = [('a', int), ('b', float)]
test = empty(3, dtype=ndtype)
assert_equal(getmaskarray(test),
np.array([(0, 0), (0, 0), (0, 0)],
dtype=[('a', '|b1'), ('b', '|b1')]))
test[:] = masked
assert_equal(getmaskarray(test),
np.array([(1, 1), (1, 1), (1, 1)],
dtype=[('a', '|b1'), ('b', '|b1')]))
def test_view(self):
# Test view w/ flexible dtype
iterator = list(zip(np.arange(10), np.random.rand(10)))
data = np.array(iterator)
a = array(iterator, dtype=[('a', float), ('b', float)])
a.mask[0] = (1, 0)
controlmask = np.array([1] + 19 * [0], dtype=bool)
# Transform globally to simple dtype
test = a.view(float)
assert_equal(test, data.ravel())
assert_equal(test.mask, controlmask)
        # Transform globally to a subarray dtype
test = a.view((float, 2))
assert_equal(test, data)
assert_equal(test.mask, controlmask.reshape(-1, 2))
test = a.view((float, 2), np.matrix)
assert_equal(test, data)
self.assertTrue(isinstance(test, np.matrix))
def test_getitem(self):
ndtype = [('a', float), ('b', float)]
a = array(list(zip(np.random.rand(10), np.arange(10))), dtype=ndtype)
a.mask = np.array(list(zip([0, 0, 0, 0, 0, 0, 0, 0, 1, 1],
[1, 0, 0, 0, 0, 0, 0, 0, 1, 0])),
dtype=[('a', bool), ('b', bool)])
# No mask
self.assertTrue(isinstance(a[1], MaskedArray))
# One element masked
self.assertTrue(isinstance(a[0], MaskedArray))
assert_equal_records(a[0]._data, a._data[0])
assert_equal_records(a[0]._mask, a._mask[0])
# All element masked
self.assertTrue(isinstance(a[-2], MaskedArray))
assert_equal_records(a[-2]._data, a._data[-2])
assert_equal_records(a[-2]._mask, a._mask[-2])
def test_setitem(self):
# Issue 4866: check that one can set individual items in [record][col]
# and [col][record] order
ndtype = np.dtype([('a', float), ('b', int)])
ma = np.ma.MaskedArray([(1.0, 1), (2.0, 2)], dtype=ndtype)
ma['a'][1] = 3.0
assert_equal(ma['a'], np.array([1.0, 3.0]))
ma[1]['a'] = 4.0
assert_equal(ma['a'], np.array([1.0, 4.0]))
# Issue 2403
mdtype = np.dtype([('a', bool), ('b', bool)])
# soft mask
control = np.array([(False, True), (True, True)], dtype=mdtype)
a = np.ma.masked_all((2,), dtype=ndtype)
a['a'][0] = 2
assert_equal(a.mask, control)
a = np.ma.masked_all((2,), dtype=ndtype)
a[0]['a'] = 2
assert_equal(a.mask, control)
# hard mask
control = np.array([(True, True), (True, True)], dtype=mdtype)
a = np.ma.masked_all((2,), dtype=ndtype)
a.harden_mask()
a['a'][0] = 2
assert_equal(a.mask, control)
a = np.ma.masked_all((2,), dtype=ndtype)
a.harden_mask()
a[0]['a'] = 2
assert_equal(a.mask, control)
def test_element_len(self):
# check that len() works for mvoid (Github issue #576)
for rec in self.data['base']:
assert_equal(len(rec), len(self.data['ddtype']))
class TestMaskedView(TestCase):
def setUp(self):
iterator = list(zip(np.arange(10), np.random.rand(10)))
data = np.array(iterator)
a = array(iterator, dtype=[('a', float), ('b', float)])
a.mask[0] = (1, 0)
controlmask = np.array([1] + 19 * [0], dtype=bool)
self.data = (data, a, controlmask)
def test_view_to_nothing(self):
(data, a, controlmask) = self.data
test = a.view()
self.assertTrue(isinstance(test, MaskedArray))
assert_equal(test._data, a._data)
assert_equal(test._mask, a._mask)
def test_view_to_type(self):
(data, a, controlmask) = self.data
test = a.view(np.ndarray)
self.assertTrue(not isinstance(test, MaskedArray))
assert_equal(test, a._data)
assert_equal_records(test, data.view(a.dtype).squeeze())
def test_view_to_simple_dtype(self):
(data, a, controlmask) = self.data
# View globally
test = a.view(float)
self.assertTrue(isinstance(test, MaskedArray))
assert_equal(test, data.ravel())
assert_equal(test.mask, controlmask)
def test_view_to_flexible_dtype(self):
(data, a, controlmask) = self.data
test = a.view([('A', float), ('B', float)])
assert_equal(test.mask.dtype.names, ('A', 'B'))
assert_equal(test['A'], a['a'])
assert_equal(test['B'], a['b'])
test = a[0].view([('A', float), ('B', float)])
self.assertTrue(isinstance(test, MaskedArray))
assert_equal(test.mask.dtype.names, ('A', 'B'))
assert_equal(test['A'], a['a'][0])
assert_equal(test['B'], a['b'][0])
test = a[-1].view([('A', float), ('B', float)])
self.assertTrue(isinstance(test, MaskedArray))
assert_equal(test.dtype.names, ('A', 'B'))
assert_equal(test['A'], a['a'][-1])
assert_equal(test['B'], a['b'][-1])
def test_view_to_subdtype(self):
(data, a, controlmask) = self.data
# View globally
test = a.view((float, 2))
self.assertTrue(isinstance(test, MaskedArray))
assert_equal(test, data)
assert_equal(test.mask, controlmask.reshape(-1, 2))
# View on 1 masked element
test = a[0].view((float, 2))
self.assertTrue(isinstance(test, MaskedArray))
assert_equal(test, data[0])
assert_equal(test.mask, (1, 0))
# View on 1 unmasked element
test = a[-1].view((float, 2))
self.assertTrue(isinstance(test, MaskedArray))
assert_equal(test, data[-1])
def test_view_to_dtype_and_type(self):
(data, a, controlmask) = self.data
test = a.view((float, 2), np.matrix)
assert_equal(test, data)
self.assertTrue(isinstance(test, np.matrix))
self.assertTrue(not isinstance(test, MaskedArray))
def test_masked_array():
a = np.ma.array([0, 1, 2, 3], mask=[0, 0, 1, 0])
assert_equal(np.argwhere(a), [[1], [3]])
def test_append_masked_array():
a = np.ma.masked_equal([1,2,3], value=2)
b = np.ma.masked_equal([4,3,2], value=2)
result = np.ma.append(a, b)
expected_data = [1, 2, 3, 4, 3, 2]
expected_mask = [False, True, False, False, False, True]
assert_array_equal(result.data, expected_data)
assert_array_equal(result.mask, expected_mask)
a = np.ma.masked_all((2,2))
b = np.ma.ones((3,1))
result = np.ma.append(a, b)
expected_data = [1] * 3
expected_mask = [True] * 4 + [False] * 3
    assert_array_equal(result.data[-3:], expected_data)
assert_array_equal(result.mask, expected_mask)
result = np.ma.append(a, b, axis=None)
    assert_array_equal(result.data[-3:], expected_data)
assert_array_equal(result.mask, expected_mask)
def test_append_masked_array_along_axis():
a = np.ma.masked_equal([1,2,3], value=2)
b = np.ma.masked_values([[4, 5, 6], [7, 8, 9]], 7)
# When `axis` is specified, `values` must have the correct shape.
assert_raises(ValueError, np.ma.append, a, b, axis=0)
result = np.ma.append(a[np.newaxis,:], b, axis=0)
expected = np.ma.arange(1, 10)
expected[[1, 6]] = np.ma.masked
expected = expected.reshape((3,3))
assert_array_equal(result.data, expected.data)
assert_array_equal(result.mask, expected.mask)
def test_default_fill_value_complex():
# regression test for Python 3, where 'unicode' was not defined
assert default_fill_value(1 + 1j) == 1.e20 + 0.0j
if __name__ == "__main__":
run_module_suite()
|
import torch
import pickle
import logging
from .baseclasses import ScalarMonitor
from .meta import Regurgitate
class Saver(ScalarMonitor):
def __init__(self, save_monitor, model_file, settings_file, **kwargs):
self.saved = False
self.save_monitor = save_monitor
self.model_file = model_file
self.settings_file = settings_file
super().__init__('save', **kwargs)
    def call(self, model=None, settings=None, **kwargs):
        # Mirror the monitored value and checkpoint whenever it changes.
if self.value is None:
self.value = self.save_monitor.value
if self.save_monitor.changed:
self.save(model, settings)
self.value = self.save_monitor.value
return self.value
def save(self, model, settings):
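        # Save a CPU copy of the weights so the checkpoint can be loaded on
        # machines without a GPU, then move the model back onto the GPU.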
with open(self.model_file, 'wb') as f:
torch.save(model.cpu().state_dict(), f)
if torch.cuda.is_available():
model.cuda()
with open(self.settings_file, "wb") as f:
pickle.dump(settings, f)
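# Usage sketch (illustrative only; the helpers below are hypothetical and the
# exact monitor protocol depends on ScalarMonitor). It assumes the monitor
# exposes the ``value`` and ``changed`` attributes that Saver requires:
#
#     monitor = Regurgitate('valid_loss')
#     saver = Saver(monitor, 'model.pt', 'settings.pkl')
#     for epoch in range(n_epochs):
#         train_one_epoch(model)                # hypothetical helper
#         monitor(valid_loss=evaluate(model))   # hypothetical helper
#         saver(model=model, settings=settings)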
|
from distutils.core import setup
import importlib
import toolkit_library
from toolkit_library import inspector
def read_modules():
    """Gather the docstring of every module in the package."""
    docs = []
    package = inspector.PackageInspector(toolkit_library)
    for module_name in package.get_all_modules():
        # importlib.import_module is clearer and safer than exec/eval
        module = importlib.import_module(
            'toolkit_library.{0}'.format(module_name))
        docs.append('{0}\n'.format(module.__doc__))
    return ''.join(docs).rstrip()
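# Illustration (hypothetical module names): if the package held modules
# ``foo`` and ``bar`` with docstrings 'Foo helpers' and 'Bar helpers',
# read_modules() would return 'Foo helpers\nBar helpers'.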
readme = ''
with open('README_template', 'r') as file:
readme = file.read()
readme = readme.replace('{{ modules }}', read_modules())
with open('README.rst', 'w') as file:
file.write(readme)
setup(
name = toolkit_library.__name__,
version = toolkit_library.__version__,
url = 'https://github.com/tylerlong/toolkit_library',
license = 'BSD',
author = toolkit_library.__author__,
author_email = 'tyler4long@gmail.com',
description = 'Toolkit Library, full of useful toolkits',
long_description = readme,
packages = ['toolkit_library', ],
platforms = 'any',
classifiers = [
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Software Development :: Libraries :: Python Modules',
]
)
|
"""
This module gathers tree-based methods, including decision, regression and
randomized trees. Single and multi-output problems are both handled.
"""
from __future__ import division
import numbers
from abc import ABCMeta
from abc import abstractmethod
from math import ceil
import numpy as np
from scipy.sparse import issparse
from ..base import BaseEstimator
from ..base import ClassifierMixin
from ..base import RegressorMixin
from ..externals import six
from ..feature_selection.from_model import _LearntSelectorMixin
from ..utils import check_array
from ..utils import check_random_state
from ..utils import compute_sample_weight
from ..utils.multiclass import check_classification_targets
from ..exceptions import NotFittedError
from ._criterion import Criterion
from ._splitter import Splitter
from ._tree import DepthFirstTreeBuilder
from ._tree import BestFirstTreeBuilder
from ._tree import Tree
from . import _tree, _splitter, _criterion
__all__ = ["DecisionTreeClassifier",
"DecisionTreeRegressor",
"ExtraTreeClassifier",
"ExtraTreeRegressor"]
DTYPE = _tree.DTYPE
DOUBLE = _tree.DOUBLE
CRITERIA_CLF = {"gini": _criterion.Gini, "entropy": _criterion.Entropy}
CRITERIA_REG = {"mse": _criterion.MSE, "friedman_mse": _criterion.FriedmanMSE,
"mae": _criterion.MAE}
DENSE_SPLITTERS = {"best": _splitter.BestSplitter,
"random": _splitter.RandomSplitter}
SPARSE_SPLITTERS = {"best": _splitter.BestSparseSplitter,
"random": _splitter.RandomSparseSplitter}
class BaseDecisionTree(six.with_metaclass(ABCMeta, BaseEstimator,
_LearntSelectorMixin)):
"""Base class for decision trees.
Warning: This class should not be used directly.
Use derived classes instead.
"""
@abstractmethod
def __init__(self,
criterion,
splitter,
max_depth,
min_samples_split,
min_samples_leaf,
min_weight_fraction_leaf,
max_features,
max_leaf_nodes,
random_state,
min_impurity_split,
class_weight=None,
presort=False):
self.criterion = criterion
self.splitter = splitter
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.random_state = random_state
self.max_leaf_nodes = max_leaf_nodes
self.min_impurity_split = min_impurity_split
self.class_weight = class_weight
self.presort = presort
self.n_features_ = None
self.n_outputs_ = None
self.classes_ = None
self.n_classes_ = None
self.tree_ = None
self.max_features_ = None
def fit(self, X, y, sample_weight=None, check_input=True,
X_idx_sorted=None):
"""Build a decision tree from the training set (X, y).
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The training input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csc_matrix``.
y : array-like, shape = [n_samples] or [n_samples, n_outputs]
The target values (class labels in classification, real numbers in
regression). In the regression case, use ``dtype=np.float64`` and
``order='C'`` for maximum efficiency.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. In the case of
classification, splits are also ignored if they would result in any
single class carrying a negative weight in either child node.
check_input : boolean, (default=True)
Allow to bypass several input checking.
Don't use this parameter unless you know what you do.
X_idx_sorted : array-like, shape = [n_samples, n_features], optional
            The indexes of the sorted training input samples. If many trees
            are grown on the same dataset, this allows the ordering to be
cached between trees. If None, the data will be sorted here.
Don't use this parameter unless you know what to do.
Returns
-------
self : object
Returns self.
"""
random_state = check_random_state(self.random_state)
if check_input:
X = check_array(X, dtype=DTYPE, accept_sparse="csc")
y = check_array(y, ensure_2d=False, dtype=None)
if issparse(X):
X.sort_indices()
if X.indices.dtype != np.intc or X.indptr.dtype != np.intc:
raise ValueError("No support for np.int64 index based "
"sparse matrices")
# Determine output settings
n_samples, self.n_features_ = X.shape
is_classification = isinstance(self, ClassifierMixin)
y = np.atleast_1d(y)
expanded_class_weight = None
if y.ndim == 1:
            # reshape is necessary to preserve the data contiguity, which
            # [:, np.newaxis] does not.
y = np.reshape(y, (-1, 1))
self.n_outputs_ = y.shape[1]
if is_classification:
check_classification_targets(y)
y = np.copy(y)
self.classes_ = []
self.n_classes_ = []
if self.class_weight is not None:
y_original = np.copy(y)
y_encoded = np.zeros(y.shape, dtype=np.int)
for k in range(self.n_outputs_):
classes_k, y_encoded[:, k] = np.unique(y[:, k],
return_inverse=True)
self.classes_.append(classes_k)
self.n_classes_.append(classes_k.shape[0])
y = y_encoded
if self.class_weight is not None:
expanded_class_weight = compute_sample_weight(
self.class_weight, y_original)
else:
self.classes_ = [None] * self.n_outputs_
self.n_classes_ = [1] * self.n_outputs_
self.n_classes_ = np.array(self.n_classes_, dtype=np.intp)
if getattr(y, "dtype", None) != DOUBLE or not y.flags.contiguous:
y = np.ascontiguousarray(y, dtype=DOUBLE)
# Check parameters
max_depth = ((2 ** 31) - 1 if self.max_depth is None
else self.max_depth)
max_leaf_nodes = (-1 if self.max_leaf_nodes is None
else self.max_leaf_nodes)
if isinstance(self.min_samples_leaf, (numbers.Integral, np.integer)):
min_samples_leaf = self.min_samples_leaf
else: # float
min_samples_leaf = int(ceil(self.min_samples_leaf * n_samples))
if isinstance(self.min_samples_split, (numbers.Integral, np.integer)):
min_samples_split = self.min_samples_split
else: # float
min_samples_split = int(ceil(self.min_samples_split * n_samples))
min_samples_split = max(2, min_samples_split)
min_samples_split = max(min_samples_split, 2 * min_samples_leaf)
if isinstance(self.max_features, six.string_types):
if self.max_features == "auto":
if is_classification:
max_features = max(1, int(np.sqrt(self.n_features_)))
else:
max_features = self.n_features_
elif self.max_features == "sqrt":
max_features = max(1, int(np.sqrt(self.n_features_)))
elif self.max_features == "log2":
max_features = max(1, int(np.log2(self.n_features_)))
else:
raise ValueError(
'Invalid value for max_features. Allowed string '
'values are "auto", "sqrt" or "log2".')
elif self.max_features is None:
max_features = self.n_features_
elif isinstance(self.max_features, (numbers.Integral, np.integer)):
max_features = self.max_features
else: # float
if self.max_features > 0.0:
max_features = max(1,
int(self.max_features * self.n_features_))
else:
max_features = 0
self.max_features_ = max_features
if len(y) != n_samples:
raise ValueError("Number of labels=%d does not match "
"number of samples=%d" % (len(y), n_samples))
if not (0. < self.min_samples_split <= 1. or
2 <= self.min_samples_split):
raise ValueError("min_samples_split must be in at least 2"
" or in (0, 1], got %s" % min_samples_split)
if not (0. < self.min_samples_leaf <= 0.5 or
1 <= self.min_samples_leaf):
raise ValueError("min_samples_leaf must be at least than 1 "
"or in (0, 0.5], got %s" % min_samples_leaf)
if not 0 <= self.min_weight_fraction_leaf <= 0.5:
raise ValueError("min_weight_fraction_leaf must in [0, 0.5]")
if max_depth <= 0:
raise ValueError("max_depth must be greater than zero. ")
if not (0 < max_features <= self.n_features_):
raise ValueError("max_features must be in (0, n_features]")
if not isinstance(max_leaf_nodes, (numbers.Integral, np.integer)):
raise ValueError("max_leaf_nodes must be integral number but was "
"%r" % max_leaf_nodes)
if -1 < max_leaf_nodes < 2:
raise ValueError(("max_leaf_nodes {0} must be either smaller than "
"0 or larger than 1").format(max_leaf_nodes))
if sample_weight is not None:
if (getattr(sample_weight, "dtype", None) != DOUBLE or
not sample_weight.flags.contiguous):
sample_weight = np.ascontiguousarray(
sample_weight, dtype=DOUBLE)
if len(sample_weight.shape) > 1:
raise ValueError("Sample weights array has more "
"than one dimension: %d" %
len(sample_weight.shape))
if len(sample_weight) != n_samples:
raise ValueError("Number of weights=%d does not match "
"number of samples=%d" %
(len(sample_weight), n_samples))
if expanded_class_weight is not None:
if sample_weight is not None:
sample_weight = sample_weight * expanded_class_weight
else:
sample_weight = expanded_class_weight
# Set min_weight_leaf from min_weight_fraction_leaf
if self.min_weight_fraction_leaf != 0. and sample_weight is not None:
min_weight_leaf = (self.min_weight_fraction_leaf *
np.sum(sample_weight))
else:
min_weight_leaf = 0.
if self.min_impurity_split < 0.:
raise ValueError("min_impurity_split must be greater than or equal "
"to 0")
presort = self.presort
# Allow presort to be 'auto', which means True if the dataset is dense,
# otherwise it will be False.
if self.presort == 'auto' and issparse(X):
presort = False
elif self.presort == 'auto':
presort = True
if presort is True and issparse(X):
raise ValueError("Presorting is not supported for sparse "
"matrices.")
# If multiple trees are built on the same dataset, we only want to
# presort once. Splitters now can accept presorted indices if desired,
# but do not handle any presorting themselves. Ensemble algorithms
# which desire presorting must do presorting themselves and pass that
# matrix into each tree.
if X_idx_sorted is None and presort:
X_idx_sorted = np.asfortranarray(np.argsort(X, axis=0),
dtype=np.int32)
if presort and X_idx_sorted.shape != X.shape:
raise ValueError("The shape of X (X.shape = {}) doesn't match "
"the shape of X_idx_sorted (X_idx_sorted"
".shape = {})".format(X.shape,
X_idx_sorted.shape))
# Build tree
criterion = self.criterion
if not isinstance(criterion, Criterion):
if is_classification:
criterion = CRITERIA_CLF[self.criterion](self.n_outputs_,
self.n_classes_)
else:
criterion = CRITERIA_REG[self.criterion](self.n_outputs_,
n_samples)
SPLITTERS = SPARSE_SPLITTERS if issparse(X) else DENSE_SPLITTERS
splitter = self.splitter
if not isinstance(self.splitter, Splitter):
splitter = SPLITTERS[self.splitter](criterion,
self.max_features_,
min_samples_leaf,
min_weight_leaf,
random_state,
self.presort)
self.tree_ = Tree(self.n_features_, self.n_classes_, self.n_outputs_)
# Use BestFirst if max_leaf_nodes given; use DepthFirst otherwise
if max_leaf_nodes < 0:
builder = DepthFirstTreeBuilder(splitter, min_samples_split,
min_samples_leaf,
min_weight_leaf,
max_depth, self.min_impurity_split)
else:
builder = BestFirstTreeBuilder(splitter, min_samples_split,
min_samples_leaf,
min_weight_leaf,
max_depth,
max_leaf_nodes, self.min_impurity_split)
builder.build(self.tree_, X, y, sample_weight, X_idx_sorted)
if self.n_outputs_ == 1:
self.n_classes_ = self.n_classes_[0]
self.classes_ = self.classes_[0]
return self
def _validate_X_predict(self, X, check_input):
"""Validate X whenever one tries to predict, apply, predict_proba"""
if self.tree_ is None:
raise NotFittedError("Estimator not fitted, "
"call `fit` before exploiting the model.")
if check_input:
X = check_array(X, dtype=DTYPE, accept_sparse="csr")
if issparse(X) and (X.indices.dtype != np.intc or
X.indptr.dtype != np.intc):
raise ValueError("No support for np.int64 index based "
"sparse matrices")
n_features = X.shape[1]
if self.n_features_ != n_features:
raise ValueError("Number of features of the model must "
"match the input. Model n_features is %s and "
"input n_features is %s "
% (self.n_features_, n_features))
return X
def predict(self, X, check_input=True):
"""Predict class or regression value for X.
For a classification model, the predicted class for each sample in X is
returned. For a regression model, the predicted value based on X is
returned.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : boolean, (default=True)
            Allow bypassing several input checks.
            Don't use this parameter unless you know what you are doing.
Returns
-------
y : array of shape = [n_samples] or [n_samples, n_outputs]
            The predicted classes, or the predicted values.
"""
X = self._validate_X_predict(X, check_input)
proba = self.tree_.predict(X)
n_samples = X.shape[0]
# Classification
if isinstance(self, ClassifierMixin):
if self.n_outputs_ == 1:
return self.classes_.take(np.argmax(proba, axis=1), axis=0)
else:
predictions = np.zeros((n_samples, self.n_outputs_))
for k in range(self.n_outputs_):
predictions[:, k] = self.classes_[k].take(
np.argmax(proba[:, k], axis=1),
axis=0)
return predictions
# Regression
else:
if self.n_outputs_ == 1:
return proba[:, 0]
else:
return proba[:, :, 0]
def apply(self, X, check_input=True):
"""
Returns the index of the leaf that each sample is predicted as.
.. versionadded:: 0.17
Parameters
----------
X : array_like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : boolean, (default=True)
            Allow bypassing several input checks.
            Don't use this parameter unless you know what you are doing.
Returns
-------
X_leaves : array_like, shape = [n_samples,]
For each datapoint x in X, return the index of the leaf x
ends up in. Leaves are numbered within
``[0; self.tree_.node_count)``, possibly with gaps in the
numbering.
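        Examples
        --------
        An illustrative sketch (added here, not in the original docstring);
        the tiny dataset below is hypothetical:
        >>> from sklearn.tree import DecisionTreeClassifier
        >>> est = DecisionTreeClassifier(max_depth=2, random_state=0)
        >>> est = est.fit([[0], [1], [2], [3]], [0, 0, 1, 1])
        >>> est.apply([[0], [3]])  # doctest: +SKIP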
"""
X = self._validate_X_predict(X, check_input)
return self.tree_.apply(X)
def decision_path(self, X, check_input=True):
"""Return the decision path in the tree
Parameters
----------
X : array_like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : boolean, (default=True)
            Allow bypassing several input checks.
            Don't use this parameter unless you know what you are doing.
Returns
-------
indicator : sparse csr array, shape = [n_samples, n_nodes]
            Return a node indicator matrix where non-zero elements
            indicate that the samples go through the nodes.
"""
X = self._validate_X_predict(X, check_input)
return self.tree_.decision_path(X)
@property
def feature_importances_(self):
"""Return the feature importances.
The importance of a feature is computed as the (normalized) total
reduction of the criterion brought by that feature.
It is also known as the Gini importance.
Returns
-------
feature_importances_ : array, shape = [n_features]
"""
if self.tree_ is None:
raise NotFittedError("Estimator not fitted, call `fit` before"
" `feature_importances_`.")
return self.tree_.compute_feature_importances()
class DecisionTreeClassifier(BaseDecisionTree, ClassifierMixin):
"""A decision tree classifier.
Read more in the :ref:`User Guide <tree>`.
Parameters
----------
criterion : string, optional (default="gini")
The function to measure the quality of a split. Supported criteria are
"gini" for the Gini impurity and "entropy" for the information gain.
splitter : string, optional (default="best")
The strategy used to choose the split at each node. Supported
strategies are "best" to choose the best split and "random" to choose
the best random split.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_depth : int or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a percentage and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
min_samples_leaf : int, float, optional (default=1)
The minimum number of samples required to be at a leaf node:
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a percentage and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
max_leaf_nodes : int or None, optional (default=None)
Grow a tree with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
class_weight : dict, list of dicts, "balanced" or None, optional (default=None)
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
For multi-output, the weights of each column of y will be multiplied.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
min_impurity_split : float, optional (default=1e-7)
Threshold for early stopping in tree growth. A node will split
if its impurity is above the threshold, otherwise it is a leaf.
.. versionadded:: 0.18
presort : bool, optional (default=False)
Whether to presort the data to speed up the finding of best splits in
fitting. For the default settings of a decision tree on large
datasets, setting this to true may slow down the training process.
When using either a smaller dataset or a restricted depth, this may
speed up the training.
Attributes
----------
classes_ : array of shape = [n_classes] or a list of such arrays
The classes labels (single output problem),
or a list of arrays of class labels (multi-output problem).
feature_importances_ : array of shape = [n_features]
The feature importances. The higher, the more important the
feature. The importance of a feature is computed as the (normalized)
total reduction of the criterion brought by that feature. It is also
known as the Gini importance [4]_.
max_features_ : int,
The inferred value of max_features.
n_classes_ : int or list
The number of classes (for single output problems),
or a list containing the number of classes for each
output (for multi-output problems).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
tree_ : Tree object
The underlying Tree object.
See also
--------
DecisionTreeRegressor
References
----------
.. [1] https://en.wikipedia.org/wiki/Decision_tree_learning
.. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
and Regression Trees", Wadsworth, Belmont, CA, 1984.
.. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
Learning", Springer, 2009.
.. [4] L. Breiman, and A. Cutler, "Random Forests",
http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn.model_selection import cross_val_score
>>> from sklearn.tree import DecisionTreeClassifier
>>> clf = DecisionTreeClassifier(random_state=0)
>>> iris = load_iris()
>>> cross_val_score(clf, iris.data, iris.target, cv=10)
... # doctest: +SKIP
...
array([ 1. , 0.93..., 0.86..., 0.93..., 0.93...,
0.93..., 0.93..., 1. , 0.93..., 1. ])
"""
def __init__(self,
criterion="gini",
splitter="best",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features=None,
random_state=None,
max_leaf_nodes=None,
min_impurity_split=1e-7,
class_weight=None,
presort=False):
super(DecisionTreeClassifier, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
class_weight=class_weight,
random_state=random_state,
min_impurity_split=min_impurity_split,
presort=presort)
def predict_proba(self, X, check_input=True):
"""Predict class probabilities of the input samples X.
The predicted class probability is the fraction of samples of the same
class in a leaf.
check_input : boolean, (default=True)
Allow to bypass several input checking.
Don't use this parameter unless you know what you do.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
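        Examples
        --------
        An illustrative sketch (added here, not in the original docstring);
        the data is hypothetical and each returned row sums to 1:
        >>> from sklearn.tree import DecisionTreeClassifier
        >>> clf = DecisionTreeClassifier(random_state=0)
        >>> clf = clf.fit([[0], [1], [2], [3]], [0, 0, 1, 1])
        >>> clf.predict_proba([[0.5]])  # doctest: +SKIP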
"""
X = self._validate_X_predict(X, check_input)
proba = self.tree_.predict(X)
if self.n_outputs_ == 1:
proba = proba[:, :self.n_classes_]
normalizer = proba.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba /= normalizer
return proba
else:
all_proba = []
for k in range(self.n_outputs_):
proba_k = proba[:, k, :self.n_classes_[k]]
normalizer = proba_k.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba_k /= normalizer
all_proba.append(proba_k)
return all_proba
def predict_log_proba(self, X):
"""Predict class log-probabilities of the input samples X.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class log-probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return np.log(proba)
else:
for k in range(self.n_outputs_):
proba[k] = np.log(proba[k])
return proba
class DecisionTreeRegressor(BaseDecisionTree, RegressorMixin):
"""A decision tree regressor.
Read more in the :ref:`User Guide <tree>`.
Parameters
----------
criterion : string, optional (default="mse")
The function to measure the quality of a split. Supported criteria
are "mse" for the mean squared error, which is equal to variance
reduction as feature selection criterion, and "mae" for the mean
absolute error.
.. versionadded:: 0.18
Mean Absolute Error (MAE) criterion.
splitter : string, optional (default="best")
The strategy used to choose the split at each node. Supported
strategies are "best" to choose the best split and "random" to choose
the best random split.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_depth : int or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a percentage and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
min_samples_leaf : int, float, optional (default=1)
The minimum number of samples required to be at a leaf node:
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a percentage and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
max_leaf_nodes : int or None, optional (default=None)
Grow a tree with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
min_impurity_split : float, optional (default=1e-7)
Threshold for early stopping in tree growth. If the impurity
of a node is below the threshold, the node is a leaf.
.. versionadded:: 0.18
presort : bool, optional (default=False)
Whether to presort the data to speed up the finding of best splits in
fitting. For the default settings of a decision tree on large
datasets, setting this to true may slow down the training process.
When using either a smaller dataset or a restricted depth, this may
speed up the training.
Attributes
----------
feature_importances_ : array of shape = [n_features]
The feature importances.
The higher, the more important the feature.
The importance of a feature is computed as the
(normalized) total reduction of the criterion brought
by that feature. It is also known as the Gini importance [4]_.
max_features_ : int,
The inferred value of max_features.
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
tree_ : Tree object
The underlying Tree object.
See also
--------
DecisionTreeClassifier
References
----------
.. [1] https://en.wikipedia.org/wiki/Decision_tree_learning
.. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
and Regression Trees", Wadsworth, Belmont, CA, 1984.
.. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
Learning", Springer, 2009.
.. [4] L. Breiman, and A. Cutler, "Random Forests",
http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm
Examples
--------
>>> from sklearn.datasets import load_boston
>>> from sklearn.model_selection import cross_val_score
>>> from sklearn.tree import DecisionTreeRegressor
>>> boston = load_boston()
>>> regressor = DecisionTreeRegressor(random_state=0)
>>> cross_val_score(regressor, boston.data, boston.target, cv=10)
... # doctest: +SKIP
...
array([ 0.61..., 0.57..., -0.34..., 0.41..., 0.75...,
0.07..., 0.29..., 0.33..., -1.42..., -1.77...])
"""
def __init__(self,
criterion="mse",
splitter="best",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features=None,
random_state=None,
max_leaf_nodes=None,
min_impurity_split=1e-7,
presort=False):
super(DecisionTreeRegressor, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
random_state=random_state,
min_impurity_split=min_impurity_split,
presort=presort)
class ExtraTreeClassifier(DecisionTreeClassifier):
"""An extremely randomized tree classifier.
Extra-trees differ from classic decision trees in the way they are built.
When looking for the best split to separate the samples of a node into two
groups, random splits are drawn for each of the `max_features` randomly
selected features and the best split among those is chosen. When
    `max_features` is set to 1, this amounts to building a totally random
decision tree.
Warning: Extra-trees should only be used within ensemble methods.
Read more in the :ref:`User Guide <tree>`.
See also
--------
ExtraTreeRegressor, ExtraTreesClassifier, ExtraTreesRegressor
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
"""
def __init__(self,
criterion="gini",
splitter="random",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
random_state=None,
max_leaf_nodes=None,
min_impurity_split=1e-7,
class_weight=None):
super(ExtraTreeClassifier, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
class_weight=class_weight,
min_impurity_split=min_impurity_split,
random_state=random_state)
class ExtraTreeRegressor(DecisionTreeRegressor):
"""An extremely randomized tree regressor.
Extra-trees differ from classic decision trees in the way they are built.
When looking for the best split to separate the samples of a node into two
groups, random splits are drawn for each of the `max_features` randomly
selected features and the best split among those is chosen. When
    `max_features` is set to 1, this amounts to building a totally random
decision tree.
Warning: Extra-trees should only be used within ensemble methods.
Read more in the :ref:`User Guide <tree>`.
See also
--------
ExtraTreeClassifier, ExtraTreesClassifier, ExtraTreesRegressor
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
"""
def __init__(self,
criterion="mse",
splitter="random",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
random_state=None,
min_impurity_split=1e-7,
max_leaf_nodes=None):
super(ExtraTreeRegressor, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
min_impurity_split=min_impurity_split,
random_state=random_state)
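
# A minimal usage sketch appended for illustration (not part of the original
# module). Extra-trees are normally used inside ensembles; fitting a single
# ExtraTreeRegressor directly is only meant for quick experimentation, and the
# data below is hypothetical.
if __name__ == '__main__':
    X_demo = np.arange(10.0).reshape(-1, 1)   # one feature, ten samples
    y_demo = X_demo.ravel() ** 2
    demo_tree = ExtraTreeRegressor(random_state=0).fit(X_demo, y_demo)
    print(demo_tree.predict([[2.5]]))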
|
"""Adds the code parts to a resource APK."""
import argparse
import logging
import os
import shutil
import sys
import tempfile
import zipfile
import zlib
import finalize_apk
from util import build_utils
from util import diff_utils
from util import zipalign
zipalign.ApplyZipFileZipAlignFix()
_NO_COMPRESS_EXTENSIONS = ('.jpg', '.jpeg', '.png', '.gif', '.wav', '.mp2',
'.mp3', '.ogg', '.aac', '.mpg', '.mpeg', '.mid',
'.midi', '.smf', '.jet', '.rtttl', '.imy', '.xmf',
'.mp4', '.m4a', '.m4v', '.3gp', '.3gpp', '.3g2',
'.3gpp2', '.amr', '.awb', '.wma', '.wmv', '.webm')
def _ParseArgs(args):
parser = argparse.ArgumentParser()
build_utils.AddDepfileOption(parser)
parser.add_argument(
'--assets',
help='GYP-list of files to add as assets in the form '
'"srcPath:zipPath", where ":zipPath" is optional.')
parser.add_argument(
'--java-resources', help='GYP-list of java_resources JARs to include.')
parser.add_argument('--write-asset-list',
action='store_true',
help='Whether to create an assets/assets_list file.')
parser.add_argument(
'--uncompressed-assets',
help='Same as --assets, except disables compression.')
parser.add_argument('--resource-apk',
help='An .ap_ file built using aapt',
required=True)
parser.add_argument('--output-apk',
help='Path to the output file',
required=True)
parser.add_argument('--format', choices=['apk', 'bundle-module'],
default='apk', help='Specify output format.')
parser.add_argument('--dex-file',
help='Path to the classes.dex to use')
parser.add_argument(
'--jdk-libs-dex-file',
help='Path to classes.dex created by dex_jdk_libs.py')
parser.add_argument('--uncompress-dex', action='store_true',
help='Store .dex files uncompressed in the APK')
parser.add_argument('--native-libs',
action='append',
help='GYP-list of native libraries to include. '
'Can be specified multiple times.',
default=[])
parser.add_argument('--secondary-native-libs',
action='append',
help='GYP-list of native libraries for secondary '
'android-abi. Can be specified multiple times.',
default=[])
parser.add_argument('--android-abi',
help='Android architecture to use for native libraries')
parser.add_argument('--secondary-android-abi',
                      help='The secondary Android architecture to use for '
                           'secondary native libraries')
parser.add_argument(
'--is-multi-abi',
action='store_true',
help='Will add a placeholder for the missing ABI if no native libs or '
'placeholders are set for either the primary or secondary ABI. Can only '
'be set if both --android-abi and --secondary-android-abi are set.')
parser.add_argument(
'--native-lib-placeholders',
help='GYP-list of native library placeholders to add.')
parser.add_argument(
'--secondary-native-lib-placeholders',
help='GYP-list of native library placeholders to add '
'for the secondary ABI')
parser.add_argument('--uncompress-shared-libraries', default='False',
choices=['true', 'True', 'false', 'False'],
help='Whether to uncompress native shared libraries. Argument must be '
'a boolean value.')
parser.add_argument(
'--apksigner-jar', help='Path to the apksigner executable.')
parser.add_argument('--zipalign-path',
help='Path to the zipalign executable.')
parser.add_argument('--key-path',
help='Path to keystore for signing.')
parser.add_argument('--key-passwd',
help='Keystore password')
parser.add_argument('--key-name',
help='Keystore name')
parser.add_argument(
'--min-sdk-version', required=True, help='Value of APK\'s minSdkVersion')
parser.add_argument(
'--best-compression',
action='store_true',
help='Use zip -9 rather than zip -1')
parser.add_argument(
'--library-always-compress',
action='append',
help='The list of library files that we always compress.')
parser.add_argument(
'--library-renames',
action='append',
help='The list of library files that we prepend crazy. to their names.')
parser.add_argument('--warnings-as-errors',
action='store_true',
help='Treat all warnings as errors.')
diff_utils.AddCommandLineFlags(parser)
options = parser.parse_args(args)
options.assets = build_utils.ParseGnList(options.assets)
options.uncompressed_assets = build_utils.ParseGnList(
options.uncompressed_assets)
options.native_lib_placeholders = build_utils.ParseGnList(
options.native_lib_placeholders)
options.secondary_native_lib_placeholders = build_utils.ParseGnList(
options.secondary_native_lib_placeholders)
options.java_resources = build_utils.ParseGnList(options.java_resources)
options.native_libs = build_utils.ParseGnList(options.native_libs)
options.secondary_native_libs = build_utils.ParseGnList(
options.secondary_native_libs)
options.library_always_compress = build_utils.ParseGnList(
options.library_always_compress)
options.library_renames = build_utils.ParseGnList(options.library_renames)
# --apksigner-jar, --zipalign-path, --key-xxx arguments are
# required when building an APK, but not a bundle module.
if options.format == 'apk':
required_args = [
'apksigner_jar', 'zipalign_path', 'key_path', 'key_passwd', 'key_name'
]
for required in required_args:
if not vars(options)[required]:
raise Exception('Argument --%s is required for APKs.' % (
required.replace('_', '-')))
  options.uncompress_shared_libraries = (
      options.uncompress_shared_libraries in ('true', 'True'))
if not options.android_abi and (options.native_libs or
options.native_lib_placeholders):
raise Exception('Must specify --android-abi with --native-libs')
if not options.secondary_android_abi and (options.secondary_native_libs or
options.secondary_native_lib_placeholders):
raise Exception('Must specify --secondary-android-abi with'
' --secondary-native-libs')
if options.is_multi_abi and not (options.android_abi
and options.secondary_android_abi):
raise Exception('Must specify --is-multi-abi with both --android-abi '
'and --secondary-android-abi.')
return options
def _SplitAssetPath(path):
"""Returns (src, dest) given an asset path in the form src[:dest]."""
path_parts = path.split(':')
src_path = path_parts[0]
if len(path_parts) > 1:
dest_path = path_parts[1]
else:
dest_path = os.path.basename(src_path)
return src_path, dest_path
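# Illustrative examples (comment added for clarity, not in the original):
#   'res/logo.png'           -> ('res/logo.png', 'logo.png')
#   'res/logo.png:img/a.png' -> ('res/logo.png', 'img/a.png')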
def _ExpandPaths(paths):
"""Converts src:dst into tuples and enumerates files within directories.
Args:
paths: Paths in the form "src_path:dest_path"
Returns:
A list of (src_path, dest_path) tuples sorted by dest_path (for stable
ordering within output .apk).
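  Example (illustrative, assuming a directory ``icons/`` containing a.png):
    ['icons:my_assets'] -> [('icons/a.png', 'my_assets/a.png')]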
"""
ret = []
for path in paths:
src_path, dest_path = _SplitAssetPath(path)
if os.path.isdir(src_path):
for f in build_utils.FindInDirectory(src_path, '*'):
ret.append((f, os.path.join(dest_path, f[len(src_path) + 1:])))
else:
ret.append((src_path, dest_path))
  ret.sort(key=lambda t: t[1])
return ret
def _GetAssetsToAdd(path_tuples,
fast_align,
disable_compression=False,
allow_reads=True):
"""Returns the list of file_detail tuples for assets in the apk.
Args:
path_tuples: List of src_path, dest_path tuples to add.
fast_align: Whether to perform alignment in python zipfile (alternatively
alignment can be done using the zipalign utility out of band).
disable_compression: Whether to disable compression.
allow_reads: If false, we do not try to read the files from disk (to find
their size for example).
Returns: A list of (src_path, apk_path, compress, alignment) tuple
representing what and how assets are added.
"""
assets_to_add = []
# Group all uncompressed assets together in the hope that it will increase
# locality of mmap'ed files.
for target_compress in (False, True):
for src_path, dest_path in path_tuples:
compress = not disable_compression and (
os.path.splitext(src_path)[1] not in _NO_COMPRESS_EXTENSIONS)
if target_compress == compress:
# AddToZipHermetic() uses this logic to avoid growing small files.
# We need it here in order to set alignment correctly.
if allow_reads and compress and os.path.getsize(src_path) < 16:
compress = False
apk_path = 'assets/' + dest_path
alignment = 0 if compress and not fast_align else 4
assets_to_add.append((apk_path, src_path, compress, alignment))
return assets_to_add
def _AddFiles(apk, details):
"""Adds files to the apk.
Args:
apk: path to APK to add to.
details: A list of file detail tuples (src_path, apk_path, compress,
alignment) representing what and how files are added to the APK.
"""
for apk_path, src_path, compress, alignment in details:
# This check is only relevant for assets, but it should not matter if it is
# checked for the whole list of files.
try:
apk.getinfo(apk_path)
# Should never happen since write_build_config.py handles merging.
raise Exception(
'Multiple targets specified the asset path: %s' % apk_path)
except KeyError:
zipalign.AddToZipHermetic(
apk,
apk_path,
src_path=src_path,
compress=compress,
alignment=alignment)
def _GetNativeLibrariesToAdd(native_libs, android_abi, uncompress, fast_align,
lib_always_compress, lib_renames):
"""Returns the list of file_detail tuples for native libraries in the apk.
Returns: A list of (src_path, apk_path, compress, alignment) tuple
representing what and how native libraries are added.
"""
libraries_to_add = []
for path in native_libs:
basename = os.path.basename(path)
compress = not uncompress or any(lib_name in basename
for lib_name in lib_always_compress)
rename = any(lib_name in basename for lib_name in lib_renames)
if rename:
basename = 'crazy.' + basename
lib_android_abi = android_abi
if path.startswith('android_clang_arm64_hwasan/'):
lib_android_abi = 'arm64-v8a-hwasan'
apk_path = 'lib/%s/%s' % (lib_android_abi, basename)
alignment = 0 if compress and not fast_align else 0x1000
libraries_to_add.append((apk_path, path, compress, alignment))
return libraries_to_add
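# Illustrative sketch (comment added, not in the original): with
# android_abi='arm64-v8a', uncompress=True and lib_renames=['libfoo'],
# 'out/libfoo.so' yields ('lib/arm64-v8a/crazy.libfoo.so', 'out/libfoo.so',
# False, 0x1000), i.e. stored uncompressed and page-aligned.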
def _CreateExpectationsData(native_libs, assets):
"""Creates list of native libraries and assets."""
native_libs = sorted(native_libs)
assets = sorted(assets)
ret = []
for apk_path, _, compress, alignment in native_libs + assets:
ret.append('apk_path=%s, compress=%s, alignment=%s\n' %
(apk_path, compress, alignment))
return ''.join(ret)
def main(args):
build_utils.InitLogging('APKBUILDER_DEBUG')
args = build_utils.ExpandFileArgs(args)
options = _ParseArgs(args)
# Until Python 3.7, there's no better way to set compression level.
# The default is 6.
if options.best_compression:
# Compresses about twice as slow as the default.
zlib.Z_DEFAULT_COMPRESSION = 9
else:
# Compresses about twice as fast as the default.
zlib.Z_DEFAULT_COMPRESSION = 1
# Manually align only when alignment is necessary.
# Python's zip implementation duplicates file comments in the central
# directory, whereas zipalign does not, so use zipalign for official builds.
fast_align = options.format == 'apk' and not options.best_compression
native_libs = sorted(options.native_libs)
# Include native libs in the depfile_deps since GN doesn't know about the
# dependencies when is_component_build=true.
depfile_deps = list(native_libs)
# For targets that depend on static library APKs, dex paths are created by
# the static library's dexsplitter target and GN doesn't know about these
# paths.
if options.dex_file:
depfile_deps.append(options.dex_file)
secondary_native_libs = []
if options.secondary_native_libs:
secondary_native_libs = sorted(options.secondary_native_libs)
depfile_deps += secondary_native_libs
if options.java_resources:
# Included via .build_config, so need to write it to depfile.
depfile_deps.extend(options.java_resources)
assets = _ExpandPaths(options.assets)
uncompressed_assets = _ExpandPaths(options.uncompressed_assets)
# Included via .build_config, so need to write it to depfile.
depfile_deps.extend(x[0] for x in assets)
depfile_deps.extend(x[0] for x in uncompressed_assets)
# Bundle modules have a structure similar to APKs, except that resources
# are compiled in protobuf format (instead of binary xml), and that some
# files are located into different top-level directories, e.g.:
# AndroidManifest.xml -> manifest/AndroidManifest.xml
# classes.dex -> dex/classes.dex
# res/ -> res/ (unchanged)
# assets/ -> assets/ (unchanged)
# <other-file> -> root/<other-file>
#
# Hence, the following variables are used to control the location of files in
# the final archive.
if options.format == 'bundle-module':
apk_manifest_dir = 'manifest/'
apk_root_dir = 'root/'
apk_dex_dir = 'dex/'
else:
apk_manifest_dir = ''
apk_root_dir = ''
apk_dex_dir = ''
def _GetAssetDetails(assets, uncompressed_assets, fast_align, allow_reads):
ret = _GetAssetsToAdd(assets,
fast_align,
disable_compression=False,
allow_reads=allow_reads)
ret.extend(
_GetAssetsToAdd(uncompressed_assets,
fast_align,
disable_compression=True,
allow_reads=allow_reads))
return ret
libs_to_add = _GetNativeLibrariesToAdd(
native_libs, options.android_abi, options.uncompress_shared_libraries,
fast_align, options.library_always_compress, options.library_renames)
if options.secondary_android_abi:
libs_to_add.extend(
_GetNativeLibrariesToAdd(
secondary_native_libs, options.secondary_android_abi,
options.uncompress_shared_libraries, fast_align,
options.library_always_compress, options.library_renames))
if options.expected_file:
# We compute expectations without reading the files. This allows us to check
# expectations for different targets by just generating their build_configs
# and not have to first generate all the actual files and all their
# dependencies (for example by just passing --only-verify-expectations).
asset_details = _GetAssetDetails(assets,
uncompressed_assets,
fast_align,
allow_reads=False)
actual_data = _CreateExpectationsData(libs_to_add, asset_details)
diff_utils.CheckExpectations(actual_data, options)
if options.only_verify_expectations:
if options.depfile:
build_utils.WriteDepfile(options.depfile,
options.actual_file,
inputs=depfile_deps)
return
  # If we are past this point, we are going to actually create the final apk,
  # so we recompute asset details, this time reading the files from disk so
  # that size-based optimizations can be applied.
assets_to_add = _GetAssetDetails(
assets, uncompressed_assets, fast_align, allow_reads=True)
# Targets generally do not depend on apks, so no need for only_if_changed.
with build_utils.AtomicOutput(options.output_apk, only_if_changed=False) as f:
with zipfile.ZipFile(options.resource_apk) as resource_apk, \
zipfile.ZipFile(f, 'w') as out_apk:
def add_to_zip(zip_path, data, compress=True, alignment=4):
zipalign.AddToZipHermetic(
out_apk,
zip_path,
data=data,
compress=compress,
alignment=0 if compress and not fast_align else alignment)
def copy_resource(zipinfo, out_dir=''):
add_to_zip(
out_dir + zipinfo.filename,
resource_apk.read(zipinfo.filename),
compress=zipinfo.compress_type != zipfile.ZIP_STORED)
# Make assets come before resources in order to maintain the same file
# ordering as GYP / aapt. http://crbug.com/561862
resource_infos = resource_apk.infolist()
# 1. AndroidManifest.xml
logging.debug('Adding AndroidManifest.xml')
copy_resource(
resource_apk.getinfo('AndroidManifest.xml'), out_dir=apk_manifest_dir)
# 2. Assets
logging.debug('Adding assets/')
_AddFiles(out_apk, assets_to_add)
# 3. Dex files
logging.debug('Adding classes.dex')
if options.dex_file:
with open(options.dex_file, 'rb') as dex_file_obj:
if options.dex_file.endswith('.dex'):
max_dex_number = 1
# This is the case for incremental_install=true.
add_to_zip(
apk_dex_dir + 'classes.dex',
dex_file_obj.read(),
compress=not options.uncompress_dex)
else:
max_dex_number = 0
with zipfile.ZipFile(dex_file_obj) as dex_zip:
for dex in (d for d in dex_zip.namelist() if d.endswith('.dex')):
max_dex_number += 1
add_to_zip(
apk_dex_dir + dex,
dex_zip.read(dex),
compress=not options.uncompress_dex)
if options.jdk_libs_dex_file:
with open(options.jdk_libs_dex_file, 'rb') as dex_file_obj:
add_to_zip(
apk_dex_dir + 'classes{}.dex'.format(max_dex_number + 1),
dex_file_obj.read(),
compress=not options.uncompress_dex)
# 4. Native libraries.
logging.debug('Adding lib/')
_AddFiles(out_apk, libs_to_add)
# Add a placeholder lib if the APK should be multi ABI but is missing libs
# for one of the ABIs.
native_lib_placeholders = options.native_lib_placeholders
secondary_native_lib_placeholders = (
options.secondary_native_lib_placeholders)
if options.is_multi_abi:
if ((secondary_native_libs or secondary_native_lib_placeholders)
and not native_libs and not native_lib_placeholders):
native_lib_placeholders += ['libplaceholder.so']
if ((native_libs or native_lib_placeholders)
and not secondary_native_libs
and not secondary_native_lib_placeholders):
secondary_native_lib_placeholders += ['libplaceholder.so']
# Add placeholder libs.
for name in sorted(native_lib_placeholders):
# Note: Empty libs files are ignored by md5check (can cause issues
# with stale builds when the only change is adding/removing
# placeholders).
apk_path = 'lib/%s/%s' % (options.android_abi, name)
add_to_zip(apk_path, '', alignment=0x1000)
for name in sorted(secondary_native_lib_placeholders):
# Note: Empty libs files are ignored by md5check (can cause issues
# with stale builds when the only change is adding/removing
# placeholders).
apk_path = 'lib/%s/%s' % (options.secondary_android_abi, name)
add_to_zip(apk_path, '', alignment=0x1000)
# 5. Resources
logging.debug('Adding res/')
for info in sorted(resource_infos, key=lambda i: i.filename):
if info.filename != 'AndroidManifest.xml':
copy_resource(info)
# 6. Java resources that should be accessible via
# Class.getResourceAsStream(), in particular parts of Emma jar.
# Prebuilt jars may contain class files which we shouldn't include.
logging.debug('Adding Java resources')
for java_resource in options.java_resources:
with zipfile.ZipFile(java_resource, 'r') as java_resource_jar:
for apk_path in sorted(java_resource_jar.namelist()):
apk_path_lower = apk_path.lower()
if apk_path_lower.startswith('meta-inf/'):
continue
if apk_path_lower.endswith('/'):
continue
if apk_path_lower.endswith('.class'):
continue
add_to_zip(apk_root_dir + apk_path,
java_resource_jar.read(apk_path))
if options.format == 'apk':
zipalign_path = None if fast_align else options.zipalign_path
finalize_apk.FinalizeApk(options.apksigner_jar,
zipalign_path,
f.name,
f.name,
options.key_path,
options.key_passwd,
options.key_name,
int(options.min_sdk_version),
warnings_as_errors=options.warnings_as_errors)
logging.debug('Moving file into place')
if options.depfile:
build_utils.WriteDepfile(options.depfile,
options.output_apk,
inputs=depfile_deps)
if __name__ == '__main__':
main(sys.argv[1:])
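# Example invocation (illustrative only; the exact flags come from the build
# system and the paths below are hypothetical):
#   apkbuilder.py --resource-apk out/base.ap_ --output-apk out/Base.apk \
#       --format apk --min-sdk-version 21 --apksigner-jar bin/apksigner.jar \
#       --zipalign-path bin/zipalign --key-path debug.keystore \
#       --key-passwd android --key-name androiddebugkey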
|
import urllib.request, urllib.parse, urllib.error
from oauth2 import Request as OAuthRequest, SignatureMethod_HMAC_SHA1
try:
import json as simplejson
except ImportError:
try:
import simplejson
except ImportError:
from django.utils import simplejson
from social_auth.backends import ConsumerBasedOAuth, OAuthBackend, BaseOAuth2
from social_auth.utils import dsa_urlopen
class RdioBaseBackend(OAuthBackend):
def get_user_id(self, details, response):
return response['key']
def get_user_details(self, response):
return {
'username': response['username'],
'first_name': response['firstName'],
'last_name': response['lastName'],
'fullname': response['displayName'],
}
class RdioOAuth1Backend(RdioBaseBackend):
"""Rdio OAuth authentication backend"""
name = 'rdio-oauth1'
EXTRA_DATA = [
('key', 'rdio_id'),
('icon', 'rdio_icon_url'),
('url', 'rdio_profile_url'),
('username', 'rdio_username'),
('streamRegion', 'rdio_stream_region'),
]
@classmethod
def tokens(cls, instance):
token = super(RdioOAuth1Backend, cls).tokens(instance)
if token and 'access_token' in token:
token = dict(tok.split('=')
for tok in token['access_token'].split('&'))
return token
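    # Illustrative note (comment added, not original): an OAuth1 access_token
    # string such as 'oauth_token=abc&oauth_token_secret=xyz' is parsed into
    # {'oauth_token': 'abc', 'oauth_token_secret': 'xyz'}.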
class RdioOAuth2Backend(RdioBaseBackend):
name = 'rdio-oauth2'
EXTRA_DATA = [
('key', 'rdio_id'),
('icon', 'rdio_icon_url'),
('url', 'rdio_profile_url'),
('username', 'rdio_username'),
('streamRegion', 'rdio_stream_region'),
('refresh_token', 'refresh_token', True),
('token_type', 'token_type', True),
]
class RdioOAuth1(ConsumerBasedOAuth):
AUTH_BACKEND = RdioOAuth1Backend
REQUEST_TOKEN_URL = 'http://api.rdio.com/oauth/request_token'
AUTHORIZATION_URL = 'https://www.rdio.com/oauth/authorize'
ACCESS_TOKEN_URL = 'http://api.rdio.com/oauth/access_token'
RDIO_API_BASE = 'http://api.rdio.com/1/'
SETTINGS_KEY_NAME = 'RDIO_OAUTH1_KEY'
SETTINGS_SECRET_NAME = 'RDIO_OAUTH1_SECRET'
def user_data(self, access_token, *args, **kwargs):
"""Return user data provided"""
params = {
'method': 'currentUser',
'extras': 'username,displayName,streamRegion',
}
request = self.oauth_post_request(access_token, self.RDIO_API_BASE,
params=params)
response = dsa_urlopen(request.url, request.to_postdata())
        data = response.read()
        try:
            return simplejson.loads(data)['result']
except ValueError:
return None
def oauth_post_request(self, token, url, params):
"""Generate OAuth request, setups callback url"""
if 'oauth_verifier' in self.data:
params['oauth_verifier'] = self.data['oauth_verifier']
request = OAuthRequest.from_consumer_and_token(self.consumer,
token=token,
http_url=url,
parameters=params,
http_method='POST')
request.sign_request(SignatureMethod_HMAC_SHA1(), self.consumer, token)
return request
class RdioOAuth2(BaseOAuth2):
AUTH_BACKEND = RdioOAuth2Backend
AUTHORIZATION_URL = 'https://www.rdio.com/oauth2/authorize'
ACCESS_TOKEN_URL = 'https://www.rdio.com/oauth2/token'
RDIO_API_BASE = 'https://www.rdio.com/api/1/'
SETTINGS_KEY_NAME = 'RDIO_OAUTH2_KEY'
SETTINGS_SECRET_NAME = 'RDIO_OAUTH2_SECRET'
SCOPE_VAR_NAME = 'RDIO2_PERMISSIONS'
EXTRA_PARAMS_VAR_NAME = 'RDIO2_EXTRA_PARAMS'
def user_data(self, access_token, *args, **kwargs):
params = {
'method': 'currentUser',
'extras': 'username,displayName,streamRegion',
'access_token': access_token,
}
        response = dsa_urlopen(self.RDIO_API_BASE,
                               urllib.parse.urlencode(params).encode('utf-8'))
try:
return simplejson.load(response)['result']
except ValueError:
return None
BACKENDS = {
'rdio-oauth1': RdioOAuth1,
'rdio-oauth2': RdioOAuth2
}
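# Illustrative settings sketch (added, not original; the names follow the
# SETTINGS_*_NAME constants defined above, the values are placeholders):
#   RDIO_OAUTH1_KEY = '...'
#   RDIO_OAUTH1_SECRET = '...'
#   RDIO_OAUTH2_KEY = '...'
#   RDIO_OAUTH2_SECRET = '...'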
|
import csv
import time
from datetime import timedelta
from django.shortcuts import render, redirect
from django.http import Http404, HttpResponse, HttpResponseForbidden, JsonResponse
from django.conf import settings
from django.core.exceptions import ValidationError
from django.utils.translation import ugettext as _
from django.contrib import messages
from django.utils.html import format_html
from django.db.models import Q
from django.contrib.auth.decorators import login_required
from django.db import transaction, IntegrityError
from django.contrib.auth.hashers import make_password
from django.contrib.auth.models import User
from django.contrib.auth.forms import PasswordChangeForm
from django.urls import reverse
from django.utils.decorators import method_decorator
from django.utils.timezone import now
from formtools.wizard.views import SessionWizardView
from .models import Petition, Signature, Organization, PytitionUser, PetitionTemplate, Permission
from .models import SlugModel
from .forms import SignatureForm, ContentFormPetition, EmailForm, NewsletterForm, SocialNetworkForm, ContentFormTemplate
from .forms import StyleForm, PetitionCreationStep1, PetitionCreationStep2, PetitionCreationStep3, UpdateInfoForm
from .forms import DeleteAccountForm, OrgCreationForm
from .helpers import get_client_ip, get_session_user, petition_from_id
from .helpers import check_petition_is_accessible
from .helpers import send_confirmation_email, subscribe_to_newsletter
from .helpers import get_update_form, petition_detail_meta
def index(request):
petitions = Petition.objects.filter(published=True).order_by('-id')[:12]
if not hasattr(settings, 'INDEX_PAGE'):
raise Http404(_("You must set an INDEX_PAGE config in your settings"))
if settings.INDEX_PAGE == 'USER_PROFILE':
try:
user_name = settings.INDEX_PAGE_USER
        except AttributeError:
raise Http404(_("You must set an INDEX_PAGE_USER config in your settings"))
elif settings.INDEX_PAGE == 'ORGA_PROFILE':
try:
org_name = settings.INDEX_PAGE_ORGA
        except AttributeError:
raise Http404(_("You must set an INDEX_PAGE_ORGA config in your settings"))
if settings.INDEX_PAGE == 'ALL_PETITIONS':
return redirect("all_petitions")
elif settings.INDEX_PAGE == 'ORGA_PROFILE':
org = Organization.objects.get(name=org_name)
return redirect("org_profile", org.slugname)
elif settings.INDEX_PAGE == 'USER_PROFILE':
return redirect("user_profile", user_name)
elif settings.INDEX_PAGE == 'LOGIN_REGISTER':
if request.user.is_authenticated:
return redirect("user_dashboard")
else:
return redirect("login")
else:
authenticated = request.user.is_authenticated
if authenticated:
user = get_session_user(request)
else:
user = request.user
return render(request, 'petition/index.html',
{
'user': user,
'petitions': petitions
}
)
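# Illustrative settings sketch (added, not original; the INDEX_PAGE values are
# exactly those handled above, the organization name is a placeholder):
#   INDEX_PAGE = 'ORGA_PROFILE'
#   INDEX_PAGE_ORGA = 'my-organization'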
def all_petitions(request):
petitions = Petition.objects.filter(published=True).all()
return render(request, 'petition/all_petitions.html',
{'petitions': petitions})
def search(request):
q = request.GET.get('q', '')
if q != "":
petitions = Petition.objects.filter(Q(title__icontains=q) | Q(text__icontains=q)).filter(published=True)[:15]
orgs = Organization.objects.filter(name__icontains=q)
else:
petitions = Petition.objects.filter(published=True)[:15]
orgs = []
return render(
request, 'petition/search.html',
{
'petitions': petitions,
'orgs': orgs,
'q': q
}
)
def detail(request, petition_id):
petition = petition_from_id(petition_id)
check_petition_is_accessible(request, petition)
try:
pytitionuser = get_session_user(request)
    except Exception:
pytitionuser = None
sign_form = SignatureForm(petition=petition)
ctx = {"user": pytitionuser, 'petition': petition, 'form': sign_form,
'meta': petition_detail_meta(request, petition_id)}
return render(request, 'petition/petition_detail.html', ctx)
def confirm(request, petition_id, confirmation_hash):
petition = petition_from_id(petition_id)
check_petition_is_accessible(request, petition)
try:
successmsg = petition.confirm_signature(confirmation_hash)
if successmsg is None:
messages.error(request, _("Error: This confirmation code is invalid. Maybe you\'ve already confirmed?"))
else:
messages.success(request, successmsg)
except ValidationError as e:
messages.error(request, _(e.message))
except Signature.DoesNotExist:
messages.error(request, _("Error: This confirmation code is invalid."))
return redirect(petition.url)
@login_required
def get_csv_signature(request, petition_id, only_confirmed):
user = get_session_user(request)
try:
petition = Petition.objects.get(pk=petition_id)
except Petition.DoesNotExist:
return JsonResponse({}, status=404)
if petition.owner_type == "org":
if not petition.org.is_allowed_to(user, "can_view_signatures"):
return JsonResponse({}, status=403)
filename = '{}.csv'.format(petition)
    signatures = Signature.objects.filter(petition=petition)
    if only_confirmed:
        signatures = signatures.filter(confirmed=True)
    else:
        signatures = signatures.all()
response = HttpResponse(content_type='text/csv')
response['Content-Disposition'] = 'attachment;filename={}'.format(filename).replace('\r\n', '').replace(' ', '%20')
writer = csv.writer(response)
attrs = ['first_name', 'last_name', 'phone', 'email', 'subscribed_to_mailinglist', 'confirmed']
writer.writerow(attrs)
for signature in signatures:
values = [getattr(signature, field) for field in attrs]
writer.writerow(values)
return response
@login_required
def go_send_confirmation_email(request, signature_id):
app_label = Signature._meta.app_label
signature = Signature.objects.filter(pk=signature_id).get()
send_confirmation_email(request, signature)
return redirect('admin:{}_signature_change'.format(app_label), signature_id)
def create_signature(request, petition_id):
petition = petition_from_id(petition_id)
check_petition_is_accessible(request, petition)
if request.method == "POST":
form = SignatureForm(petition=petition, data=request.POST)
if not form.is_valid():
return render(request, 'petition/petition_detail.html', {'petition': petition, 'form': form, 'meta': petition_detail_meta(request, petition_id)})
ipaddr = make_password(
get_client_ip(request),
salt=petition.salt.encode('utf-8'))
since = now() - timedelta(seconds=settings.SIGNATURE_THROTTLE_TIMING)
signatures = Signature.objects.filter(
petition=petition,
ipaddress=ipaddr,
date__gt=since)
if signatures.count() > settings.SIGNATURE_THROTTLE:
messages.error(request, _("Too many signatures from your IP address, please try again later."))
return render(request, 'petition/petition_detail.html', {'petition': petition, 'form': form, 'meta': petition_detail_meta(request, petition_id)})
else:
signature = form.save()
signature.ipaddress = ipaddr
signature.save()
send_confirmation_email(request, signature)
messages.success(request,
format_html(_("Thank you for signing this petition, an email has just been sent to you at your address \'{}\'" \
" in order to confirm your signature.<br>" \
"You will need to click on the confirmation link in the email.<br>" \
"If you cannot find the email in your Inbox, please have a look in your Spam box.")\
, signature.email))
if petition.has_newsletter and signature.subscribed_to_mailinglist:
subscribe_to_newsletter(petition, signature.email)
return redirect(petition.url)
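# Illustrative settings sketch (added, not original; the names are the ones
# read above, the values are placeholders):
#   SIGNATURE_THROTTLE = 5            # max signatures per IP per window
#   SIGNATURE_THROTTLE_TIMING = 3600  # window length, in seconds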
@login_required
def org_dashboard(request, orgslugname):
try:
org = Organization.objects.get(slugname=orgslugname)
except Organization.DoesNotExist:
messages.error(request, _("This organization does not exist: '{}'".format(orgslugname)))
return redirect("user_dashboard")
pytitionuser = get_session_user(request)
if pytitionuser not in org.members.all():
messages.error(request, _("You are not part of this organization: '{}'".format(org.name)))
return redirect("user_dashboard")
try:
permissions = Permission.objects.get(organization=org, user=pytitionuser)
except Permission.DoesNotExist:
        messages.error(request,
                       _("Internal error, cannot find your permissions attached to this organization ('{orgname}')")
                       .format(orgname=org.name))
return redirect("user_dashboard")
can_create_petition = org.is_allowed_to(pytitionuser, "can_create_petitions")
petitions = org.petition_set.all()
other_orgs = pytitionuser.organization_set.filter(~Q(name=org.name)).all()
return render(request, 'petition/org_dashboard.html',
{'org': org, 'user': pytitionuser, "other_orgs": other_orgs,
'petitions': petitions, 'user_permissions': permissions,
'can_create_petition': can_create_petition})
@login_required
def user_dashboard(request):
user = get_session_user(request)
petitions = user.petition_set.all()
return render(
request,
'petition/user_dashboard.html',
{'user': user, 'petitions': petitions, 'can_create_petition': True}
)
def user_profile(request, user_name):
try:
user = PytitionUser.objects.get(user__username=user_name)
except PytitionUser.DoesNotExist:
raise Http404(_("not found"))
ctx = {'user': user,
'petitions': user.petition_set.filter(published=True)}
return render(request, 'petition/user_profile.html', ctx)
@login_required
def leave_org(request, orgslugname):
try:
org = Organization.objects.get(slugname=orgslugname)
except Organization.DoesNotExist:
raise Http404(_("not found"))
pytitionuser = get_session_user(request)
if pytitionuser not in org.members.all():
raise Http404(_("not found"))
with transaction.atomic():
if org.is_last_admin(pytitionuser):
            messages.error(request, _('You cannot leave this organization: you are its last administrator'))
return redirect(reverse('account_settings') + '#a_org_form')
elif org.members.count() == 1:
            messages.error(request, _('You cannot leave this organization: you are its last member'))
return redirect(reverse('account_settings') + '#a_org_form')
else:
org.members.remove(pytitionuser)
return redirect('account_settings')
def org_profile(request, orgslugname):
try:
user = get_session_user(request)
    except Exception:
user = None
try:
org = Organization.objects.get(slugname=orgslugname)
except Organization.DoesNotExist:
raise Http404(_("not found"))
ctx = {'org': org,
'petitions': org.petition_set.filter(published=True)}
# if a user is logged-in, put it in the context, it will feed the navbar dropdown
if user is not None:
ctx['user'] = user
return render(request, "petition/org_profile.html", ctx)
@login_required
def get_user_list(request):
q = request.GET.get('q', '')
if q != "":
users = PytitionUser.objects.filter(Q(user__username__contains=q) | Q(user__first_name__icontains=q) |
Q(user__last_name__icontains=q)).all()
else:
users = []
userdict = {
"values": [user.user.username for user in users],
}
return JsonResponse(userdict)
@login_required
def org_add_user(request, orgslugname):
adduser = request.GET.get('user', '')
try:
adduser = PytitionUser.objects.get(user__username=adduser)
except PytitionUser.DoesNotExist:
message = _("This user does not exist (anylonger?)")
return JsonResponse({"message": message}, status=404)
try:
org = Organization.objects.get(slugname=orgslugname)
except Organization.DoesNotExist:
message = _("This organization does not exist (anylonger?)")
return JsonResponse({"message": message}, status=404)
pytitionuser = get_session_user(request)
if org not in pytitionuser.organization_set.all():
message = _("You are not part of this organization.")
return JsonResponse({"message": message}, status=403)
if org in adduser.organization_set.all():
message = _("User is already member of {orgname} organization".format(orgname=org.name))
return JsonResponse({"message": message}, status=500)
if not org.is_allowed_to(pytitionuser, "can_add_members"):
message = _("You are not allowed to invite new members into this organization.")
return JsonResponse({"message": message}, status=403)
try:
adduser.invitations.add(org)
adduser.save()
    except Exception:
        message = _("An error occurred")
return JsonResponse({"message": message}, status=500)
message = _("You invited {username} to join {orgname}".format(username=adduser.name, orgname=org.name))
return JsonResponse({"message": message})
@login_required
def invite_accept(request, orgslugname):
if orgslugname == "":
return HttpResponse(status=500)
pytitionuser = get_session_user(request)
try:
org = Organization.objects.get(slugname=orgslugname)
except Organization.DoesNotExist:
raise Http404(_("not found"))
if org in pytitionuser.invitations.all():
try:
with transaction.atomic():
pytitionuser.invitations.remove(org)
org.members.add(pytitionuser)
        except Exception:
return HttpResponse(status=500)
else:
raise Http404(_("not found"))
return redirect('user_dashboard')
@login_required
def invite_dismiss(request, orgslugname):
if orgslugname == "":
return JsonResponse({}, status=500)
pytitionuser = get_session_user(request)
try:
org = Organization.objects.get(slugname=orgslugname)
except Organization.DoesNotExist:
raise Http404(_("not found"))
if org in pytitionuser.invitations.all():
try:
pytitionuser.invitations.remove(org)
        except Exception:
return JsonResponse({}, status=500)
else:
raise Http404(_("not found"))
return redirect('user_dashboard')
@login_required
def new_template(request, orgslugname=None):
pytitionuser = get_session_user(request)
ctx = {'user': pytitionuser}
if orgslugname:
redirection = "org_new_template"
try:
org = Organization.objects.get(slugname=orgslugname)
ctx['org'] = org
except Organization.DoesNotExist:
raise Http404(_("Organization does not exist"))
if org not in pytitionuser.organization_set.all():
return HttpResponseForbidden(_("You are not allowed to view this organization dashboard"))
try:
permissions = Permission.objects.get(organization=org, user=pytitionuser)
ctx['user_permissions'] = permissions
except Permission.DoesNotExist:
return HttpResponse(
_("Internal error, cannot find your permissions attached to this organization (\'{orgname}\')"
.format(orgname=org.name)), status=500)
if not permissions.can_create_templates:
return HttpResponseForbidden(_("You don't have the permission to create a Template in this organization"))
ctx['base_template'] = 'petition/org_base.html'
else:
redirection = "user_new_template"
ctx['base_template'] = 'petition/user_base.html'
if request.method == "POST":
template_name = request.POST.get('template_name', '')
if template_name != '':
if orgslugname:
template = PetitionTemplate(name=template_name, org=org)
else:
template = PetitionTemplate(name=template_name, user=pytitionuser)
template.save()
return redirect("edit_template", template.id)
else:
messages.error(request, _("You need to provide a template name."))
return redirect(redirection)
else:
return render(request, "petition/new_template.html", ctx)
@login_required
def edit_template(request, template_id):
    if template_id == '':
        return HttpResponseForbidden(_("You need to provide the template id to modify"))
    try:
        template = PetitionTemplate.objects.get(pk=template_id)
except PetitionTemplate.DoesNotExist:
raise Http404(_("This template does not exist"))
pytitionuser = get_session_user(request)
context = {'user': pytitionuser}
if template.owner_type == "org":
owner = template.org
else:
owner = template.user
if template.owner_type == "org":
try:
permissions = Permission.objects.get(organization=owner, user=pytitionuser)
        except Permission.DoesNotExist:
return HttpResponse(
_("Internal error, cannot find your permissions attached to this organization (\'{orgname}\')"
.format(orgname=owner.name)), status=500)
context['user_permissions'] = permissions
if owner not in pytitionuser.organization_set.all() or not permissions.can_modify_templates:
return HttpResponseForbidden(_("You are not allowed to edit this organization's templates"))
context['org'] = owner
base_template = "petition/org_base.html"
else:
if owner != pytitionuser:
return HttpResponseForbidden(_("You are not allowed to edit this user's templates"))
base_template = "petition/user_base.html"
submitted_ctx = {
'content_form_submitted': False,
'email_form_submitted': False,
'social_network_form_submitted': False,
'newsletter_form_submitted': False,
'style_form_submitted': False,
}
if request.method == "POST":
if 'content_form_submitted' in request.POST:
content_form = ContentFormTemplate(request.POST)
submitted_ctx['content_form_submitted'] = True
if content_form.is_valid():
template.name = content_form.cleaned_data['name']
template.text = content_form.cleaned_data['text']
template.side_text = content_form.cleaned_data['side_text']
template.footer_text = content_form.cleaned_data['footer_text']
template.footer_links = content_form.cleaned_data['footer_links']
template.sign_form_footer = content_form.cleaned_data['sign_form_footer']
template.save()
else:
content_form = ContentFormTemplate({f: getattr(template, f) for f in ContentFormTemplate.base_fields})
if 'email_form_submitted' in request.POST:
email_form = EmailForm(request.POST)
submitted_ctx['email_form_submitted'] = True
if email_form.is_valid():
template.confirmation_email_reply = email_form.cleaned_data['confirmation_email_reply']
template.save()
else:
email_form = EmailForm({f: getattr(template, f) for f in EmailForm.base_fields})
if 'social_network_form_submitted' in request.POST:
social_network_form = SocialNetworkForm(request.POST)
submitted_ctx['social_network_form_submitted'] = True
if social_network_form.is_valid():
template.twitter_description = social_network_form.cleaned_data['twitter_description']
template.twitter_image = social_network_form.cleaned_data['twitter_image']
template.org_twitter_handle = social_network_form.cleaned_data['org_twitter_handle']
template.save()
else:
social_network_form = SocialNetworkForm({f: getattr(template, f) for f in SocialNetworkForm.base_fields})
if 'newsletter_form_submitted' in request.POST:
newsletter_form = NewsletterForm(request.POST)
submitted_ctx['newsletter_form_submitted'] = True
if newsletter_form.is_valid():
template.has_newsletter = newsletter_form.cleaned_data['has_newsletter']
template.newsletter_text = newsletter_form.cleaned_data['newsletter_text']
template.newsletter_subscribe_http_data = newsletter_form.cleaned_data['newsletter_subscribe_http_data']
template.newsletter_subscribe_http_mailfield = newsletter_form.cleaned_data['newsletter_subscribe_http_mailfield']
template.newsletter_subscribe_http_url = newsletter_form.cleaned_data['newsletter_subscribe_http_url']
template.newsletter_subscribe_mail_subject = newsletter_form.cleaned_data['newsletter_subscribe_mail_subject']
template.newsletter_subscribe_mail_from = newsletter_form.cleaned_data['newsletter_subscribe_mail_from']
template.newsletter_subscribe_mail_to = newsletter_form.cleaned_data['newsletter_subscribe_mail_to']
template.newsletter_subscribe_method = newsletter_form.cleaned_data['newsletter_subscribe_method']
template.newsletter_subscribe_mail_smtp_host = newsletter_form.cleaned_data['newsletter_subscribe_mail_smtp_host']
template.newsletter_subscribe_mail_smtp_port = newsletter_form.cleaned_data['newsletter_subscribe_mail_smtp_port']
template.newsletter_subscribe_mail_smtp_user = newsletter_form.cleaned_data['newsletter_subscribe_mail_smtp_user']
template.newsletter_subscribe_mail_smtp_password = newsletter_form.cleaned_data['newsletter_subscribe_mail_smtp_password']
template.newsletter_subscribe_mail_smtp_tls = newsletter_form.cleaned_data['newsletter_subscribe_mail_smtp_tls']
template.newsletter_subscribe_mail_smtp_starttls = newsletter_form.cleaned_data['newsletter_subscribe_mail_smtp_starttls']
template.save()
else:
newsletter_form = NewsletterForm({f: getattr(template, f) for f in NewsletterForm.base_fields})
if 'style_form_submitted' in request.POST:
submitted_ctx['style_form_submitted'] = True
style_form = StyleForm(request.POST)
if style_form.is_valid():
template.bgcolor = style_form.cleaned_data['bgcolor']
template.linear_gradient_direction = style_form.cleaned_data['linear_gradient_direction']
template.gradient_from = style_form.cleaned_data['gradient_from']
template.gradient_to = style_form.cleaned_data['gradient_to']
template.save()
else:
style_form = StyleForm({f: getattr(template, f) for f in StyleForm.base_fields})
else:
content_form = ContentFormTemplate({f: getattr(template, f) for f in ContentFormTemplate.base_fields})
email_form = EmailForm({f: getattr(template, f) for f in EmailForm.base_fields})
social_network_form = SocialNetworkForm({f: getattr(template, f) for f in SocialNetworkForm.base_fields})
newsletter_form = NewsletterForm({f: getattr(template, f) for f in NewsletterForm.base_fields})
style_form = StyleForm({f: getattr(template, f) for f in StyleForm.base_fields})
ctx = {'content_form': content_form,
'email_form': email_form,
'social_network_form': social_network_form,
'newsletter_form': newsletter_form,
'style_form': style_form,
'petition': template}
context['base_template'] = base_template
context.update(ctx)
context.update(submitted_ctx)
return render(request, "petition/edit_template.html", context)
@login_required
def template_delete(request, template_id):
pytitionuser = get_session_user(request)
if template_id == '':
return JsonResponse({}, status=500)
try:
template = PetitionTemplate.objects.get(pk=template_id)
    except PetitionTemplate.DoesNotExist:
return JsonResponse({}, status=404)
if template.owner_type == "org":
        if pytitionuser not in template.org.members.all():
return JsonResponse({}, status=403) # User not in organization
try:
permissions = Permission.objects.get(
organization=template.org,
user=pytitionuser)
except Permission.DoesNotExist:
return JsonResponse({}, status=500) # No permission? fatal error!
if not permissions.can_delete_templates:
return JsonResponse({}, status=403) # User does not have the permission!
else:
if pytitionuser != template.user:
return JsonResponse({}, status=403) # User cannot delete a template if it's not his
template.delete()
return JsonResponse({})
@login_required
def template_fav_toggle(request, template_id):
pytitionuser = get_session_user(request)
if template_id == '':
return JsonResponse({}, status=500)
try:
template = PetitionTemplate.objects.get(pk=template_id)
except PetitionTemplate.DoesNotExist:
return JsonResponse({}, status=404)
if template.owner_type == "org":
owner = template.org
else:
owner = template.user
if template.owner_type == "org":
if owner not in pytitionuser.organization_set.all():
return JsonResponse({}, status=403) # Forbidden
else:
if owner != pytitionuser:
return JsonResponse({'msg': _("You are not allowed to change this user's default template")}, status=403)
if owner.default_template == template:
owner.default_template = None
else:
owner.default_template = template
owner.save()
return JsonResponse({})
@login_required
def org_delete_member(request, orgslugname):
member_name = request.GET.get('member', '')
try:
member = PytitionUser.objects.get(user__username=member_name)
except PytitionUser.DoesNotExist:
raise Http404(_("User does not exist"))
pytitionuser = get_session_user(request)
try:
org = Organization.objects.get(slugname=orgslugname)
except Organization.DoesNotExist:
raise Http404(_("Organization does not exist"))
if pytitionuser not in org.members.all():
return JsonResponse({}, status=403) # Forbidden
try:
permissions = Permission.objects.get(user=pytitionuser, organization=org)
    except Permission.DoesNotExist:
return JsonResponse({}, status=500)
if permissions.can_remove_members or pytitionuser == member:
if org in member.organization_set.all():
if org.is_last_admin(member):
return JsonResponse({}, status=403) # Forbidden
member.organization_set.remove(org)
else:
return JsonResponse({}, status=404)
else:
return JsonResponse({}, status=403) # Forbidden
return JsonResponse({}, status=200)
@login_required
def org_edit_user_perms(request, orgslugname, user_name):
"""Shows the page which lists the user permissions."""
pytitionuser = get_session_user(request)
try:
member = PytitionUser.objects.get(user__username=user_name)
except PytitionUser.DoesNotExist:
messages.error(request, _("User '{name}' does not exist".format(name=user_name)))
return redirect("org_dashboard", orgslugname)
try:
org = Organization.objects.get(slugname=orgslugname)
except Organization.DoesNotExist:
raise Http404(_("Organization '{name}' does not exist".format(name=orgslugname)))
if org not in member.organization_set.all():
messages.error(request, _("The user '{username}' is not member of this organization ({orgname}).".
format(username=user_name, orgname=org.name)))
return redirect("org_dashboard", org.slugname)
try:
permissions = Permission.objects.get(organization=org, user=member)
except Permission.DoesNotExist:
messages.error(request,
_("Internal error, this member does not have permissions attached to this organization."))
return redirect("org_dashboard", org.slugname)
try:
user_permissions = Permission.objects.get(organization=org, user=pytitionuser)
    except Permission.DoesNotExist:
return HttpResponse(
_("Internal error, cannot find your permissions attached to this organization (\'{orgname}\')"
.format(orgname=org.name)), status=500)
return render(request, "petition/org_edit_user_perms.html",
{'org': org, 'member': member, 'user': pytitionuser,
'permissions': permissions,
'user_permissions': user_permissions})
@login_required
def org_set_user_perms(request, orgslugname, user_name):
"""Actually do the modification of user permissions.
Data come from "org_edit_user_perms" view's form.
"""
pytitionuser = get_session_user(request)
try:
member = PytitionUser.objects.get(user__username=user_name)
except PytitionUser.DoesNotExist:
messages.error(request, _("User does not exist"))
return redirect("org_dashboard", orgslugname)
try:
org = Organization.objects.get(slugname=orgslugname)
except Organization.DoesNotExist:
raise Http404(_("Organization does not exist"))
if org not in member.organization_set.all():
messages.error(request, _("This user is not part of organization \'{orgname}\'".format(orgname=org.name)))
return redirect("org_dashboard", org.slugname)
try:
permissions = Permission.objects.get(user=member, organization=org)
except Permission.DoesNotExist:
messages.error(request, _("Fatal error, this user does not have permissions attached for this organization"))
return redirect("org_dashboard", org.slugname)
try:
userperms = Permission.objects.get(user=pytitionuser, organization=org)
    except Permission.DoesNotExist:
messages.error(request, _("Fatal error, you don't have permissions attached to you for this organization"))
return redirect("org_dashboard", org.slugname)
if pytitionuser not in org.members.all():
messages.error(request, _("You are not part of this organization"))
return redirect("user_dashboard")
if not userperms.can_modify_permissions:
messages.error(request, _("You are not allowed to modify this organization members' permissions"))
return redirect("org_edit_user_perms", orgslugname, user_name)
if request.method == "POST":
error = False
post = request.POST
permissions.can_remove_members = post.get('can_remove_members', '') == 'on'
permissions.can_add_members = post.get('can_add_members', '') == 'on'
permissions.can_create_petitions = post.get('can_create_petitions', '') == 'on'
permissions.can_modify_petitions = post.get('can_modify_petitions', '') == 'on'
permissions.can_delete_petitions = post.get('can_delete_petitions', '') == 'on'
permissions.can_create_templates = post.get('can_create_templates', '') == 'on'
permissions.can_modify_templates = post.get('can_modify_templates', '') == 'on'
permissions.can_delete_templates = post.get('can_delete_templates', '') == 'on'
permissions.can_view_signatures = post.get('can_view_signatures', '') == 'on'
permissions.can_modify_signatures = post.get('can_modify_signatures', '') == 'on'
permissions.can_delete_signatures = post.get('can_delete_signatures', '') == 'on'
can_modify_perms = post.get('can_modify_permissions', '') == 'on'
with transaction.atomic():
# if user is dropping his own permissions
if not can_modify_perms and permissions.can_modify_permissions and pytitionuser == member:
# get list of people with can_modify_permissions permission on this org
owners = org.owners
if owners.count() > 1:
permissions.can_modify_permissions = can_modify_perms
else:
if org.members.count() > 1:
error = True
messages.error(request, _("You cannot remove your ability to change permissions on this "
"Organization because you are the only one left who can do this. "
"Give the permission to someone else before removing yours."))
else:
error = True
messages.error(request, _("You cannot remove your ability to change permissions on this "
"Organization because you are the only member left."))
if not error:
permissions.can_modify_permissions = can_modify_perms
messages.success(request, _("Permissions successfully changed!"))
permissions.save()
return redirect("org_edit_user_perms", orgslugname, user_name)
WizardTemplates = {"step1": "petition/new_petition_step1.html",
"step2": "petition/new_petition_step2.html",
"step3": "petition/new_petition_step3.html"}
WizardForms = [("step1", PetitionCreationStep1),
("step2", PetitionCreationStep2),
("step3", PetitionCreationStep3)]
@method_decorator(login_required, name='dispatch')
class PetitionCreationWizard(SessionWizardView):
def get_template_names(self):
return [WizardTemplates[self.steps.current]]
def get_form_initial(self, step):
if step == "step2":
use_template = False
org_petition = "orgslugname" in self.kwargs
if org_petition:
orgslugname = self.kwargs['orgslugname']
org = Organization.objects.get(slugname=orgslugname)
else:
pytitionuser = get_session_user(self.request)
# Use a specific template if its id is given
if "template_id" in self.kwargs:
template = PetitionTemplate.objects.get(pk=self.kwargs['template_id'])
if org_petition:
if template in org.petitiontemplate_set.all():
return {'message': template.text}
else:
if template in pytitionuser.petitiontemplate_set.all():
return {'message': template.text}
# if no template id is given, check for default templates
if org_petition:
if org.default_template is not None:
template = org.default_template
use_template = True
elif pytitionuser.default_template is not None:
template = pytitionuser.default_template
use_template = True
if use_template:
return {'message': template.text}
return self.initial_dict.get(step, {})
def get_form_kwargs(self, step=None):
if step == "step1":
org_petition = "orgslugname" in self.kwargs
if org_petition:
orgslugname = self.kwargs['orgslugname']
kwargs = {"orgslugname": orgslugname}
else:
pytitionuser = get_session_user(self.request)
kwargs = {"user_name": pytitionuser.user.username}
return kwargs
else:
return {}
def done(self, form_list, **kwargs):
org_petition = "orgslugname" in self.kwargs
title = self.get_cleaned_data_for_step("step1")["title"]
message = self.get_cleaned_data_for_step("step2")["message"]
publish = self.get_cleaned_data_for_step("step3")["publish"]
pytitionuser = get_session_user(self.request)
_redirect = self.request.POST.get('redirect', '')
if org_petition:
orgslugname = self.kwargs['orgslugname']
try:
org = Organization.objects.get(slugname=orgslugname)
except Organization.DoesNotExist:
messages.error(self.request, _("Cannot find this organization"))
return redirect("user_dashboard")
#raise Http404(_("Organization does not exist"))
try:
permissions = Permission.objects.get(organization=org, user=pytitionuser)
except Permission.DoesNotExist:
return redirect("org_dashboard", orgslugname)
if pytitionuser in org.members.all() and permissions.can_create_petitions:
#FIXME I think new here is better than create
petition = Petition.objects.create(title=title, text=message, org=org)
if "template_id" in self.kwargs:
template = PetitionTemplate.objects.get(pk=self.kwargs['template_id'])
if template in org.petitiontemplate_set.all():
petition.prepopulate_from_template(template)
petition.save()
else:
messages.error(self.request, _("This template does not belong to your organization"))
return redirect("org_dashboard", orgslugname)
if publish:
petition.publish()
                if _redirect == '1':
return redirect("edit_petition", petition.id)
else:
return redirect("org_dashboard", orgslugname)
else:
messages.error(self.request, _("You don't have the permission to create a new petition in this Organization"))
return redirect("org_dashboard", orgslugname)
else:
petition = Petition.objects.create(title=title, text=message, user=pytitionuser)
if "template_id" in self.kwargs:
template = PetitionTemplate.objects.get(pk=self.kwargs['template_id'])
if template in pytitionuser.petitiontemplate_set.all():
petition.prepopulate_from_template(template)
petition.save()
else:
messages.error(self.request, _("This template does not belong to you"))
return redirect("user_dashboard")
if publish:
petition.publish()
            if _redirect == '1':
return redirect("edit_petition", petition.id)
else:
return redirect("user_dashboard")
def get_context_data(self, form, **kwargs):
org_petition = "orgslugname" in self.kwargs
context = super(PetitionCreationWizard, self).get_context_data(form=form, **kwargs)
if org_petition:
base_template = 'petition/org_base.html'
try:
org = Organization.objects.get(slugname=self.kwargs['orgslugname'])
except Organization.DoesNotExist:
raise Http404(_("Organization does not exist"))
else:
base_template = 'petition/user_base.html'
pytitionuser = get_session_user(self.request)
context.update({'user': pytitionuser,
'base_template': base_template})
if org_petition:
try:
permissions = Permission.objects.get(organization=org, user=pytitionuser)
            except Permission.DoesNotExist:
                # get_context_data() must return a dict; an HttpResponse
                # returned here would never reach the client, so raise instead.
                raise Http404(
                    _("Internal error, cannot find your permissions attached to this organization ('{orgname}')"
                      .format(orgname=org.name)))
context.update({'org': org,
'user_permissions': permissions})
if self.steps.current == "step3":
context.update(self.get_cleaned_data_for_step("step1"))
context.update(self.get_cleaned_data_for_step("step2"))
return context
@login_required
def petition_delete(request, petition_id):
petition = petition_from_id(petition_id)
pytitionuser = get_session_user(request)
if petition.owner_type == "user":
if petition.user == pytitionuser:
petition.delete()
return JsonResponse({})
else:
return JsonResponse({}, status=403)
else: # an organization owns the petition
userperms = Permission.objects.get(organization=petition.org, user=pytitionuser)
if userperms.can_delete_petitions:
petition.delete()
return JsonResponse({})
else:
return JsonResponse({}, status=403)
@login_required
def petition_publish(request, petition_id):
pytitionuser = get_session_user(request)
petition = petition_from_id(petition_id)
if petition.owner_type == "user":
if petition.user == pytitionuser:
petition.publish()
return JsonResponse({})
else:
# Petition owned by someone else
return JsonResponse({}, status=403)
else:
# Check if the user has permission over this org
try:
userperms = Permission.objects.get(organization=petition.org, user=pytitionuser)
if userperms.can_modify_petitions:
petition.publish()
return JsonResponse({})
else:
return JsonResponse({}, status=403)
except Permission.DoesNotExist:
return JsonResponse({}, status=403)
@login_required
def petition_unpublish(request, petition_id):
pytitionuser = get_session_user(request)
petition = petition_from_id(petition_id)
if petition.owner_type == "user":
if petition.user == pytitionuser:
petition.unpublish()
return JsonResponse({})
else:
return JsonResponse({}, status=403)
else:
# Check if the user has permission over this org
try:
userperms = Permission.objects.get(organization=petition.org, user=pytitionuser)
if userperms.can_modify_petitions:
petition.unpublish()
return JsonResponse({})
else:
return JsonResponse({}, status=403)
except Permission.DoesNotExist:
return JsonResponse({}, status=403)
@login_required
def edit_petition(request, petition_id):
petition = petition_from_id(petition_id)
pytitionuser = get_session_user(request)
if not petition.is_allowed_to_edit(pytitionuser):
messages.error(request, _("You are not allowed to edit this petition"))
return redirect("user_dashboard")
    submitted_ctx = {
        'content_form_submitted': False,
        'email_form_submitted': False,
        'social_network_form_submitted': False,
        'newsletter_form_submitted': False,
        'style_form_submitted': False,
    }
if request.method == "POST":
if 'content_form_submitted' in request.POST:
submitted_ctx['content_form_submitted'] = True
content_form = ContentFormPetition(request.POST)
if content_form.is_valid():
petition.title = content_form.cleaned_data['title']
petition.target = content_form.cleaned_data['target']
petition.text = content_form.cleaned_data['text']
petition.side_text = content_form.cleaned_data['side_text']
petition.footer_text = content_form.cleaned_data['footer_text']
petition.footer_links = content_form.cleaned_data['footer_links']
petition.sign_form_footer = content_form.cleaned_data['sign_form_footer']
petition.save()
else:
content_form = ContentFormPetition({f: getattr(petition, f) for f in ContentFormPetition.base_fields})
if 'email_form_submitted' in request.POST:
submitted_ctx['email_form_submitted'] = True
email_form = EmailForm(request.POST)
if email_form.is_valid():
petition.confirmation_email_reply = email_form.cleaned_data['confirmation_email_reply']
petition.save()
else:
email_form = EmailForm({f: getattr(petition, f) for f in EmailForm.base_fields})
if 'social_network_form_submitted' in request.POST:
submitted_ctx['social_network_form_submitted'] = True
social_network_form = SocialNetworkForm(request.POST)
if social_network_form.is_valid():
petition.twitter_description = social_network_form.cleaned_data['twitter_description']
petition.twitter_image = social_network_form.cleaned_data['twitter_image']
petition.org_twitter_handle = social_network_form.cleaned_data['org_twitter_handle']
petition.save()
else:
social_network_form = SocialNetworkForm({f: getattr(petition, f) for f in SocialNetworkForm.base_fields})
if 'newsletter_form_submitted' in request.POST:
submitted_ctx['newsletter_form_submitted'] = True
newsletter_form = NewsletterForm(request.POST)
if newsletter_form.is_valid():
petition.has_newsletter = newsletter_form.cleaned_data['has_newsletter']
petition.newsletter_text = newsletter_form.cleaned_data['newsletter_text']
petition.newsletter_subscribe_http_data = newsletter_form.cleaned_data['newsletter_subscribe_http_data']
petition.newsletter_subscribe_http_mailfield = newsletter_form.cleaned_data['newsletter_subscribe_http_mailfield']
petition.newsletter_subscribe_http_url = newsletter_form.cleaned_data['newsletter_subscribe_http_url']
petition.newsletter_subscribe_mail_subject = newsletter_form.cleaned_data['newsletter_subscribe_mail_subject']
petition.newsletter_subscribe_mail_from = newsletter_form.cleaned_data['newsletter_subscribe_mail_from']
petition.newsletter_subscribe_mail_to = newsletter_form.cleaned_data['newsletter_subscribe_mail_to']
petition.newsletter_subscribe_method = newsletter_form.cleaned_data['newsletter_subscribe_method']
petition.newsletter_subscribe_mail_smtp_host = newsletter_form.cleaned_data['newsletter_subscribe_mail_smtp_host']
petition.newsletter_subscribe_mail_smtp_port = newsletter_form.cleaned_data['newsletter_subscribe_mail_smtp_port']
petition.newsletter_subscribe_mail_smtp_user = newsletter_form.cleaned_data['newsletter_subscribe_mail_smtp_user']
petition.newsletter_subscribe_mail_smtp_password = newsletter_form.cleaned_data['newsletter_subscribe_mail_smtp_password']
petition.newsletter_subscribe_mail_smtp_tls = newsletter_form.cleaned_data['newsletter_subscribe_mail_smtp_tls']
petition.newsletter_subscribe_mail_smtp_starttls = newsletter_form.cleaned_data['newsletter_subscribe_mail_smtp_starttls']
petition.save()
else:
newsletter_form = NewsletterForm({f: getattr(petition, f) for f in NewsletterForm.base_fields})
if 'style_form_submitted' in request.POST:
submitted_ctx['style_form_submitted'] = True
style_form = StyleForm(request.POST)
if style_form.is_valid():
petition.bgcolor = style_form.cleaned_data['bgcolor']
petition.linear_gradient_direction = style_form.cleaned_data['linear_gradient_direction']
petition.gradient_from = style_form.cleaned_data['gradient_from']
petition.gradient_to = style_form.cleaned_data['gradient_to']
petition.save()
else:
style_form = StyleForm({f: getattr(petition, f) for f in StyleForm.base_fields})
else:
content_form = ContentFormPetition({f: getattr(petition, f) for f in ContentFormPetition.base_fields})
style_form = StyleForm({f: getattr(petition, f) for f in StyleForm.base_fields})
email_form = EmailForm({f: getattr(petition, f) for f in EmailForm.base_fields})
social_network_form = SocialNetworkForm({f: getattr(petition, f) for f in SocialNetworkForm.base_fields})
newsletter_form = NewsletterForm({f: getattr(petition, f) for f in NewsletterForm.base_fields})
ctx = {'user': pytitionuser,
'content_form': content_form,
'style_form': style_form,
'email_form': email_form,
'social_network_form': social_network_form,
'newsletter_form': newsletter_form,
'petition': petition}
url_prefix = request.scheme + "://" + request.get_host()
if petition.owner_type == "org":
permissions = Permission.objects.get(organization=petition.org, user=pytitionuser)
example_url = url_prefix + reverse("slug_show_petition",
kwargs={'orgslugname': petition.org.slugname,
'petitionname': _("save-the-kittens-from-bad-wolf")})
slug_prefix = (url_prefix + reverse("slug_show_petition",
kwargs={'orgslugname': petition.org.slugname,
'petitionname': 'toto'})).rsplit('/', 1)[0]
ctx.update({'org': petition.org,
'user_permissions': permissions,
'base_template': 'petition/org_base.html',
'example_url': example_url,
'slug_prefix': slug_prefix})
else:
example_url = url_prefix + reverse("slug_show_petition",
kwargs={'username': pytitionuser.user.username,
'petitionname': _("save-the-kittens-from-bad-wolf")})
slug_prefix = (url_prefix + reverse("slug_show_petition",
kwargs={'username': pytitionuser.user.username,
'petitionname': 'toto'})).rsplit('/', 1)[0]
ctx.update({'base_template': 'petition/user_base.html',
'example_url': example_url,
'slug_prefix': slug_prefix})
ctx.update(submitted_ctx)
return render(request, "petition/edit_petition.html", ctx)
@login_required
def show_signatures(request, petition_id):
petition = petition_from_id(petition_id)
pytitionuser = get_session_user(request)
ctx = {}
if petition.owner_type == "user":
base_template = 'petition/user_base.html'
else:
org = petition.org
base_template = 'petition/org_base.html'
other_orgs = pytitionuser.organization_set.filter(~Q(name=org.name)).all()
if pytitionuser not in org.members.all():
messages.error(request, _("You are not member of the following organization: \'{}\'".format(org.name)))
return redirect("user_dashboard")
try:
permissions = Permission.objects.get(organization=org, user=pytitionuser)
except Permission.DoesNotExist:
messages.error(request, _("Internal error, cannot find your permissions attached to this organization (\'{orgname}\')".format(orgname=org.name)))
return redirect("user_dashboard")
if not permissions.can_view_signatures:
messages.error(request, _("You are not allowed to view signatures in this organization"))
return redirect("org_dashboard", org.slugname)
ctx.update({'org': org, 'other_orgs': other_orgs,
'user_permissions': permissions})
if request.method == "POST":
action = request.POST.get('action', '')
        selected_signature_ids = request.POST.getlist('signature_id')
failed = False
if selected_signature_ids and action:
selected_signatures = Signature.objects.filter(pk__in=selected_signature_ids)
if action == "delete":
for s in selected_signatures:
pet = s.petition
if pet.org: # Petition is owned by an org, we check for rights
if pet.org.is_allowed_to(pytitionuser, 'can_delete_signatures'):
s.delete()
else:
failed = True
else: # Petition is owned by a user, we check it's the one asking for deletion
if pet.user == pytitionuser:
s.delete()
else:
failed = True
if failed:
messages.error(request, _("You don't have permission to delete some or all of selected signatures"))
else:
messages.success(request, _("You successfully deleted all selected signatures"))
if action == "re-send":
for s in selected_signatures:
try:
send_confirmation_email(request, s)
                    except Exception:
failed = True
if failed:
messages.error(request, _("An error happened while trying to re-send confirmation emails"))
else:
messages.success(request, _("You successfully deleted all selected signatures"))
if action == "re-send-all":
selected_signatures = Signature.objects.filter(petition=petition)
for s in selected_signatures:
try:
send_confirmation_email(request, s)
                    except Exception:
failed = True
if failed:
messages.error(request, _("An error happened while trying to re-send confirmation emails"))
else:
                    messages.success(request, _("You successfully re-sent all confirmation emails"))
return redirect("show_signatures", petition_id)
signatures = petition.signature_set.all()
ctx.update({'petition': petition, 'user': pytitionuser,
'base_template': base_template,
'signatures': signatures})
return render(request, "petition/signature_data.html", ctx)
@login_required
def account_settings(request):
pytitionuser = get_session_user(request)
submitted_ctx = {
'update_info_form_submitted': False,
'delete_account_form_submitted': False,
'password_change_form_submitted': False
}
if request.method == "POST":
if 'update_info_form_submitted' in request.POST:
update_info_form = UpdateInfoForm(pytitionuser.user, request.POST)
submitted_ctx['update_info_form_submitted'] = True
if update_info_form.is_valid():
update_info_form.save()
else:
update_info_form = get_update_form(pytitionuser.user)
if 'delete_account_form_submitted' in request.POST:
delete_account_form = DeleteAccountForm(request.POST)
submitted_ctx['delete_account_form_submitted'] = True
if delete_account_form.is_valid():
pytitionuser.drop()
return redirect("index")
else:
delete_account_form = DeleteAccountForm()
if 'password_change_form_submitted' in request.POST:
password_change_form = PasswordChangeForm(pytitionuser.user, request.POST)
submitted_ctx['password_change_form_submitted'] = True
if password_change_form.is_valid():
password_change_form.save()
messages.success(request, _("You successfully changed your password!"))
else:
password_change_form = PasswordChangeForm(pytitionuser.user)
else:
update_info_form = get_update_form(pytitionuser.user)
delete_account_form = DeleteAccountForm()
password_change_form = PasswordChangeForm(pytitionuser.user)
orgs = pytitionuser.organization_set.all()
# Checking if the user is allowed to leave the organisation
for org in orgs:
if org.members.count() < 2:
org.leave = False
else:
# More than one user, we need to check owners
owners = org.owners.all()
if owners.count() == 1 and pytitionuser in owners:
org.leave = False
else:
org.leave = True
ctx = {'user': pytitionuser,
'update_info_form': update_info_form,
'delete_account_form': delete_account_form,
'password_change_form': password_change_form,
'base_template': 'petition/user_base.html',
'orgs': orgs}
ctx.update(submitted_ctx)
return render(request, "petition/account_settings.html", ctx)
@login_required
def org_create(request):
user = get_session_user(request)
ctx = {'user': user}
if request.method == "POST":
form = OrgCreationForm(request.POST)
if form.is_valid():
org = form.save()
org.members.add(user)
perm = Permission.objects.get(organization=org)
perm.set_all(True)
messages.success(request, _("You successfully created organization '{}'".format(org.name)))
return redirect('user_dashboard')
else:
ctx.update({'form': form})
return render(request, "petition/org_create.html", ctx)
form = OrgCreationForm()
ctx.update({'form': form})
return render(request, "petition/org_create.html", ctx)
def slug_show_petition(request, orgslugname=None, username=None, petitionname=None):
try:
pytitionuser = get_session_user(request)
    except Exception:
pytitionuser = None
if orgslugname:
try:
org = Organization.objects.get(slugname=orgslugname)
slug = SlugModel.objects.get(slug=petitionname, petition__org=org)
except (Organization.DoesNotExist, SlugModel.DoesNotExist):
raise Http404(_("Sorry, we are not able to find this petition"))
petition = slug.petition
else:
try:
user = PytitionUser.objects.get(user__username=username)
slug = SlugModel.objects.get(slug=petitionname, petition__user=user)
        except (PytitionUser.DoesNotExist, SlugModel.DoesNotExist):
            raise Http404(_("Sorry, we are not able to find this petition"))
petition = slug.petition
sign_form = SignatureForm(petition=petition)
ctx = {"user": pytitionuser, "petition": petition, "form": sign_form,
'meta': petition_detail_meta(request, petition.id)}
return render(request, "petition/petition_detail.html", ctx)
@login_required
def add_new_slug(request, petition_id):
pytitionuser = get_session_user(request)
try:
petition = petition_from_id(petition_id)
    except Exception:
messages.error(request, _("This petition does not exist (anymore?)."))
return redirect("user_dashboard")
if request.method == "POST":
        slugtexts = request.POST.getlist('slugtext')
        if not slugtexts:
messages.error(request, _("You entered an empty slug text"))
else:
if petition.is_allowed_to_edit(pytitionuser):
for slugtext in slugtexts:
try:
petition.add_slug(slugtext)
petition.save()
messages.success(request, _("Successful addition of the slug '{}'!".format(slugtext)))
except IntegrityError:
messages.error(request, _("The slug '{}' already exists!".format(slugtext)))
except ValidationError as v:
for message in v.messages:
messages.error(request, message)
else:
messages.error(request, _("You don't have the permission to modify petitions"))
return redirect(reverse("edit_petition", args=[petition_id]) + "#tab_social_network_form")
else:
return redirect("user_dashboard")
@login_required
def del_slug(request, petition_id):
pytitionuser = get_session_user(request)
try:
petition = petition_from_id(petition_id)
    except Exception:
messages.error(request, _("This petition does not exist (anymore?)."))
return redirect("user_dashboard")
if petition.is_allowed_to_edit(pytitionuser):
slug_id = request.GET.get('slugid', None)
if not slug_id:
return redirect(reverse("edit_petition", args=[petition_id]) + "#tab_social_network_form")
slug = SlugModel.objects.get(pk=slug_id)
petition.del_slug(slug)
petition.save()
messages.success(request, _("Successful deletion of a slug"))
else:
messages.error(request, _("You don't have the permission to modify petitions"))
if petition.owner_type == "org":
return redirect("org_dashboard", petition.owner.slugname)
else:
return redirect("user_dashboard")
return redirect(reverse("edit_petition", args=[petition_id]) + "#tab_social_network_form")
|
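# complex(1, 1) is the number 1+1j; .real and .imag expose its two
# components as floats, so this prints "1.0 1.0".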
real = complex(1, 1).real
imag = complex(1, 1).imag
print(real, imag)
|
"""Unit tests for help command."""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
from gslib.command import Command
import gslib.tests.testcase as testcase
class HelpUnitTests(testcase.GsUtilUnitTestCase):
"""Help command unit test suite."""
def test_help_noargs(self):
stdout = self.RunCommand('help', return_stdout=True)
self.assertIn(b'Available commands', stdout)
def test_help_subcommand_arg(self):
stdout = self.RunCommand('help', ['web', 'set'], return_stdout=True)
self.assertIn(b'gsutil web set', stdout)
self.assertNotIn(b'gsutil web get', stdout)
def test_help_invalid_subcommand_arg(self):
stdout = self.RunCommand('help', ['web', 'asdf'], return_stdout=True)
self.assertIn(b'help about one of the subcommands', stdout)
def test_help_with_subcommand_for_command_without_subcommands(self):
stdout = self.RunCommand('help', ['ls', 'asdf'], return_stdout=True)
self.assertIn(b'has no subcommands', stdout)
def test_help_command_arg(self):
stdout = self.RunCommand('help', ['ls'], return_stdout=True)
self.assertIn(b'ls - List providers, buckets', stdout)
def test_command_help_arg(self):
stdout = self.RunCommand('ls', ['--help'], return_stdout=True)
self.assertIn(b'ls - List providers, buckets', stdout)
def test_subcommand_help_arg(self):
stdout = self.RunCommand('web', ['set', '--help'], return_stdout=True)
self.assertIn(b'gsutil web set', stdout)
self.assertNotIn(b'gsutil web get', stdout)
def test_command_args_with_help(self):
stdout = self.RunCommand('cp', ['foo', 'bar', '--help'], return_stdout=True)
self.assertIn(b'cp - Copy files and objects', stdout)
class HelpIntegrationTests(testcase.GsUtilIntegrationTestCase):
"""Help command integration test suite."""
def test_help_wrong_num_args(self):
stderr = self.RunGsUtil(['cp'], return_stderr=True, expected_status=1)
self.assertIn('Usage:', stderr)
def test_help_runs_for_all_commands(self):
    # This test is particularly helpful because the `help` command can fail
    # under unusual circumstances (e.g. someone adds a new command and makes
    # its "one-line" summary longer than the defined character limit).
for command in Command.__subclasses__():
# Raises exception if the exit code is non-zero.
self.RunGsUtil(['help', command.command_spec.command_name])
|
try:
    import cPickle as pickle
except ImportError:
    import pickle
from gem.evaluation import metrics
from gem.utils import evaluation_util, graph_util
import networkx as nx
import numpy as np
def evaluateStaticGraphReconstruction(digraph, graph_embedding,
X_stat, node_l=None, file_suffix=None,
sample_ratio_e=None, is_undirected=True,
is_weighted=False):
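    # Reconstruct the adjacency matrix from the embedding X_stat and score
    # it against the original graph: MAP and a precision curve, plus the
    # weight-reconstruction error when the graph is weighted.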
node_num = len(digraph.nodes)
# evaluation
if sample_ratio_e:
eval_edge_pairs = evaluation_util.getRandomEdgePairs(
node_num,
sample_ratio_e,
is_undirected
)
else:
eval_edge_pairs = None
if file_suffix is None:
estimated_adj = graph_embedding.get_reconstructed_adj(X_stat, node_l)
else:
estimated_adj = graph_embedding.get_reconstructed_adj(
X_stat,
file_suffix,
node_l
)
predicted_edge_list = evaluation_util.getEdgeListFromAdjMtx(
estimated_adj,
is_undirected=is_undirected,
edge_pairs=eval_edge_pairs
)
MAP = metrics.computeMAP(predicted_edge_list, digraph, is_undirected=is_undirected)
prec_curv, _ = metrics.computePrecisionCurve(predicted_edge_list, digraph)
# If weighted, compute the error in reconstructed weights of observed edges
if is_weighted:
digraph_adj = nx.to_numpy_matrix(digraph)
estimated_adj[digraph_adj == 0] = 0
err = np.linalg.norm(digraph_adj - estimated_adj)
err_baseline = np.linalg.norm(digraph_adj)
else:
err = None
err_baseline = None
return (MAP, prec_curv, err, err_baseline)
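# Note: err is the Frobenius norm of the difference restricted to observed
# edges, and err_baseline is the norm of the true weights, so
# err/err_baseline gives a relative reconstruction error.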
def expGR(digraph, graph_embedding,
X, n_sampled_nodes, rounds,
res_pre, m_summ,
is_undirected=True):
print('\tGraph Reconstruction')
summ_file = open('%s_%s.grsumm' % (res_pre, m_summ), 'w')
summ_file.write('Method\t%s\n' % metrics.getMetricsHeader())
if len(digraph.nodes) <= n_sampled_nodes:
rounds = 1
MAP = [None] * rounds
prec_curv = [None] * rounds
err = [None] * rounds
err_b = [None] * rounds
n_nodes = [None] * rounds
n_edges = [None] * rounds
for round_id in range(rounds):
sampled_digraph, node_l = graph_util.sample_graph(
digraph,
n_sampled_nodes=n_sampled_nodes
)
n_nodes[round_id] = len(sampled_digraph.nodes)
n_edges[round_id] = len(sampled_digraph.edges)
        print('\t\tRound: %d, n_nodes: %d, n_edges: %d\n' % (round_id,
n_nodes[round_id],
n_edges[round_id]))
sampled_X = X[node_l]
MAP[round_id], prec_curv[round_id], err[round_id], err_b[round_id] = \
evaluateStaticGraphReconstruction(sampled_digraph, graph_embedding,
sampled_X, node_l,
is_undirected=is_undirected)
try:
summ_file.write('Err: %f/%f\n' % (np.mean(err), np.std(err)))
summ_file.write('Err_b: %f/%f\n' % (np.mean(err_b), np.std(err_b)))
except TypeError:
pass
    summ_file.write('%f/%f\t%s\n' % (np.mean(MAP), np.std(MAP),
                                     metrics.getPrecisionReport(prec_curv[0],
                                                                n_edges[0])))
    summ_file.close()
pickle.dump([n_nodes,
n_edges,
MAP,
prec_curv,
err,
err_b],
open('%s_%s.gr' % (res_pre, m_summ), 'wb'))
|
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Game',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('datetime', models.DateTimeField()),
('site_ended', models.CharField(choices=[('T', 'Terrorists'), ('CT', 'Counter-Terrorists')], max_length=255)),
('rounds_for', models.IntegerField()),
('rounds_against', models.IntegerField()),
],
),
migrations.CreateModel(
name='GamePlayer',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('kills', models.IntegerField()),
('assists', models.IntegerField()),
('deaths', models.IntegerField()),
('mvps', models.IntegerField()),
('points', models.IntegerField()),
('game', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='stats.Game')),
],
),
migrations.CreateModel(
name='Map',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('map_name', models.CharField(max_length=255)),
],
),
migrations.CreateModel(
name='Player',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('username', models.CharField(max_length=255)),
('rank', models.CharField(blank=True, choices=[(1, 'Silver I'), (2, 'Silver II'), (3, 'Silver III'), (4, 'Silver IV'), (5, 'Silver Elite'), (6, 'Silver Elite Master'), (7, 'Gold Nova I'), (8, 'Gold Nova II'), (9, 'Gold Nova III'), (10, 'Gold Nova Master'), (11, 'Master Guardian I'), (12, 'Master Guardian II'), (13, 'Master Guardian Elite'), (14, 'Distinguished Master Guardian'), (15, 'Legendary Eagle'), (16, 'Legendary Eagle Master'), (17, 'Supreme Master First Class'), (18, 'The Global Elite')], max_length=255, null=True)),
],
),
migrations.AddField(
model_name='gameplayer',
name='player',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='stats.Player'),
),
migrations.AddField(
model_name='game',
name='game_map',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='stats.Map'),
),
migrations.AddField(
model_name='game',
name='players',
field=models.ManyToManyField(through='stats.GamePlayer', to='stats.Player'),
),
]
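# Note: Game.players goes through the GamePlayer model, so per-game stats
# (kills, assists, deaths, mvps, points) live on the through table rather
# than on the many-to-many relation itself.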
|
import numpy as np
import theano
import theano.tensor as T
class GradientOptimizer:
def __init__(self, lr):
self.lr = lr
def __call__(self, cost, params):
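        # Subclasses return a list of (shared_variable, update_expression)
        # pairs suitable for theano.function(updates=...).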
pass
@property
def learningRate(self):
return self.lr
@learningRate.setter
def learningRate(self, i):
self.lr = i
class RMSprop(GradientOptimizer):
def __init__(self, lr=0.01, rho=0.9, epsilon=1e-6):
super(RMSprop, self).__init__(lr)
self.rho = rho
self.epsilon = epsilon
def __call__(self, cost, params):
grads = T.grad(cost=cost, wrt=params)
updates = []
for p, g in zip(params, grads):
acc = theano.shared(p.get_value() * 0.)
acc_new = self.rho * acc + (1 - self.rho) * g ** 2
gradient_scaling = T.sqrt(acc_new + self.epsilon)
g = g / gradient_scaling
updates.append((acc, acc_new))
updates.append((p, p - self.lr * g))
return updates
class Adam(GradientOptimizer):
def __init__(self, lr=0.01, beta1=0.9, beta2=0.999, epsilon=1e-7):
super(Adam, self).__init__(lr)
self.beta1 = beta1
self.beta2 = beta2
self.epsilon = epsilon
def __call__(self, cost, params):
        grads = T.grad(cost=cost, wrt=params)
        updates = []
        exp = theano.shared(np.float32(1.0), name='exp', borrow=True)
        updates.append((exp, exp + 1))
for p, g in zip(params, grads):
m = theano.shared(p.get_value() * 0.)
v = theano.shared(p.get_value() * 0.)
m_new = self.beta1 * m + (1 - self.beta1) * g
v_new = self.beta2 * v + (1 - self.beta2) * g**2
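            # Bias correction: m and v start at zero, so divide by
            # (1 - beta**t) to undo their bias toward zero in early steps.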
mt = m_new / (1 - self.beta1**exp)
vt = v_new / (1 - self.beta2**exp)
updates.append((m, m_new))
updates.append((v, v_new))
updates.append((p, p - self.lr * mt / (T.sqrt(vt) + self.epsilon)))
return updates
class Momentum(GradientOptimizer):
def __init__(self, lr=0.01, mu=0.5):
super(Momentum, self).__init__(lr)
self.mu = mu
def __call__(self, cost, params):
        grads = T.grad(cost=cost, wrt=params)
updates = []
for p, g in zip(params, grads):
v = theano.shared(p.get_value() * 0.)
new_v = self.mu * v + self.lr * g
updates.append((v, new_v))
updates.append((p, p - new_v))
return updates
class Nesterov(GradientOptimizer):
def __init__(self, lr=0.01, mu=0.5):
super(Nesterov, self).__init__(lr)
self.mu = mu
def __call__(self, cost, params):
        grads = T.grad(cost=cost, wrt=params)
updates = []
for p, g in zip(params, grads):
v = theano.shared(p.get_value() * 0.)
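            # theano.clone re-evaluates the gradient at the look-ahead point
            # p - mu * v, which is what distinguishes Nesterov from plain
            # momentum.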
            new_v = self.mu * v + self.lr * theano.clone(g, replace={p: p - self.mu * v})
updates.append((v, new_v))
updates.append((p, p - new_v))
return updates
class Adagrad(GradientOptimizer):
def __init__(self, lr=0.01, epsilon=1e-7):
super(Adagrad, self).__init__(lr)
self.epsilon = epsilon
def __call__(self, cost, params):
        grads = T.grad(cost=cost, wrt=params)
updates = []
for p, g in zip(params, grads):
acc = theano.shared(p.get_value() * 0.)
acc_new = acc + g**2
updates.append((acc, acc_new))
updates.append((p, p - self.lr * g / T.sqrt(acc_new + self.epsilon)))
return updates
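# A minimal usage sketch (hypothetical model; any optimizer above can be
# swapped in, since they all implement __call__(cost, params)):
#
#   W = theano.shared(np.zeros((3, 2), dtype=theano.config.floatX))
#   x, y = T.matrix('x'), T.matrix('y')
#   cost = T.mean((T.dot(x, W) - y) ** 2)
#   train = theano.function([x, y], cost, updates=RMSprop(lr=0.01)(cost, [W]))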
|