text stringlengths 0 1.05M | meta dict |
|---|---|
# Adapted from test_file.py by Daniel Stutzbach
from __future__ import unicode_literals
import sys
import os
import errno
import unittest
from array import array
from weakref import proxy
from functools import wraps
from test.test_support import TESTFN, check_warnings, run_unittest, make_bad_fd
from test.test_support import py3k_bytes as bytes
from test.script_helper import run_python
from _io import FileIO as _FileIO
class AutoFileTests(unittest.TestCase):
    # file tests for which a test file is automatically set up

    def setUp(self):
        # Every test starts with TESTFN freshly opened for writing.
        self.f = _FileIO(TESTFN, 'w')

    def tearDown(self):
        if self.f:
            self.f.close()
        os.remove(TESTFN)

    def testWeakRefs(self):
        # verify weak references
        p = proxy(self.f)
        p.write(bytes(range(10)))
        self.assertEqual(self.f.tell(), p.tell())
        self.f.close()
        self.f = None
        # once the file object is gone the proxy must be dead
        self.assertRaises(ReferenceError, getattr, p, 'tell')

    def testSeekTell(self):
        self.f.write(bytes(range(20)))
        self.assertEqual(self.f.tell(), 20)
        self.f.seek(0)
        self.assertEqual(self.f.tell(), 0)
        self.f.seek(10)
        self.assertEqual(self.f.tell(), 10)
        self.f.seek(5, 1)   # whence=1: relative to current position
        self.assertEqual(self.f.tell(), 15)
        self.f.seek(-5, 1)
        self.assertEqual(self.f.tell(), 10)
        self.f.seek(-5, 2)  # whence=2: relative to end of file
        self.assertEqual(self.f.tell(), 15)

    def testAttributes(self):
        # verify expected attributes exist
        f = self.f
        self.assertEqual(f.mode, "wb")
        self.assertEqual(f.closed, False)
        # verify the attributes are readonly
        for attr in 'mode', 'closed':
            self.assertRaises((AttributeError, TypeError),
                              setattr, f, attr, 'oops')

    def testReadinto(self):
        # verify readinto
        self.f.write(b"\x01\x02")
        self.f.close()
        a = array(b'b', b'x'*10)
        self.f = _FileIO(TESTFN, 'r')
        n = self.f.readinto(a)
        self.assertEqual(array(b'b', [1, 2]), a[:n])

    def test_none_args(self):
        self.f.write(b"hi\nbye\nabc")
        self.f.close()
        self.f = _FileIO(TESTFN, 'r')
        # None must behave like "no size limit" for all three read methods
        self.assertEqual(self.f.read(None), b"hi\nbye\nabc")
        self.f.seek(0)
        self.assertEqual(self.f.readline(None), b"hi\n")
        self.assertEqual(self.f.readlines(None), [b"bye\n", b"abc"])

    def testRepr(self):
        self.assertEqual(repr(self.f), "<_io.FileIO name=%r mode='%s'>"
                         % (self.f.name, self.f.mode))
        # without a name the repr falls back to the file descriptor
        del self.f.name
        self.assertEqual(repr(self.f), "<_io.FileIO fd=%r mode='%s'>"
                         % (self.f.fileno(), self.f.mode))
        self.f.close()
        self.assertEqual(repr(self.f), "<_io.FileIO [closed]>")

    def testErrors(self):
        f = self.f
        self.assertTrue(not f.isatty())
        self.assertTrue(not f.closed)
        #self.assertEqual(f.name, TESTFN)
        self.assertRaises(ValueError, f.read, 10)  # not open for reading
        f.close()
        self.assertTrue(f.closed)
        f = _FileIO(TESTFN, 'r')
        self.assertRaises(TypeError, f.readinto, "")
        self.assertTrue(not f.closed)
        f.close()
        self.assertTrue(f.closed)

    def testMethods(self):
        methods = ['fileno', 'isatty', 'read', 'readinto',
                   'seek', 'tell', 'truncate', 'write', 'seekable',
                   'readable', 'writable']
        if sys.platform.startswith('atheos'):
            methods.remove('truncate')
        self.f.close()
        self.assertTrue(self.f.closed)
        for methodname in methods:
            method = getattr(self.f, methodname)
            # should raise on closed file
            self.assertRaises(ValueError, method)

    def testOpendir(self):
        # Issue 3703: opening a directory should fill the errno
        # Windows always returns "[Errno 13]: Permission denied
        # Unix calls dircheck() and returns "[Errno 21]: Is a directory"
        try:
            _FileIO('.', 'r')
        except IOError as e:
            self.assertNotEqual(e.errno, 0)
            self.assertEqual(e.filename, ".")
        else:
            self.fail("Should have raised IOError")

    #A set of functions testing that we get expected behaviour if someone has
    #manually closed the internal file descriptor.  First, a decorator:
    def ClosedFD(func):
        @wraps(func)
        def wrapper(self):
            #forcibly close the fd before invoking the problem function
            f = self.f
            os.close(f.fileno())
            try:
                func(self, f)
            finally:
                try:
                    self.f.close()
                except IOError:
                    pass
        return wrapper

    def ClosedFDRaises(func):
        @wraps(func)
        def wrapper(self):
            #forcibly close the fd before invoking the problem function
            f = self.f
            os.close(f.fileno())
            try:
                func(self, f)
            except IOError as e:
                # the stale descriptor must surface EBADF
                self.assertEqual(e.errno, errno.EBADF)
            else:
                self.fail("Should have raised IOError")
            finally:
                try:
                    self.f.close()
                except IOError:
                    pass
        return wrapper

    @ClosedFDRaises
    def testErrnoOnClose(self, f):
        f.close()

    @ClosedFDRaises
    def testErrnoOnClosedWrite(self, f):
        f.write('a')

    @ClosedFDRaises
    def testErrnoOnClosedSeek(self, f):
        f.seek(0)

    @ClosedFDRaises
    def testErrnoOnClosedTell(self, f):
        f.tell()

    @ClosedFDRaises
    def testErrnoOnClosedTruncate(self, f):
        f.truncate(0)

    @ClosedFD
    def testErrnoOnClosedSeekable(self, f):
        f.seekable()

    @ClosedFD
    def testErrnoOnClosedReadable(self, f):
        f.readable()

    @ClosedFD
    def testErrnoOnClosedWritable(self, f):
        f.writable()

    @ClosedFD
    def testErrnoOnClosedFileno(self, f):
        f.fileno()

    @ClosedFD
    def testErrnoOnClosedIsatty(self, f):
        self.assertEqual(f.isatty(), False)

    def ReopenForRead(self):
        # helper: reopen TESTFN for reading, then close its fd behind its back
        try:
            self.f.close()
        except IOError:
            pass
        self.f = _FileIO(TESTFN, 'r')
        os.close(self.f.fileno())
        return self.f

    @ClosedFDRaises
    def testErrnoOnClosedRead(self, f):
        f = self.ReopenForRead()
        f.read(1)

    @ClosedFDRaises
    def testErrnoOnClosedReadall(self, f):
        f = self.ReopenForRead()
        f.readall()

    @ClosedFDRaises
    def testErrnoOnClosedReadinto(self, f):
        f = self.ReopenForRead()
        a = array(b'b', b'x'*10)
        f.readinto(a)
class OtherFileTests(unittest.TestCase):
    # FileIO tests that manage TESTFN themselves (no shared setUp/tearDown).

    def testAbles(self):
        # readable()/writable()/seekable() must reflect the open mode
        try:
            f = _FileIO(TESTFN, "w")
            self.assertEqual(f.readable(), False)
            self.assertEqual(f.writable(), True)
            self.assertEqual(f.seekable(), True)
            f.close()

            f = _FileIO(TESTFN, "r")
            self.assertEqual(f.readable(), True)
            self.assertEqual(f.writable(), False)
            self.assertEqual(f.seekable(), True)
            f.close()

            f = _FileIO(TESTFN, "a+")
            self.assertEqual(f.readable(), True)
            self.assertEqual(f.writable(), True)
            self.assertEqual(f.seekable(), True)
            self.assertEqual(f.isatty(), False)
            f.close()

            if sys.platform != "win32":
                try:
                    f = _FileIO("/dev/tty", "a")
                except EnvironmentError:
                    # When run in a cron job there just aren't any
                    # ttys, so skip the test.  This also handles other
                    # OS'es that don't support /dev/tty.
                    pass
                else:
                    self.assertEqual(f.readable(), False)
                    self.assertEqual(f.writable(), True)
                    if sys.platform != "darwin" and \
                       'bsd' not in sys.platform and \
                       not sys.platform.startswith('sunos'):
                        # Somehow /dev/tty appears seekable on some BSDs
                        self.assertEqual(f.seekable(), False)
                    self.assertEqual(f.isatty(), True)
                    f.close()
        finally:
            os.unlink(TESTFN)

    def testModeStrings(self):
        # check invalid mode strings
        for mode in ("", "aU", "wU+", "rw", "rt"):
            try:
                f = _FileIO(TESTFN, mode)
            except ValueError:
                pass
            else:
                f.close()
                self.fail('%r is an invalid file mode' % mode)

    def testUnicodeOpen(self):
        # verify repr works for unicode too
        f = _FileIO(str(TESTFN), "w")
        f.close()
        os.unlink(TESTFN)

    def testBytesOpen(self):
        # Opening a bytes filename
        try:
            fn = TESTFN.encode("ascii")
        except UnicodeEncodeError:
            # Skip test
            return
        f = _FileIO(fn, "w")
        try:
            f.write(b"abc")
            f.close()
            with open(TESTFN, "rb") as f:
                self.assertEqual(f.read(), b"abc")
        finally:
            os.unlink(TESTFN)

    def testInvalidFd(self):
        self.assertRaises(ValueError, _FileIO, -10)
        self.assertRaises(OSError, _FileIO, make_bad_fd())
        if sys.platform == 'win32':
            import msvcrt
            self.assertRaises(IOError, msvcrt.get_osfhandle, make_bad_fd())

    def testBadModeArgument(self):
        # verify that we get a sensible error message for bad mode argument
        bad_mode = "qwerty"
        try:
            f = _FileIO(TESTFN, bad_mode)
        except ValueError as msg:
            if msg.args[0] != 0:
                s = str(msg)
                # the message should name the bad mode but not the file name
                if TESTFN in s or bad_mode not in s:
                    self.fail("bad error message for invalid mode: %s" % s)
            # if msg.args[0] == 0, we're probably on Windows where there may be
            # no obvious way to discover why open() failed.
        else:
            f.close()
            self.fail("no error for invalid mode: %s" % bad_mode)

    def testTruncate(self):
        f = _FileIO(TESTFN, 'w')
        f.write(bytes(bytearray(range(10))))
        self.assertEqual(f.tell(), 10)
        f.truncate(5)
        # truncate() must not move the file position
        self.assertEqual(f.tell(), 10)
        self.assertEqual(f.seek(0, os.SEEK_END), 5)
        f.truncate(15)
        self.assertEqual(f.tell(), 5)
        self.assertEqual(f.seek(0, os.SEEK_END), 15)
        f.close()

    def testTruncateOnWindows(self):
        def bug801631():
            # SF bug <http://www.python.org/sf/801631>
            # "file.truncate fault on windows"
            f = _FileIO(TESTFN, 'w')
            f.write(bytes(range(11)))
            f.close()

            f = _FileIO(TESTFN, 'r+')
            data = f.read(5)
            if data != bytes(range(5)):
                self.fail("Read on file opened for update failed %r" % data)
            if f.tell() != 5:
                self.fail("File pos after read wrong %d" % f.tell())

            # truncate() with no argument cuts at the current position
            f.truncate()
            if f.tell() != 5:
                self.fail("File pos after ftruncate wrong %d" % f.tell())

            f.close()
            size = os.path.getsize(TESTFN)
            if size != 5:
                self.fail("File size after ftruncate wrong %d" % size)
        try:
            bug801631()
        finally:
            os.unlink(TESTFN)

    def testAppend(self):
        # 'ab' mode must append rather than overwrite
        try:
            f = open(TESTFN, 'wb')
            f.write(b'spam')
            f.close()
            f = open(TESTFN, 'ab')
            f.write(b'eggs')
            f.close()
            f = open(TESTFN, 'rb')
            d = f.read()
            f.close()
            self.assertEqual(d, b'spameggs')
        finally:
            try:
                os.unlink(TESTFN)
            except:
                pass

    def testInvalidInit(self):
        self.assertRaises(TypeError, _FileIO, "1", 0, 0)

    def testWarnings(self):
        # bad constructor arguments must raise without emitting warnings
        with check_warnings(quiet=True) as w:
            self.assertEqual(w.warnings, [])
            self.assertRaises(TypeError, _FileIO, [])
            self.assertEqual(w.warnings, [])
            self.assertRaises(ValueError, _FileIO, "/some/invalid/name", "rt")
            self.assertEqual(w.warnings, [])

    def test_surrogates(self):
        # Issue #8438: try to open a filename containing surrogates.
        # It should either fail because the file doesn't exist or the filename
        # can't be represented using the filesystem encoding, but not because
        # of a LookupError for the error handler "surrogateescape".
        filename = u'\udc80.txt'
        try:
            with _FileIO(filename):
                pass
        except (UnicodeEncodeError, IOError):
            pass
        # Spawn a separate Python process with a different "file system
        # default encoding", to exercise this further.
        env = dict(os.environ)
        env[b'LC_CTYPE'] = b'C'
        _, out = run_python('-c', 'import _io; _io.FileIO(%r)' % filename, env=env)
        if ('UnicodeEncodeError' not in out and
            'IOError: [Errno 2] No such file or directory' not in out):
            self.fail('Bad output: %r' % out)
def test_main():
    # Entry point used by Python's regrtest machinery.
    # Historically, these tests have been sloppy about removing TESTFN.
    # So get rid of it no matter what.
    try:
        run_unittest(AutoFileTests, OtherFileTests)
    finally:
        if os.path.exists(TESTFN):
            os.unlink(TESTFN)

if __name__ == '__main__':
    test_main()
| {
"repo_name": "MattDevo/edk2",
"path": "AppPkg/Applications/Python/Python-2.7.2/Lib/test/test_fileio.py",
"copies": "9",
"size": "14236",
"license": "bsd-2-clause",
"hash": 6071421114717797000,
"line_mean": 30.801843318,
"line_max": 83,
"alpha_frac": 0.5191064906,
"autogenerated": false,
"ratio": 4.0523768858525475,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9071483376452548,
"avg_score": null,
"num_lines": null
} |
### Adapted from TF repo
import tensorflow as tf
from tensorflow import gradients
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
def hessian_vector_product(ys, xs, v):
    """Multiply the Hessian of `ys` w.r.t. `xs` by the vector list `v`.

    Uses the classic double-backprop trick: differentiate `ys` once,
    take the inner product of that gradient with `v` (held constant via
    stop_gradient), and differentiate again.  The Hessian itself is never
    materialized, so the cost is the same big-O as ordinary backprop.

    Example: for `y` = 1/2 `x`^T A `x`, the result evaluates to
    (A + A.T) `v`.

    Args:
      ys: A scalar value, or a tensor or list of tensors summed to a scalar.
      xs: A list of tensors to construct the Hessian over.
      v: A list of tensors with the same shapes as xs to multiply by.

    Returns:
      A list of tensors containing the Hessian-vector product.

    Raises:
      ValueError: `xs` and `v` have different length.
    """
    # Validate the input lists before building any graph ops.
    if len(v) != len(xs):
        raise ValueError("xs and v must have the same length.")

    # First backprop: d(ys)/d(xs).
    grads = gradients(ys, xs)
    assert len(grads) == len(xs)

    # Inner product <grad, v>; stop_gradient keeps v out of the second pass.
    elemwise_products = [
        math_ops.multiply(grad_elem, array_ops.stop_gradient(v_elem))
        for grad_elem, v_elem in zip(grads, v)
        if grad_elem is not None
    ]

    # Second backprop: differentiate the inner product w.r.t. xs again.
    grads_with_none = gradients(elemwise_products, xs)

    # Replace any missing second derivatives with explicit zeros.
    return [tf.zeros_like(x) if grad_elem is None else grad_elem
            for x, grad_elem in zip(xs, grads_with_none)]
def _AsList(x):
    """Pass lists/tuples through unchanged; wrap anything else in a list."""
    if isinstance(x, (list, tuple)):
        return x
    return [x]
def hessians(ys, xs, name="hessians", colocate_gradients_with_ops=False,
             gate_gradients=False, aggregation_method=None):
    """Constructs the Hessian of sum of `ys` with respect to `x` in `xs`.

    `hessians()` adds ops to the graph to output the Hessian matrix of `ys`
    with respect to `xs`.  It returns a list of `Tensor` of length `len(xs)`
    where each tensor is the Hessian of `sum(ys)`. This function currently
    only supports evaluating the Hessian with respect to (a list of) one-
    dimensional tensors.

    The Hessian is a matrix of second-order partial derivatives of a scalar
    tensor (see https://en.wikipedia.org/wiki/Hessian_matrix for more details).

    Args:
      ys: A `Tensor` or list of tensors to be differentiated.
      xs: A `Tensor` or list of tensors to be used for differentiation.
      name: Optional name to use for grouping all the gradient ops together.
        defaults to 'hessians'.
      colocate_gradients_with_ops: See `gradients()` documentation for details.
      gate_gradients: See `gradients()` documentation for details.
      aggregation_method: See `gradients()` documentation for details.

    Returns:
      A list of Hessian matrices of `sum(y)` for each `x` in `xs`.

    Raises:
      LookupError: if one of the operations between `xs` and `ys` does not
        have a registered gradient function.
      ValueError: if the arguments are invalid or not supported. Currently,
        this function only supports one-dimensional `x` in `xs`.
    """
    xs = _AsList(xs)
    # Options forwarded verbatim to every tf.gradients() call below.
    kwargs = {
        'colocate_gradients_with_ops': colocate_gradients_with_ops,
        'gate_gradients': gate_gradients,
        'aggregation_method': aggregation_method
    }
    # Compute a hessian matrix for each x in xs
    hessians = []
    for i, x in enumerate(xs):
        # Check dimensions
        ndims = x.get_shape().ndims
        if ndims is None:
            raise ValueError('Cannot compute Hessian because the dimensionality of '
                             'element number %d of `xs` cannot be determined' % i)
        elif ndims != 1:
            raise ValueError('Computing hessians is currently only supported for '
                             'one-dimensional tensors. Element number %d of `xs` has '
                             '%d dimensions.' % (i, ndims))
        with ops.name_scope(name + '_first_derivative'):
            # Compute the partial derivatives of the input with respect to all
            # elements of `x`
            _gradients = tf.gradients(ys, x, **kwargs)[0]
            # Unpack the gradients into a list so we can take derivatives with
            # respect to each element
            # NOTE(review): array_ops.unpack/pack are pre-TF-1.0 names
            # (renamed unstack/stack) — confirm the pinned TF version.
            _gradients = array_ops.unpack(_gradients)
        with ops.name_scope(name + '_second_derivative'):
            # Compute the partial derivatives with respect to each element of the list
            _hess = [tf.gradients(_gradient, x, **kwargs)[0] for _gradient in _gradients]
            # Pack the list into a matrix and add to the list of hessians
            hessians.append(array_ops.pack(_hess, name=name))
    return hessians
"repo_name": "kohpangwei/influence-release",
"path": "influence/hessians.py",
"copies": "1",
"size": "5137",
"license": "mit",
"hash": -2683368139492086300,
"line_mean": 40.7723577236,
"line_max": 83,
"alpha_frac": 0.6893128285,
"autogenerated": false,
"ratio": 3.8595041322314048,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9970558055354241,
"avg_score": 0.01565178107543281,
"num_lines": 123
} |
# Add-on metadata that Blender reads to list/describe this add-on in its UI.
# NOTE(review): "blender" version tuples are conventionally (2, 78, 0)
# rather than (2, 7, 8) — confirm the intended minimum version.
bl_info = {
    "name": "Add-on Template",
    "author": "Alex Martinelli",
    "location": "View3D > Tools > Simple Addon",
    "version": (1, 0, 0),
    "blender": (2, 7, 8),
    "description": "Template",
    "category": "Development"
}
import bpy
# Panel takes care of the UI components
class SimplePanel(bpy.types.Panel):
    # Hierarchical placement of the add-on inside the Blender UI.
    bl_space_type = "VIEW_3D"
    bl_region_type = "TOOLS"
    bl_context = "objectmode"
    bl_category = "Test Add-On"
    bl_label = "Template"

    def draw(self, context):
        # Lay out the panel widgets: a button wired to SimpleOperator,
        # followed by an editor for the scene's sample integer property.
        layout = self.layout
        layout.operator("object.simple_operator",
                        text="Template operator")
        layout.prop(context.scene, 'my_int_prop')

    @classmethod
    def register(cls):
        # Register properties related to the class here.
        print("Registered class: %s " % cls.bl_label)

    @classmethod
    def unregister(cls):
        # Delete parameters related to the class here
        print("Unregistered class: %s " % cls.bl_label)
# Operator is the actual logic behind the add-of
class SimpleOperator(bpy.types.Operator):
    bl_idname = "object.simple_operator"
    bl_label = "Template"

    def execute(self, context):
        # Demo action: drop a monkey mesh at the origin, sized by the
        # scene's sample integer property.
        bpy.ops.mesh.primitive_monkey_add(
            radius=context.scene.my_int_prop,
            location=(0, 0, 0))
        # Tell Blender the operator completed successfully.
        return {'FINISHED'}

    @classmethod
    def register(cls):
        # Register properties related to the class here.
        print("Registered operator: %s " % cls.bl_label)
        bpy.types.Scene.my_int_prop = bpy.props.IntProperty(name="My Int",
            description="Sample integer property to print to user",
            default=123,
            min=100,
            max=200)

    @classmethod
    def unregister(cls):
        # Delete parameters related to the class here.
        print("Unregistered operator: %s " % cls.bl_label)
def register():
    # Explicit per-class registration; bpy.utils.register_module(__name__)
    # would instead implicitly register everything in this file's scope.
    for cls in (SimpleOperator, SimplePanel):
        bpy.utils.register_class(cls)
    print("%s registration complete\n" % bl_info.get('name'))
def unregister():
    # Always unregister in reverse order to prevent error due to
    # interdependencies (explicit variant of unregister_module(__name__)).
    for cls in (SimpleOperator, SimplePanel):
        bpy.utils.unregister_class(cls)
    print("%s unregister complete\n" % bl_info.get('name'))
# Called only when running the script from Blender;
# when distributed as a plugin, register() and unregister() are used.
if __name__ == "__main__":
    try:
        # A fresh session may have nothing registered yet.
        unregister()
    except Exception as e:
        print(e)
    register()
| {
"repo_name": "5agado/data-science-learning",
"path": "graphics/blender_addon_template.py",
"copies": "1",
"size": "3418",
"license": "apache-2.0",
"hash": 6293365353858813000,
"line_mean": 29.7927927928,
"line_max": 115,
"alpha_frac": 0.6091281451,
"autogenerated": false,
"ratio": 4.20935960591133,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.002061904167871845,
"num_lines": 111
} |
# adapted from the following examples:
# - the video capture example http://processing.org/reference/libraries/video/Capture.html
# - the blob detection example http://www.v3ga.net/processing/BlobDetection/index-page-download.html
# - the video background subtraction example https://github.com/processing/processing/tree/master/java/libraries/video/examples/Capture/BackgroundSubtraction
from processing.video import Capture
from blobDetection import BlobDetection
def setup():
    # One-time sketch initialization: open the camera, allocate working
    # images, and configure blob detection and background subtraction.
    global cam, theBlobDetection, numPixels, backgroundPixels, newFrame, mode, bg, img
    mode = 0  # 0 = show background, 1 = show camera, 2 = subtracted + blobs
    size(640/2, 480/2)
    cameras = Capture.list()
    if len(cameras) == 0:
        print("There are no cameras available for capture.")
        exit()
    else:
        print("Available cameras:")
        for i,camera in enumerate(cameras):
            print(i,camera)
    # The camera can be initialized directly using an
    # element from the array returned by list():
    # NOTE(review): index 7 is hard-coded and raises IndexError on machines
    # with fewer capture devices — pick the index per machine.
    cam = Capture(this, cameras[7])
    cam.start()
    newFrame = False # is the frame ready yet?
    # BlobDetection
    img = PImage(width,height)
    bg = PImage(width,height)
    theBlobDetection = BlobDetection(img.width, img.height)
    theBlobDetection.setPosDiscrimination(True)
    theBlobDetection.setThreshold(0.2) # will detect bright areas whose luminosity > 0.2f;
    # Background subtraction
    numPixels = width * height;
    backgroundPixels = [0]*numPixels # Create a list to store the background image
    loadPixels();
def draw():
    # Per-frame loop: grab a camera frame when one is ready, then render
    # according to the current display mode.
    global newFrame
    if cam.available():
        cam.read()
        newFrame = True
    if newFrame:
        newFrame = False;
        if mode == 0:
            image(bg,0,0,width,height)
            text("bg image", 20, 20)
        elif mode == 1:
            image(cam,0,0,width,height)
            text("camera image", 20, 20)
        elif mode == 2:
            # subtract background into pixels[], copy into img, blur,
            # then detect and draw blobs
            bgSub()
            img.loadPixels()
            for i,pix in enumerate(pixels):
                img.pixels[i] = pixels[i]
            img.updatePixels()
            fastblur(img, 2)
            theBlobDetection.computeBlobs(img.pixels)
            drawBlobsAndEdges(True,True)
            # image(cam, 0, 0)
            text("with bg subtracted", 20, 20)
def bgSub():
    # Render |current frame - stored background| into the sketch's pixels[]
    # and tally the total per-channel change ("presence").
    presenceSum = 0
    for i in range(numPixels): # For each pixel in the video frame...
        # Fetch the current color in that location, and also the color of the background in that spot
        currColor = cam.pixels[i]
        bkgdColor = backgroundPixels[i]
        # Extract the red, green, and blue components of the current pixel's color
        currR = (currColor >> 16) & 0xFF
        currG = (currColor >> 8) & 0xFF
        currB = currColor & 0xFF
        # Extract the red, green, and blue components of the background pixel's color
        bkgdR = (bkgdColor >> 16) & 0xFF
        bkgdG = (bkgdColor >> 8) & 0xFF
        bkgdB = bkgdColor & 0xFF
        # Compute the difference of the red, green, and blue values
        diffR = abs(currR - bkgdR)
        diffG = abs(currG - bkgdG)
        diffB = abs(currB - bkgdB)
        # Add these differences to the running tally
        presenceSum += diffR + diffG + diffB;
        # Render the difference image to the screen
        pixels[i] = color(diffR, diffG, diffB);
        # The following line does the same thing much faster, but is more technical
        #pixels[i] = 0xFF000000 | (diffR << 16) | (diffG << 8) | diffB;
    updatePixels() # Notify that the pixels[] array has changed
    #print(presenceSum) # Print out the total amount of movement
def keyPressed():
    # Space captures the current camera frame as the new background;
    # any other key cycles through the three display modes.
    global mode, backgroundPixels
    if(key == ' '):
        cam.loadPixels()
        for i,pix in enumerate(cam.pixels):
            backgroundPixels[i] = cam.pixels[i]
        bg.copy(cam, 0, 0, cam.width, cam.height,
                0, 0, bg.width, bg.height)
    else:
        mode = (mode+1)%3
def drawBlobsAndEdges(drawBlobs, drawEdges):
    # Overlay detected blobs (red bounding boxes) and/or their contour
    # edges (green lines) on the canvas.
    noFill()
    for n in range(theBlobDetection.getBlobNb()):
        b=theBlobDetection.getBlob(n)
        if b != None:
            # Edges
            if drawEdges:
                strokeWeight(3)
                stroke(0,255,0)
                for m in range(b.getEdgeNb()):
                    eA = b.getEdgeVertexA(m)
                    eB = b.getEdgeVertexB(m)
                    if (eA != None and eB != None):
                        # vertex coordinates are normalized 0..1 — scale to canvas
                        line(
                            eA.x*width, eA.y*height,
                            eB.x*width, eB.y*height
                        )
            # Blobs
            if drawBlobs:
                strokeWeight(1)
                stroke(255,0,0)
                rect(
                    b.xMin*width,b.yMin*height,
                    b.w*width,b.h*height
                )
"""
// ==================================================
// Super Fast Blur v1.1
// by Mario Klingemann
// <http://incubator.quasimondo.com>
// ==================================================\
"""
def fastblur( img, radius):
if (radius<1):
return
w=img.width
h=img.height
wm=w-1
hm=h-1
wh=w*h
div=radius+radius+1
r = [0]*wh
g = [0]*wh
b = [0]*wh
vmin = [0]*max(w,h)
vmax = [0]*max(w,h)
pix = [0]*len(img.pixels)
dv = [0]*256*div
for i in range(256*div):
dv[i] = i/div
yw = 0
yi = 0
for y in range(h):
rsum=0
gsum=0
bsum=0
for i in range(-radius,radius+1):
p=pix[yi+min(wm,max(i,0))]
rsum +=(p & 0xff0000)>>16
gsum +=(p & 0x00ff00)>>8
bsum += p & 0x0000ff
for x in range(w):
r[yi]=dv[rsum]
g[yi]=dv[gsum]
b[yi]=dv[bsum]
if(y==0):
vmin[x]=min(x+radius+1,wm)
vmax[x]=max(x-radius,0)
p1=pix[yw+vmin[x]]
p2=pix[yw+vmax[x]]
rsum+=((p1 & 0xff0000)-(p2 & 0xff0000))>>16
gsum+=((p1 & 0x00ff00)-(p2 & 0x00ff00))>>8
bsum+= (p1 & 0x0000ff)-(p2 & 0x0000ff)
yi+=1
yw+=w
for x in range(w):
rsum=0
gsum=0
bsum=0
yp=-radius*w
for i in range(-radius,radius+1):
yi = max(0,yp)+x
rsum+=r[yi]
gsum+=g[yi]
bsum+=b[yi]
yp+=w
yi=x
for y in range(h):
pix[yi]=0xff000000 | (dv[rsum]<<16) | (dv[gsum]<<8) | dv[bsum]
if(x==0):
vmin[y]=min(y+radius+1,hm)*w
vmax[y]=max(y-radius,0)*w
p1=x+vmin[y]
p2=x+vmax[y]
rsum+=r[p1]-r[p2]
gsum+=g[p1]-g[p2]
bsum+=b[p1]-b[p2]
yi+=w
| {
"repo_name": "jeisenma/ProgrammingConcepts",
"path": "14-libraries/blobs.py",
"copies": "1",
"size": "5601",
"license": "bsd-3-clause",
"hash": 7941896664336055000,
"line_mean": 25.6714285714,
"line_max": 157,
"alpha_frac": 0.6391715765,
"autogenerated": false,
"ratio": 2.627110694183865,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3766282270683865,
"avg_score": null,
"num_lines": null
} |
# adapted from the Keras LSTM text generation example

# basic imports
import numpy as np
import matplotlib.pyplot as plt

# load FsPeptide simulation data, featurize
from msmbuilder.example_datasets import FsPeptide
from msmbuilder.featurizer import DihedralFeaturizer

fs = FsPeptide().get().trajectories
n_atoms = fs[0].n_atoms
fs_dih_feat = DihedralFeaturizer().transform(fs)

# cut the sequence into semi-redundant sequences of maxlen steps:
# given sequential frames i..i+maxlen as input,
# predict the (i+maxlen)-th frame
maxlen = 10
step = 3
sequences = []
next_frames = []
# BUG FIX: the original contained a truncated "if ind <" line here (a
# syntax error); every trajectory is now used.  Reinstate a guard such as
# "if ind >= K: break" to train on a subset of trajectories.
for ind, traj in enumerate(fs_dih_feat):
    for i in range(0, len(traj) - maxlen, step):
        sequences.append(traj[i : i + maxlen])
        next_frames.append(traj[i + maxlen])
print('nb sequences:', len(sequences))

ndim = fs_dih_feat[0].shape[1]
# BUG FIX: this print was mislabeled 'nb sequences:' in the original.
print('nb feature dims:', ndim)

# pack the (input window, next frame) pairs into dense training arrays
X = np.zeros((len(sequences), maxlen, ndim))
y = np.zeros((len(sequences), ndim))
for i, sequence in enumerate(sequences):
    X[i] = sequences[i]
    y[i] = next_frames[i]

# build an LSTM
from keras.models import Sequential
from keras.layers.core import Dense, Activation, Dropout
from keras.layers.recurrent import LSTM
from keras.optimizers import SGD

print('Build model...')
model = Sequential()
# NOTE(review): LSTM(input_dim, output_dim) / Dense(input_dim, output_dim)
# is the Keras 0.x layer signature — confirm the pinned Keras version.
model.add(LSTM(ndim, ndim*2, return_sequences=True))
#model.add(Dropout(0.2))
model.add(LSTM(ndim*2, ndim*2, return_sequences=False))
#model.add(Dropout(0.2))
model.add(Dense(ndim*2, ndim))

sgd = SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='mean_squared_error', optimizer=sgd)
| {
"repo_name": "maxentile/md-metric-learning",
"path": "algorithms/lstm.py",
"copies": "1",
"size": "1587",
"license": "mit",
"hash": 823034044081042800,
"line_mean": 26.8421052632,
"line_max": 64,
"alpha_frac": 0.7246376812,
"autogenerated": false,
"ratio": 2.9943396226415095,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.42189773038415096,
"avg_score": null,
"num_lines": null
} |
# Standard scientific Python imports
#import matplotlib.pyplot as plt
# Import datasets, classifiers and performance metrics
from sklearn import datasets, svm, metrics
import numpy
from PIL import Image
import os
import time
from datetime import datetime
NUM_FILES = 30


class MyDataSet:
    """Simple container pairing an image matrix with its target labels."""

    def __init__(self):
        # BUG FIX: these were class attributes, so every instance shared
        # one `target` list (appends leaked across instances).  They are
        # now per-instance; callers still just assign .images/.target.
        self.images = numpy.ndarray(shape=(3, 3))
        self.target = []
def find_gif_names(images_dir):
    """Return the paths of all files exactly one level below images_dir.

    Walks each immediate subdirectory and joins images_dir/subdir/entry
    for every entry found.
    """
    return [
        os.path.join(images_dir, sub, entry)
        for sub in os.listdir(images_dir)
        for entry in os.listdir(os.path.join(images_dir, sub))
    ]
def read_image_file(file_name):
    """
    Read an image file (expected to be a GIF) and return its raw pixel
    bytes as a flat list of ints (one 0-255 value per byte).
    """
    im = Image.open(file_name)
    # BUG FIX: the original used [ord(b) for b in im.tobytes()], which
    # breaks on Python 3 where iterating bytes already yields ints.
    # bytearray yields ints on both Python 2 and 3.
    return list(bytearray(im.tobytes()))
def read_images(file_names, n_samples):
    """Read the first n_samples files via read_image_file; list of results."""
    return [read_image_file(name) for name in file_names[:n_samples]]
def read_gif_names(csv_file_name, tag_index, n_samples):
    """Return the first n_samples file names (first CSV column).

    The first line is treated as a header and skipped.  `tag_index` is
    unused but kept for interface compatibility with existing callers.
    """
    gif_files = []
    with open(csv_file_name) as f:
        lines = f.read().split('\n')
    for l in lines[1:]:  # skip the header row
        # BUG FIX: a trailing newline used to yield a bogus '' filename.
        if not l.strip():
            continue
        gif_files.append(l.split(',')[0])
    return gif_files[:n_samples]
def read_targets(csv_file_name, tag_index, gif_files, n_samples):
    """
    csv file holds data like
      f1 t1 t2 t3
      f2 t1 t2 t3
    where f1 and f2 are the file names, and ti holds whether the ith tag
    is "true" for that file.

    gif_files gives the order the files are fed to the ML algorithm;
    tag_index is the column of the tag we're interested in.

    Returns a list of 0/1 labels aligned with gif_files (files missing
    from the CSV are skipped), truncated to n_samples.
    """
    tags = {}
    with open(csv_file_name) as f:
        lines = f.read().split('\n')
    for l in lines[1:]:  # skip the header row
        # BUG FIX: skip blank/trailing lines instead of parsing them.
        if not l.strip():
            continue
        vals = l.split(',')
        filename = vals[0]
        # "true" -> 1, anything else -> 0
        tags[filename] = 1 if vals[1 + tag_index] == "true" else 0
    # Preserve the caller's file order; drop files absent from the CSV.
    result = [tags[x] for x in gif_files if x in tags]
    return result[:n_samples]
def nasa_dataset(n_samples):
    """Build a MyDataSet from results.csv (labels from tag column 14)."""
    dataset = MyDataSet()
    gif_names = read_gif_names("results.csv", 0, n_samples)
    dataset.target = read_targets("results.csv", 14, gif_names, n_samples)
    dataset.images = numpy.array(read_images(gif_names, n_samples))
    return dataset
####################################################
# Begin.
#
# Script body: load the NASA image dataset, train an SVM on the first
# half, and report metrics on the second half, timing the whole run.
curr_time = datetime.now()  # wall-clock start, printed at the end
#gifs = find_gif_names("images")

# The digits dataset
digits = nasa_dataset(NUM_FILES)

# The data that we are interested in is made of 8x8 images of digits, let's
# have a look at the first 3 images, stored in the `images` attribute of the
# dataset.  If we were working from image files, we could load them using
# pylab.imread.  Note that each image must have the same size. For these
# images, we know which digit they represent: it is given in the 'target' of
# the dataset.
#for image in digits.images:
#    print(image)
images_and_labels = list(zip(digits.images, digits.target))
#for index, (image, label) in enumerate(images_and_labels[:4]):
    #plt.subplot(2, 4, index + 1)
    #plt.axis('off')
    #plt.imshow(image, cmap=plt.cm.gray_r, interpolation='nearest')
    #plt.title('Training: %i' % label)

# To apply a classifier on this data, we need to flatten the image, to
# turn the data in a (samples, feature) matrix:
n_samples = len(digits.images)
data = digits.images.reshape((n_samples, -1))

# Create a classifier: a support vector classifier
classifier = svm.SVC(gamma=0.001)

# We learn the digits on the first half of the digits
# NOTE(review): `n_samples / 2` as an index requires Python 2 integer
# division (Python 3 would need `//`) — confirm the target runtime.
try:
    #print(digits.images[:n_samples / 2])
    #print(digits.target[:n_samples / 2])
    classifier.fit(digits.images[:n_samples / 2], digits.target[:n_samples / 2])
    classifier.fit(digits.images[:n_samples / 2], digits.target[:n_samples / 2])
    # Now predict the value of the digit on the second half:
    expected = digits.target[n_samples / 2:]
    predicted = classifier.predict(data[n_samples / 2:])
    print(expected)
    print(predicted)
    print("Classification report for classifier %s:\n%s\n"
          % (classifier, metrics.classification_report(expected, predicted)))
    print("Confusion matrix:\n%s" % metrics.confusion_matrix(expected, predicted))
    images_and_predictions = list(zip(digits.images[n_samples / 2:], predicted))
except:
    # report timings even on failure, then re-raise the original error
    print("finished with errors.")
    time_finished = datetime.now()
    print(curr_time)
    print(time_finished)
    raise
time_finished = datetime.now()
print(curr_time)
print(time_finished)
#for index, (image, prediction) in enumerate(images_and_predictions[:4]):
    #plt.subplot(2, 4, index + 5)
    #plt.axis('off')
    #plt.imshow(image, cmap=plt.cm.gray_r, interpolation='nearest')
    #plt.title('Prediction: %i' % prediction)
#plt.show()
| {
"repo_name": "spacepirates/marchine_learning",
"path": "read_nasa_img.py",
"copies": "1",
"size": "5687",
"license": "apache-2.0",
"hash": -7054122301096947000,
"line_mean": 29.4117647059,
"line_max": 82,
"alpha_frac": 0.6272199754,
"autogenerated": false,
"ratio": 3.5213622291021673,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9575160085290519,
"avg_score": 0.014684423842329599,
"num_lines": 187
} |
# adapted from the plugin_comments provided with web2py
import re
from gluon.tools import prettydate
from gluon.contrib.markdown.markdown2 import markdown
import requests
import os.path
import urllib
import bleach
from bleach.sanitizer import Cleaner
from datetime import datetime
import json
from opentreewebapputil import fetch_github_app_auth_token
from pprint import pprint
def error():
    """Abort the current request by raising web2py's HTTP 404 exception."""
    raise HTTP(404)
# builds an organized list (UL) with threaded comments
def SUL(*a, **b):
    """UL() wrapper that silently drops falsy children (None, '', etc.)."""
    visible_items = [item for item in a if item]
    return UL(*visible_items, **b)
# client-side script to manage comment display and form behavior
# NOTE(review): the JavaScript below is served verbatim to the browser; it
# (re)creates the reply form, validates the visible feedback fields, and
# posts new issues/comments (or deletes) back to this controller via
# jQuery.post. Do not alter the string content -- it is runtime behavior.
script=SCRIPT("""
var action = null;
var formhtml = null;
function delete_all_forms() {
jQuery('div.plugin_localcomments div.reply').each(function() {
if ($(this).closest('.issue').length === 0) {
// this is the space for new topics, with a separate link
$(this).html('<a class="btn btn-small reply" href="#">Add a new topic</a>');
} else {
// this is the prompt to add comments to an existing topic
$(this).html('<a class="btn btn-small reply" href="#">Add a comment</a>');
}
$(this).find('a.reply').unbind('click').click(function() {
$formHolder = $(this).parent();
delete_all_forms();
$formHolder.html(formhtml);
capture_form();
return false;
});
});
}
function capture_form() {
// bind and modify widgets
jQuery('div.plugin_localcomments a.msg-close').unbind('click').click(function(){
delete_all_forms();
return false;
});
// adjust UI for issues versus comments
var threadParentID = 0;
var $parentIssueContainer = $('.plugin_localcomments form').closest('li.issue');
if ($parentIssueContainer.length > 0) {
threadParentID = $parentIssueContainer.find('div:eq(0)').attr('id').split('r')[1];
}
var isThreadStarter = threadParentID == 0;
if (isThreadStarter) {
jQuery('div.plugin_localcomments select[name=feedback_type]').show();
jQuery('div.plugin_localcomments select[name=issue_title]').show();
} else {
jQuery('div.plugin_localcomments select[name=feedback_type]').hide();
jQuery('div.plugin_localcomments select[name=issue_title]').hide();
}
// always hide expertise checkbox and surrounding label (not currently needed)
jQuery('div.plugin_localcomments label.expertise-option').hide();
// show/hide some fields based on feedback type
var $referenceURLField = jQuery('div.plugin_localcomments input[name="reference_url"]');
$referenceURLField.hide();
jQuery('div.plugin_localcomments select[name="feedback_type"]').unbind('change').change(function(){
switch (jQuery(this).val()) {
case 'Suggest a phylogeny to incorporate':
$referenceURLField.attr('placeholder',"Provide a database reference or published article (URL or DOI)");
$referenceURLField.show();
break;
case 'Correction to relationships in the synthetic tree':
case 'Correction to names (taxonomy)':
case 'Extinct/extant issue (taxonomy)':
$referenceURLField.attr('placeholder',"Provide a supporting article or web site (URL or DOI)");
$referenceURLField.show();
break;
default:
$referenceURLField.attr('placeholder',"...");
$referenceURLField.hide();
}
});
// convert a "naked" DOI to an URL, where possible
$referenceURLField.unbind('blur').bind('blur', function() {
var $doiField = $(this);
$doiField.val( DOItoURL($doiField.val()) );
});
// update the Login link, if shown
if (typeof(fixLoginLinks) === 'function') {
fixLoginLinks();
}
function validateFeedbackForm(options) {
// Return true (if all inputs are valid), or false
if (!options) options = {VERBOSE: false};
var $form = $('div.plugin_localcomments form:eq(0)');
var prompt = "Please provide data for all visible fields";
var problemsFound = false;
// validate form fields based on feedback type
var $visitorNameField = $form.find('input[name="visitor_name"]');
if ($visitorNameField.is(':visible') && ($.trim($visitorNameField.val()) === '')) {
//prompt = "Please enter your name (and preferably an email address) so we can stay in touch.";
problemsFound = true;
}
var $fbTypeField = $form.find('select[name="feedback_type"]');
if ($fbTypeField.is(':visible') && ($.trim($fbTypeField.val()) === '')) {
//prompt = "Please choose a feedback type for this topic.";
problemsFound = true;
}
var $titleField = $form.find('input[name="issue_title"]');
if ($titleField.is(':visible') && ($.trim($titleField.val()) === '')) {
//prompt = "Please give this topic a title.";
problemsFound = true;
}
var $bodyField = $form.find('textarea[name="body"]');
if ($.trim($bodyField.val()) === '') {
//prompt = "Please enter some text for this "+ (isThreadStarter ? 'issue' : 'comment') +".";
problemsFound = true;
}
var $referenceURLField = $form.find('input[name="reference_url"]');
if ($referenceURLField.is(':visible') && ($.trim($referenceURLField.val()) === '')) {
//prompt = "Please provide a supporting reference (DOI or URL).";
problemsFound = true;
}
if (problemsFound && options.VERBOSE) {
// Show an alert to prompt corrective action
alert(prompt);
}
// return true only if all's well
return !(problemsFound);
}
function updateFeedbackButton(evt) {
var $btn = jQuery('div.plugin_localcomments form:eq(0) :submit');
if (validateFeedbackForm({VERBOSE: false})) {
$btn.removeClass('disabled');
} else {
$btn.addClass('disabled');
}
}
// update now, and after any change to input widgets
updateFeedbackButton();
jQuery('div.plugin_localcomments :input')
.unbind('change.validation keyup.validation')
.bind('change.validation keyup.validation', updateFeedbackButton);
jQuery('div.plugin_localcomments :submit').unbind('click').click(function(){
var $form = jQuery(this).closest('form');
if (!validateFeedbackForm({VERBOSE: true})) {
// something's wrong
return false;
}
if (window.opener) {
/* This window was opened by another page! Copy its URL to the 'url' field
* This is probably OneZoom, or our curation tool, or some other site using
* OpenTree data. We'll group these issues by URL.
*/
$form.find('input[name="url"]').val( window.opener.location.href );
}
jQuery.post(action,
{
////$'thread_parent_id': form.find('input[name="thread_parent_id"]').val(),
'issue_or_comment': (isThreadStarter ? 'issue' : 'comment'),
'thread_parent_id': threadParentID, ///$form.parent().prev().attr('id').split('r')[1],
'synthtree_id': $form.find('input[name="synthtree_id"]').val(),
'synthtree_node_id': $form.find('input[name="synthtree_node_id"]').val(),
'sourcetree_id': $form.find('input[name="sourcetree_id"]').val(),
'ottol_id': $form.find('input[name="ottol_id"]').val(),
'target_node_label': $form.find('input[name="target_node_label"]').val(),
'url': $form.find('input[name="url"]').val(),
'title': $form.find('input[name="issue_title"]').val(),
'body': $form.find('textarea[name="body"]').val(),
'feedback_type': $form.find('select[name="feedback_type"]').val(),
'reference_url': $referenceURLField.is(':visible') ? $form.find('input[name="reference_url"]').val() : '',
'claimed_expertise': $form.find(':checkbox[name="claimed_expertise"]').is(':checked'),
'visitor_name': $form.find('input[name="visitor_name"]').val(),
'visitor_email': $form.find('input[name="visitor_email"]').val()
},
function(data,r){
if(data) {
var $refreshArea;
if (isThreadStarter) {
$refreshArea = $form.parent().nextAll('ul');
// add the new comment (LI) to the top of the list
$refreshArea.prepend(data);
} else {
$refreshArea = $form.parent().prevAll('ul');
// add the new comment (LI) to the end of the list
$refreshArea.append(data);
}
$form.find('textarea[name="body"]').val('');
//$form.find('input[name="thread_parent_id"]').val('0');
plugin_localcomments_init();
delete_all_forms();
}
},
'html'
);
return false;
});
}
function plugin_localcomments_init() {
jQuery('div.plugin_localcomments .toggle').unbind('click').click(function(){
var $toggle = $(this);
var $parentIssue = $toggle.closest('li.issue');
var $collapsibleTargets = $parentIssue.find('ul').eq(0).add($parentIssue.find('div.reply'));
$collapsibleTargets.slideToggle(250); // duration in ms
if ($toggle.text().indexOf('Show') == -1) {
$toggle.text('Show/add comments');
} else {
$toggle.text('Hide comments');
}
return false;
});
jQuery('div.plugin_localcomments .delete').unbind('click').click(function(){
delete_all_forms();
var $commentDiv = jQuery(this).closest('.msg-wrapper');
var $msgItem = $commentDiv.closest('li');
var issueOrComment = ($msgItem.is('.issue') ? 'issue' : 'comment');
jQuery.post(
action, // WAS: action+'/delete',
{
'thread_parent_id': 'delete',
'comment_id': $commentDiv.attr('id').split('r')[1],
'issue_or_comment': issueOrComment
},
function(data,r){
$msgItem.fadeOut(function() {$(this).remove();});
}
);
return false;
});
}
jQuery(document).ready(function() {
action = jQuery('div.plugin_localcomments form').attr('action');
formhtml = jQuery('div.plugin_localcomments form').parent().html();
delete_all_forms(); // important! creates .reply buttons before init() below
plugin_localcomments_init();
});
""")
def moderation():
    """List every comment (newest first) along with a simple tag form."""
    newest_first = ~db.plugin_localcomments_comment.created_on
    comments = db().select(db.plugin_localcomments_comment.ALL, orderby=newest_first)
    form = SQLFORM.factory(Field('tag_name'))
    return dict(comments=comments, form=form)
def sqlform():
    """Render a bare web2py insert form for the comment table."""
    return dict(form=SQLFORM(db.plugin_localcomments_comment))
def show_type_icon(type):
    """Return an icon element (as XML) representing a feedback type.

    Unrecognized or missing types fall back to the generic comment icon.
    N.B. the parameter name shadows the `type` builtin; it is kept
    unchanged for backward compatibility with existing callers.
    """
    # dict lookup with a default replaces the old if/elif chain
    icon_classes = {
        'Error in phylogeny': 'icon-move',
        'Bug report': 'icon-warning-sign',
        'Feature request': 'icon-wrench',
        'Reply or general': 'icon-comment',
    }
    iconClass = icon_classes.get(type, 'icon-comment')
    return XML(I(_class=iconClass))
@auth.requires_membership(role='editor')
def grid():
    """Editors-only searchable SQLFORM.grid of all feedback comments."""
    # make the (normally hidden) scope column visible and human-readable
    db.plugin_localcomments_comment.intended_scope.readable = True
    db.plugin_localcomments_comment.intended_scope.represent = lambda scope, row: scope and scope.capitalize() or XML(T('—'))
    # render feedback type as an icon rather than raw text
    db.plugin_localcomments_comment.feedback_type.represent = lambda row, value: show_type_icon(value)
    grid = SQLFORM.grid( db.plugin_localcomments_comment,
        # formstyle controls only the Add/Edit forms for individual records! not the main grid :(
        formstyle='bootstrap',
        #formstyle='table3cols',
        #formstyle='table2cols',
        #formstyle='inline',
        #formstyle='divs',
        #formstyle='ul',
        user_signature = False, # False means *anyone* can edit, delete, etc! if they can use the method (see @auth.requires above)
        # editable=auth.has_membership(role='editor'), # use this instead of @auth above, to allow others to search all comments
        create=False,
        deletable=False, # we'll flip the hidden flag, but not truly delete..?
        orderby=~db.plugin_localcomments_comment.created_on,
        fields=[
            #db.plugin_localcomments_comment.id,
            db.plugin_localcomments_comment.feedback_type,
            db.plugin_localcomments_comment.body,
            db.plugin_localcomments_comment.url,
            db.plugin_localcomments_comment.ottol_id,
            db.plugin_localcomments_comment.synthtree_id,
            db.plugin_localcomments_comment.synthtree_node_id,
            db.plugin_localcomments_comment.created_on,
            db.plugin_localcomments_comment.intended_scope,
            ],
        headers = {
            # NOTE the funky key format used here
            'plugin_localcomments_comment.feedback_type' : 'Type',
        }
        # TODO: add "virtual field" to show compound locations (treeID@nodeID), and hide underlying fields?
        # TODO: add custom rendering (via lambdas) for some fields, eg, icons for feedback_type:
        #   https://groups.google.com/forum/#!searchin/web2py/grid$20HTML/web2py/3KhSI4Ps5Tw/Ay4Nc0ti3g0J
        #   https://groups.google.com/forum/#!searchin/web2py/grid$20HTML/web2py/4-rgcM9FNcA/NFpIyZdj4OkJ
        #   http://web2py.com/books/default/chapter/29/06?search=represent#Record-representation
        )
    return locals()
def smartgrid():
    """Render web2py's SQLFORM.smartgrid for the comment table."""
    grid = SQLFORM.smartgrid(db.plugin_localcomments_comment)
    # the view expects a 'grid' key (original returned locals())
    return dict(grid=grid)
def index():
    """Render the comment-thread UI, and handle all comment POST actions.

    Depending on request.vars this either:
      * closes an issue / deletes a comment (thread_parent_id == 'delete'),
      * creates a new issue or reply comment (thread_parent_id is set),
      * or renders the comment list + entry form for the chosen filter.
    Returns web2py helpers (DIV/LI markup) or a short status string.
    """
    # this is a tricky function that does simple display, handles POSTed comments, moderation, etc.
    # TODO: break this up into more sensible functions, and refactor
    # display/markup generation to shared code?
    synthtree_id = request.vars['synthtree_id']
    synthtree_node_id = request.vars['synthtree_node_id']
    sourcetree_id = request.vars['sourcetree_id']
    ottol_id = request.vars['ottol_id']
    target_node_label = request.vars['target_node_label']
    url = request.vars['url'] or request.get('env').get('http_referer')
    filter = request.vars['filter']  # N.B. shadows the `filter` builtin
    # if anonymous user submitted identifying information, remember it
    visitor_name = request.vars['visitor_name']
    if visitor_name:
        session['visitor_name'] = visitor_name
    visitor_email = request.vars['visitor_email']
    if visitor_email:
        session['visitor_email'] = visitor_email
    issue_or_comment = request.vars['issue_or_comment']
    thread_parent_id = request.vars['thread_parent_id'] # can be None
    comment_id = request.vars['comment_id'] # used for some operations (eg, delete)
    feedback_type = request.vars['feedback_type'] # used for new comments
    reference_url = request.vars['reference_url'] # used for phylo corrections only
    issue_title = request.vars['title'] # used for new issues (threads)
    claims_expertise = request.vars['claimed_expertise'] # used for new comments
    threads = [ ]
    def node(comment):
        # Render one GitHub issue (thread starter) or comment (reply) as an
        # LI element, recursing into any child comments.
        ##print("building node for comment id={0}...".format(comment.get('number', comment['id'])))
        # preload its comments (a separate API call)
        child_comments = [ ]
        if comment.get('comments') and comment.get('comments') > 0:
            get_children_url = comment['comments_url']
            resp = requests.get( get_children_url, headers=GH_GET_HEADERS, timeout=10)
            # N.B. Timeout is in seconds, and watches for *any* new data within that time (vs. whole response)
            try:
                resp.raise_for_status()
                try:
                    child_comments = resp.json()
                except:
                    # older `requests` exposed .json as a property
                    child_comments = resp.json
            except:
                # WE need logging in the web app!
                try:
                    import sys
                    sys.stderr.write('Error: got a {c} from {u}\n'.format(c=resp.status_code,
                                                                          u=get_children_url))
                except:
                    pass # well that sucks, we failed to even write to stderr
        metadata = parse_comment_metadata(comment['body'])
        ##print(metadata)
        # Examine the comment metadata (if any) to get the best display name
        # and URL for its author. Guests should appear here as the name and
        # email address they entered when creating a comment, rather than the
        # GitHub app (bot).
        #
        # Default values are what we can fetch from the issues API
        author_display_name = comment['user']['login']
        author_link = comment['user']['html_url']
        # Now let's try for something more friendly...
        if metadata:
            meta_author_info = metadata.get('Author', None)
            if meta_author_info:
                # Try to parse this fron a Markdown hyperlink. Typical values include:
                #   u'opentreeapi'
                #   u'11'
                #   u'[Jim Allman](https://github.com/jimallman)'
                #   u'[John Smith](mailto:example.guest@gmail.com)'
                regex = re.compile(r'\[(.*)\]\((.*)\)')
                markdown_fields = regex.findall(meta_author_info)
                if len(markdown_fields) > 0:
                    # look for parts of a markdown link
                    author_display_name, author_link = markdown_fields[0]
                else:
                    # it's not a markdown link, just a bare name or numeric userid
                    if meta_author_info.isdigit():
                        # ignore ugly userid (login is better)
                        pass
                    else:
                        author_display_name = meta_author_info
        # Is this node for an issue (thread starter) or a comment (reply)?
        issue_node = 'number' in comment
        # Is the current user logged in? If so, what is their GitHub ID (login)?
        current_user_id = auth.user and auth.user.github_login or None
        # Cook up some reasonably strong regular expressions to detect bare
        # URLs and wrap them in hyperlinks. Adapted from
        # http://stackoverflow.com/questions/1071191/detect-urls-in-a-string-and-wrap-with-a-href-tag
        link_regex = re.compile( r'''
(?x)( # verbose identify URLs within text
(http|https) # make sure we find a resource type
:// # ...needs to be followed by colon-slash-slash
(\w+[:.]?){2,} # at least two domain groups, e.g. (gnosis.)(cx)
(/?| # could be just the domain name (maybe w/ slash)
[^ \n\r"]+ # or stuff then space, newline, tab, quote
[\w/]) # resource name ends in alphanumeric or slash
(?=([\s\.,>)'"\]]|$)) # assert: followed by white or clause ending OR end of line
) # end of match group
''')
        # link_replace = r'<a href="\1" />\1</a>'
        # let's try this do-nothing version
        link_replace = r'\1'
        # NOTE the funky constructor required to use this below
        # Define a consistent cleaner to sanitize user input. We need a few
        # elements that are common in our markdown but missing from the Bleach
        # whitelist.
        # N.B. HTML comments are stripped by default. Non-allowed tags will appear
        # "naked" in output, so we can identify any bad actors.
        common_feedback_tags = [u'p', u'br',
                                u'h1', u'h2', u'h3', u'h4', u'h5', u'h6',
                                ]
        ot_markdown_tags = list(set( bleach.sanitizer.ALLOWED_TAGS + common_feedback_tags))
        ot_cleaner = Cleaner(tags=ot_markdown_tags)
        try: # TODO: if not comment.deleted:
            # N.B. some missing information (e.g. supporting URL) will appear here as a string like "None"
            supporting_reference_url = metadata.get('Supporting reference', None)
            has_supporting_reference_url = supporting_reference_url and (supporting_reference_url != u'None')
            # Prepare a sanitized rendering of this user-submitted markup
            rendered_comment_markdown = markdown(
                get_visible_comment_body(comment['body'] or ''),
                extras={'link-patterns':None},
                link_patterns=[(link_regex, link_replace)]).encode('utf-8')
            safe_comment_markup = XML(
                ot_cleaner.clean(rendered_comment_markdown),
                sanitize=False) # gluon's sanitize will break on Unicode!
            markup = LI(
                DIV(##T('posted by %(first_name)s %(last_name)s',comment.created_by),
                    # not sure why this doesn't work... db.auth record is not a mapping!?
                    ('title' in comment) and DIV( comment['title'], A(T('on GitHub'), _href=comment['html_url'], _target='_blank'), _class='topic-title') or '',
                    DIV( safe_comment_markup, _class=(issue_node and 'body issue-body' or 'body comment-body')),
                    DIV( A(T('Supporting reference (opens in a new window)'), _href=supporting_reference_url, _target='_blank'), _class='body issue-supporting-reference' ) if has_supporting_reference_url else '',
                    DIV(
                        A(T(author_display_name), _href=author_link, _target='_blank'),
                        # SPAN(' [local expertise]',_class='badge') if comment.claimed_expertise else '',
                        SPAN(' ',metadata.get('Feedback type'),' ',_class='badge') if metadata.get('Feedback type') else '',
                        T(' - %s',prettydate(utc_to_local(datetime.strptime(comment['created_at'], GH_DATETIME_FORMAT)),T)),
                        SPAN(
                            issue_node and A(T(child_comments and 'Hide comments' or 'Show/add comments'),_class='toggle',_href='#') or '',
                            issue_node and comment['user']['login'] == current_user_id and SPAN(' | ') or '',
                            A(T('Delete'),_class='delete',_href='#') if comment['user']['login'] == current_user_id else '',
                            _class='controls'),
                        _class='byline'),
                    _id='r%s' % comment.get('number', comment['id']),
                    _class='msg-wrapper'),
                # child messages (toggle hides/shows these)
                issue_node and SUL(*[node(comment) for comment in child_comments], _style=("" if child_comments else "display: none;")) or '',
                issue_node and DIV(_class='reply', _style=("" if child_comments else "display: none;")) or '',
                _class=(issue_node and 'issue' or 'comment'))
            return markup
        except:
            import sys
            print "Unexpected error:", sys.exc_info()[0]
            raise
    if thread_parent_id == 'delete':
        # delete the specified comment or close an issue...
        try:
            if issue_or_comment == 'issue':
                print("CLOSING ISSUE {0}".format(comment_id))
                close_issue(comment_id)
                clear_local_comments()
                return 'closed'
            else:
                print("DELETING COMMENT {0}".format(comment_id))
                delete_comment(comment_id)
                clear_local_comments()
                return 'deleted'
        except:
            clear_local_comments() # hopefully a cleaner result
            return error()
    elif thread_parent_id:
        # add a new comment using the submitted vars
        if not request.vars.body:
            print('MISSING BODY:')
            print(request.vars.body)
            return error()
        if not (visitor_name or auth.user):
            print('MISSING USER-ID:')
            print(' visitor_name:')
            print(visitor_name)
            print(' auth.user:')
            print(auth.user)
            return error()
        # build useful links for some footer fields
        if auth.user:
            author_link = '[{0}]({1})'.format(auth.user.name, auth.user.github_url)
        elif visitor_name and visitor_email:
            author_link = '[{0}](mailto:{1})'.format(visitor_name, visitor_email)
        elif visitor_name:
            # no email provided
            author_link = visitor_name
        elif visitor_email:
            # no name provided
            author_link = '[{0}](mailto:{1})'.format(visitor_email, visitor_email)
        else:
            # no identifying information provided
            author_link = 'Anonymous'
        if (thread_parent_id == '0'):
            # create a new issue (thread starter)
            msg_body = request.vars.body
            if len(re.compile('\s+').sub('',msg_body))<1:
                return ''
            # more useful links for some footer fields
            if url.startswith('http'):
                # repeat full (absolute) URL as link text
                url_link = '[{0}]({1})'.format(url, url)
            else:
                # expand hidden link for root-relative URL
                url_link = '[{0}]({1}{2})'.format(url, request.get('env').get('http_origin'), url)
            # add full metadata for an issue
            footer = build_comment_metadata_footer(comment_type='starter', metadata={
                "Author": author_link,
                "Upvotes": 0,
                "URL": url_link,
                "Target node label": target_node_label,
                "Synthetic tree id": synthtree_id,
                "Synthetic tree node id": synthtree_node_id,
                "Source tree id": sourcetree_id,
                "Open Tree Taxonomy id": ottol_id,
                "Supporting reference": reference_url or 'None'
                })
            msg_data = {
                "title": issue_title,
                "body": "{0}\n{1}".format(msg_body, footer),
                "labels": [ ]
                }
            if feedback_type:
                # omit an empty value here!
                msg_data['labels'].append(feedback_type)
            new_msg = add_or_update_issue(msg_data)
        else:
            # attach this comment to an existing issue
            ##print("ADD A COMMENT")
            msg_body = request.vars.body
            if len(re.compile('\s+').sub('',msg_body))<1:
                return ''
            # add abbreviated metadata for a comment
            footer = build_comment_metadata_footer(comment_type='reply', metadata={
                "Author" : author_link,
                "Upvotes" : 0,
                })
            print(footer)
            msg_data = {
                "body": "{0}\n{1}".format(msg_body, footer)
                }
            new_msg = add_or_update_comment(msg_data, parent_issue_id=thread_parent_id)
        clear_local_comments()
        return node(new_msg)
    # retrieve related comments, based on the chosen filter
    if filter == 'skip_comments':
        # sometimes we just want the markup/UI (eg, an empty page that's quickly updated by JS)
        comments = [ ]
    elif filter == 'synthtree_id,synthtree_node_id':
        comments = get_local_comments({
            "Synthetic tree id": synthtree_id,
            "Synthetic tree node id": synthtree_node_id})
    elif filter == 'sourcetree_id':
        comments = get_local_comments({"Source tree id(s)": sourcetree_id})
    elif filter == 'ottol_id':
        comments = get_local_comments({"Open Tree Taxonomy id": ottol_id})
    else: # fall back to url
        if 'parentWindowURL=' in url:
            #pprint("=== EXTRACTING parentWindowURL...")
            try:
                from urllib import unquote_plus
            except ImportError:
                from urllib.parse import unquote_plus
            # capture the absolute URL of a parent window (i.e. from OneZoom or the study-curation app)
            raw_qs_value = url.split('parentWindowURL=')[1];
            #pprint("=== raw_qs_value: %s" % raw_qs_value)
            url = unquote_plus(raw_qs_value) # decode to a proper URL
            #pprint("=== NEW url: %s" % url)
        comments = get_local_comments({"URL": url})
        #pprint(comments)
    for comment in comments:
        #thread[comment.thread_parent_id] = thread.get(comment.thread_parent_id,[])+[comment]
        threads.append(comment)
    return DIV(script,
        DIV(FORM(# anonymous users should see be encouraged to login or add a name-or-email to their comments
            '' if auth.user_id else A(T('Login'),_href=URL(r=request,c='default',f='user',args=['login']),_class='login-logout reply'),
            '' if auth.user_id else T(' or '),
            '' if auth.user_id else INPUT(_type='text',_id='visitor_name',_name='visitor_name',_value=session.get('visitor_name',''),_placeholder="Enter your name"),
            '' if auth.user_id else T(' '),
            '' if auth.user_id else INPUT(_type='text',_id='visitor_email',_name='visitor_email',_value=session.get('visitor_email',''),_placeholder="Your email (visible on GitHub)"),
            '' if auth.user_id else BR(),
            SELECT(
                OPTION('What kind of feedback is this?', _value=''),
                OPTION('General feedback'),
                OPTION('Correction to relationships in the synthetic tree'),
                OPTION('Suggest a phylogeny to incorporate'),
                OPTION('Correction to names (taxonomy)'),
                OPTION('Extinct/extant issue (taxonomy)'),
                OPTION('Bug report (website behavior)'),
                OPTION('New feature request'),
                _name='feedback_type',value='',_style='width: 100%; margin-right: -4px;'),
            LABEL(INPUT(_type='checkbox',_name=T('claimed_expertise')), T(' I claim expertise in this area'),_style='float: right;',_class='expertise-option'),
            INPUT(_type='text',_id='issue_title',_name='issue_title',_value='',_placeholder="Give this topic a title"), # should appear for proper issues only
            TEXTAREA(_name='body',_placeholder="Add more to this topic, using Markdown (click 'Markdown help' below to learn more)."),
            INPUT(_type='text',_id='reference_url',_name='reference_url',_value='',_placeholder="..."), # visibility (and placeholder) depends on feedback type
            INPUT(_type='hidden',_name='synthtree_id',_value=synthtree_id),
            INPUT(_type='hidden',_name='synthtree_node_id',_value=synthtree_node_id),
            INPUT(_type='hidden',_name='sourcetree_id',_value=sourcetree_id),
            INPUT(_type='hidden',_name='ottol_id',_value=ottol_id),
            INPUT(_type='hidden',_name='target_node_label',_value=target_node_label),
            INPUT(_type='hidden',_name='url',_value=url),
            # INPUT(_type='text',_name='thread_parent_id',_value=0), # we'll get this from a nearby id, eg 'r8'
            DIV(A(T('Close'),_class='msg-close',_href='#',_style='margin-right: 6px'),
                SPAN(' | ',_style='margin-right: 6px'),
                A(T('Markdown help'),_href='https://help.github.com/articles/markdown-basics',
                  _target='_blank',_style='margin-right: 10px'),
                INPUT(_type='submit',_value=T('Post'),_class='btn btn-info btn-small',_style=''),
                _class='msg-footer'),
            _method='post',_action=URL(r=request,args=[])),_class='reply'),
        SUL(*[node(comment) for comment in threads]),_class='plugin_localcomments')
#
# Perform basic CRUD for local comments, using GitHub Issues API
#
GH_BASE_URL = 'https://api.github.com'
# if the current user is logged in, use their auth token instead
USER_AUTH_TOKEN = auth.user and auth.user.github_auth_token or None
# Specify the media-type from GitHub, to freeze v3 API responses and get
# the comment body as markdown (vs. plaintext or HTML)
PREFERRED_MEDIA_TYPE = 'application/vnd.github.v3.raw+json, application/vnd.github.machine-man-preview+json'
# to get markdown AND html body, use 'application/vnd.github.v3.full+json'
GH_DATETIME_FORMAT = '%Y-%m-%dT%H:%M:%SZ'
# Choose the Authorization header once, at import time: the logged-in
# user's own token if available, else a fresh GitHub-app installation token
if USER_AUTH_TOKEN:
    auth_header_value = 'token %s' % USER_AUTH_TOKEN
else:
    GITHUB_APP_INSTALLATION_TOKEN = fetch_github_app_auth_token(request)
    auth_header_value = 'token %s' % GITHUB_APP_INSTALLATION_TOKEN
# shared header dicts for all GitHub API calls below
GH_GET_HEADERS = {'Authorization': auth_header_value,
                  'Accept': PREFERRED_MEDIA_TYPE}
GH_POST_HEADERS = {'Authorization': auth_header_value,
                   'Content-Type': 'application/json',
                   'Accept': PREFERRED_MEDIA_TYPE}
def add_or_update_issue(msg_data, issue_id=None):
    """Create a new feedback issue on GitHub, or edit an existing one.

    msg_data is the JSON-serializable issue payload (title/body/labels).
    If issue_id is given, PATCH that issue; otherwise POST a new one.
    Returns the decoded JSON response; raises on HTTP error.
    WATCH for accidental creation of bogus labels!
    """
    if issue_id:
        # edit an existing issue via the GitHub API
        # BUG FIX: the URL template has two placeholders, but was formerly
        # formatted with only GH_BASE_URL, raising IndexError before the
        # request was ever sent; issue_id must be supplied as well.
        url = '{0}/repos/OpenTreeOfLife/feedback/issues/{1}'.format(GH_BASE_URL, issue_id)
        resp = requests.patch( url,
                               headers=GH_POST_HEADERS,
                               data=json.dumps(msg_data)
                             )
    else:
        # create a new issue
        url = '{0}/repos/OpenTreeOfLife/feedback/issues'.format(GH_BASE_URL)
        resp = requests.post( url,
                              headers=GH_POST_HEADERS,
                              data=json.dumps(msg_data)
                            )
    try:
        new_msg = resp.json()
    except Exception:
        # older `requests` versions exposed .json as a property
        new_msg = resp.json
    resp.raise_for_status()
    return new_msg
def add_or_update_comment(msg_data, comment_id=None, parent_issue_id=None ):
    """Create (POST) or edit (PATCH) a comment on an existing feedback issue.

    msg_data is the JSON-serializable comment payload. When comment_id is
    given the comment is edited in place; otherwise a new comment is added
    to the issue named by parent_issue_id. Raises on HTTP error.
    """
    payload = json.dumps(msg_data)
    if comment_id:
        # edit an existing comment
        target_url = '{0}/repos/OpenTreeOfLife/feedback/issues/comments/{1}'.format(GH_BASE_URL, comment_id)
        resp = requests.patch(target_url, headers=GH_POST_HEADERS, data=payload)
    else:
        # create a new comment under the parent issue
        target_url = '{0}/repos/OpenTreeOfLife/feedback/issues/{1}/comments'.format(GH_BASE_URL, parent_issue_id)
        resp = requests.post(target_url, headers=GH_POST_HEADERS, data=payload)
    resp.raise_for_status()
    try:
        return resp.json()
    except:
        # older `requests` versions exposed .json as a property
        return resp.json
def close_issue(issue_id):
    """Close a feedback thread (GitHub issue) via the API; raises on HTTP error."""
    issue_url = '{0}/repos/OpenTreeOfLife/feedback/issues/{1}'.format(GH_BASE_URL, issue_id)
    resp = requests.patch(issue_url,
                          headers=GH_POST_HEADERS,
                          data=json.dumps({"state":"closed"}))
    resp.raise_for_status()
    try:
        return resp.json()
    except:
        # older `requests` versions exposed .json as a property
        return resp.json
def delete_comment(comment_id):
    """Delete one comment on GitHub, then clobber the entire local
    comment cache (a DELETE response carries no metadata to target it)."""
    comment_url = '{0}/repos/OpenTreeOfLife/feedback/issues/comments/{1}'.format(GH_BASE_URL, comment_id)
    resp = requests.delete(comment_url, headers=GH_GET_HEADERS)
    resp.raise_for_status()
    try:
        result = resp.json()
    except:
        # older `requests` versions exposed .json as a property
        result = resp.json
    # clobber all cached comments (since we have no metadata)
    clear_matching_cache_keys("^localcomments:")
    return result
def build_localcomments_key(request):
    """Build the RAM-cache key for a comment listing: the request URL
    plus a repr of its vars, so each distinct filter caches separately."""
    key_parts = ['localcomments:', request.url, '?', repr(request.vars)]
    return ''.join(key_parts)
def clear_matching_cache_keys(key_pattern):
    """Remove RAM-cached entries whose keys match `key_pattern` (a regex),
    logging the cache contents before and after for debugging.
    ASSUMES we're working with the RAM cache.
    """
    def _census(stage):
        # dump all current keys and return their count
        count = len(cache.ram.storage.keys())
        pprint("=== %d RAM cache keys %s clearing: ===" % (count, stage))
        for cache_key in cache.ram.storage.keys():
            pprint(cache_key)
        pprint("===")
        return count
    # NOTE that we apparently need to "clear" (using a bogus regex) to get a fresh view of the cache
    cache.ram.clear(regex='^_BOGUS_CACHE_KEY_$')
    count_before = _census("BEFORE")
    pprint("> clearing cached items matching [%s]" % key_pattern)
    cache.ram.clear(regex=key_pattern)
    count_after = _census("AFTER")
    pprint(" %d items removed" % (count_before - count_after,))
@cache(key=build_localcomments_key(request),
       time_expire=60*5,
       cache_model=cache.ram)
def get_local_comments(location=None):
    """Fetch all open feedback issues for a "location" via the GitHub
    Search API (see https://developer.github.com/v3/search/#search-issues).

    location maps metadata field names to values (e.g. {"URL": ...});
    each pair is folded into the issue-search text. Returns the list of
    matching issue dicts, or [] if the search request fails for any reason.
    """
    # FIX: mutable default argument ({}) replaced with a None sentinel;
    # behavior is unchanged since the dict was never mutated.
    if location is None:
        location = {}
    print('>> its cache key would be:')
    print(build_localcomments_key(request))
    # build and encode search text (location "filter")
    search_text = ''
    for k, v in location.items():
        search_text = '{0}"{1} | {2} " '.format( search_text, k, v )
    # N.B. urllib.quote_plus is Python 2; the rest of this file is Python 2 style
    search_text = urllib.quote_plus(search_text.encode('utf-8'), safe='~')
    url = '{0}/search/issues?q={1}repo:OpenTreeOfLife%2Ffeedback+state:open&sort=created&order=desc'
    ##TODO: search only within body?
    ## url = '{0}/search/issues?q={1}repo:OpenTreeOfLife%2Ffeedback+in:body+state:open&sort=created&order=asc'
    url = url.format(GH_BASE_URL, search_text)
    try:
        # 21 Apr, 2020: bug fix: keep this .get in a try block, because a HTTPSConnectionPool can be
        # raised here (e.g. when GitHub is down)
        resp = requests.get(url, headers=GH_GET_HEADERS, timeout=10)
        # N.B. Timeout is in seconds, and watches for *any* new data within that time (vs. whole response)
        resp.raise_for_status()
    except:
        print('call to {u} failed. Returning empty comments list'.format(u=url))
        return []
    try:
        results = resp.json()
    except:
        # older `requests` versions exposed .json as a property
        results = resp.json
    return results['items']
def clear_local_comments():
    """Invalidate cached localcomments entries affected by the current request.

    Looks for identifying metadata (URL, OTT id, synthetic-tree node id) in
    request.vars -- either as plain form variables or embedded in an issue's
    Markdown body -- and clears only matching cache entries.  If no metadata
    can be found, every cached localcomments entry is cleared instead.
    """
    if 'markdown_body' in request.vars:
        # Issue Markdown was supplied; recover metadata fields from its footer.
        # N.B. this is not currently used, but handy to keep in mind!
        found = parse_comment_metadata(request.vars.markdown_body)
        target_url = found.get('URL', None)
        target_ott_id = found.get('Open Tree Taxonomy id', None)
        target_node_id = found.get('Synthetic tree node id', None)
    else:
        # The usual case: read the identifiers straight from the request vars.
        found = request.vars
        target_url = found.get('url', None)
        target_ott_id = found.get('ottol_id', None)
        target_node_id = found.get('synthtree_node_id', None)

    if not (target_url or target_ott_id or target_node_id):
        # No identifying metadata; play it safe and clobber *all* cached comments.
        print(">>> No metadata found. CLEARING ALL cached localcomments!")
        clear_matching_cache_keys("^localcomments:")
        return

    # We err on the side of clobbering: reloading the comment cache is cheap,
    # while showing stale (cached) comments is definitely unwanted.
    if target_url:
        # Reduce the URL to a root-relative path.  Markdown links look like
        # "[devtree.opentreeoflife.org/opentree/argus/otol.draft.22@132](http://devtree.opentreeoflife.org/opentree/argus/otol.draft.22@132)"
        bracket_parts = target_url.split('[')
        if len(bracket_parts) > 1:
            # Markdown link: keep the bracketed text, then drop its hostname.
            inner_text = bracket_parts[1].split(']')[0]
            root_relative_url = '/' + '/'.join(inner_text.split('/')[1:])
        else:
            # Absolute URL: remove "scheme://host" (the first three segments).
            root_relative_url = '/' + '/'.join(target_url.split('/')[3:])
        clear_matching_cache_keys("^localcomments:.*'url': '%s'.*" % root_relative_url)
    if target_ott_id:
        clear_matching_cache_keys("^localcomments:.*'ottol_id': '%s'.*" % target_ott_id)
    if target_node_id:
        clear_matching_cache_keys("^localcomments:.*'synthtree_node_id': '%s'.*" % target_node_id)
# Build and parse metadata for comments (stored as markdown in GitHub).
# The full footer is used for a thread starter (GitHub issue), while replies
# (appended GitHub comments) use an abbreviated version.
# Each footer is a %-style template rendered as a Markdown table; the
# '====' rule marks where the visible comment text ends (stripped by
# get_visible_comment_body) and parse_comment_metadata reads the rows back.
full_footer = """
================================================
Metadata | Do not edit below this line
:------------|:----------
Author | %(Author)s
Upvotes | %(Upvotes)d
URL | %(URL)s
Target node label | %(Target node label)s
Synthetic tree id | %(Synthetic tree id)s
Synthetic tree node id | %(Synthetic tree node id)s
Source tree id(s) | %(Source tree id)s
Open Tree Taxonomy id | %(Open Tree Taxonomy id)s
Supporting reference | %(Supporting reference)s
"""
# Abbreviated footer for replies; note Upvotes uses %s here (vs %d above).
reply_footer = """
================================================
Metadata | Do not edit below this line
:------------|:----------
Author | %(Author)s
Upvotes | %(Upvotes)s
"""
# TODO: Restore the expertise flag to both footers?
#    Claimed Expertise | %(Claimed Expertise)s
# TODO: Move 'Feedback type' from labels to footer?
#    Feedback type | %(Feedback type)s
def build_comment_metadata_footer(comment_type='starter', metadata=None):
    """Render the Markdown metadata footer for a comment.

    :param comment_type: 'starter' for a thread starter (a proper GitHub
        issue); anything else means a reply (an appended GitHub comment).
    :param metadata: dict of field values interpolated into the footer
        template (see full_footer / reply_footer for the expected keys);
        defaults to an empty dict.
    :return: the rendered footer string.
    """
    # Use a None sentinel instead of a mutable {} default shared across calls.
    if metadata is None:
        metadata = {}
    if comment_type == 'starter':
        # it's a thread starter (a proper GitHub issue): full footer
        footer_template = full_footer
    else:
        # it's a reply (GitHub comment): abbreviated footer
        footer_template = reply_footer
    return footer_template % metadata
def parse_comment_metadata(comment_body):
    """Extract the metadata footer from a comment body into a dict.

    Scans for the footer header row (a line starting with 'Metadata | '),
    then reads each subsequent 'key | value' row until a line without a
    pipe is found.  Markdown divider rows (':---...') are skipped.

    :param comment_body: full comment text (Markdown including the footer)
    :return: dict mapping metadata field names to their string values
    """
    metadata = { }
    looking_for_footer = True
    for line in comment_body.split('\n'):
        if looking_for_footer:
            if line.startswith('Metadata | '):
                looking_for_footer = False
        else:
            try:
                # Split on the *first* pipe only, so values that themselves
                # contain '|' (e.g. a supporting reference) survive intact.
                key, value = line.split('|', 1)
            except ValueError:
                # no pipe at all -- we're past the footer
                break
            key = key.strip()
            value = value.strip()
            if key.startswith(':---'):
                # skip the Markdown table divider row
                continue
            metadata[key] = value
    return metadata
def get_visible_comment_body(comment_body):
    """Return the comment text with its metadata footer removed.

    Everything from the first line beginning with '=======' onward is the
    footer and gets discarded.
    """
    all_lines = comment_body.split('\n')
    cutoff = len(all_lines)
    for position, text in enumerate(all_lines):
        if text.startswith('======='):
            cutoff = position
            break
    return '\n'.join(all_lines[:cutoff])
# Time-zone converstion from UTC to local time (needed for GitHub date-strings),
# adapted from code found here: http://stackoverflow.com/a/13287083
import calendar
from datetime import datetime, timedelta
def utc_to_local(utc_dt):
    """Convert a naive UTC datetime into the equivalent local-time datetime."""
    # calendar.timegm treats the struct_time as UTC, yielding a POSIX
    # timestamp as an integer (avoids float precision loss).
    epoch_seconds = calendar.timegm(utc_dt.timetuple())
    # fromtimestamp renders that instant in the local timezone (naive result).
    localized = datetime.fromtimestamp(epoch_seconds)
    assert utc_dt.resolution >= timedelta(microseconds=1)
    # timetuple() drops sub-second precision, so restore the microseconds.
    return localized.replace(microsecond=utc_dt.microsecond)
| {
"repo_name": "OpenTreeOfLife/opentree",
"path": "webapp/controllers/plugin_localcomments.py",
"copies": "1",
"size": "45022",
"license": "bsd-2-clause",
"hash": -2446368944814021600,
"line_mean": 45.3189300412,
"line_max": 212,
"alpha_frac": 0.5791612989,
"autogenerated": false,
"ratio": 3.902400970789633,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49815622696896333,
"avg_score": null,
"num_lines": null
} |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from tensorflow.examples.tutorials.mnist import input_data
# Download (if needed) and load MNIST; labels come back as one-hot vectors.
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
import tensorflow as tf
# TF1 API: InteractiveSession installs itself as the default session,
# allowing Tensor.eval() / Operation.run() without an explicit session arg.
sess = tf.InteractiveSession()
# Input images: flattened 28x28 grayscale pixels, batch dimension left open.
x = tf.placeholder(tf.float32, shape=[None, 28*28])
# Output classes: one-hot labels for the 10 digits.
y_ = tf.placeholder(tf.float32, shape=[None, 10])
def weight_variable(shape):
    """Create a trainable weight tensor initialised from a truncated
    normal distribution with stddev 0.1."""
    return tf.Variable(tf.truncated_normal(shape, stddev=0.1))
def bias_variable(shape):
    """Create a trainable bias tensor initialised to the constant 0.1."""
    return tf.Variable(tf.constant(0.1, shape=shape))
def conv2d(x, W):
    """2-D convolution of x with kernel W: unit strides, VALID padding
    (no border, so each spatial dimension shrinks by kernel_size - 1)."""
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='VALID')
def max_pool_2x2(x):
    """Downsample x by taking the maximum over non-overlapping 2x2 windows."""
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
                          padding='VALID')
# Reshape flat 784-pixel rows into NHWC image tensors (28x28, 1 channel).
x_image = tf.reshape(x, [-1, 28, 28, 1])
# Hyperparameters.
LEARNING_RATE = 0.03
BATCH_SIZE = 10
CONV1_CHANS = 8
CONV2_CHANS = 4
FC_SIZE = 100
# First conv layer: 5x5 kernels; VALID conv 28->24, then 2x2 pool 24->12.
W_conv1 = weight_variable([5, 5, 1, CONV1_CHANS])
b_conv1 = bias_variable([CONV1_CHANS])
h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
h_pool1 = max_pool_2x2(h_conv1)
# Second conv layer: VALID conv 12->8, then 2x2 pool 8->4.
W_conv2 = weight_variable([5, 5, CONV1_CHANS, CONV2_CHANS])
b_conv2 = bias_variable([CONV2_CHANS])
h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
h_pool2 = max_pool_2x2(h_conv2)
# FC layer: flatten the 4x4 feature maps and project to FC_SIZE units.
W_fc1 = weight_variable([4 * 4 * CONV2_CHANS, FC_SIZE])
b_fc1 = bias_variable([FC_SIZE])
h_pool2_flat = tf.reshape(h_pool2, [-1, 4 * 4 * CONV2_CHANS])
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
# Readout: raw logits for the 10 classes (softmax happens inside the loss).
W_fc2 = weight_variable([FC_SIZE, 10])
b_fc2 = bias_variable([10])
y_conv = tf.matmul(h_fc1, W_fc2) + b_fc2
# Training
# NOTE(review): positional (logits, labels) only matches very old TF 1.x
# signatures; later releases require keyword arguments -- confirm TF version.
cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(y_conv, y_))
train_step = tf.train.GradientDescentOptimizer(LEARNING_RATE).minimize(cross_entropy)
correct_prediction = tf.equal(tf.argmax(y_conv,1), tf.argmax(y_,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
sess.run(tf.global_variables_initializer())
for i in range(20000):
    batch = mnist.train.next_batch(BATCH_SIZE)
    if i % 100 == 0:
        # NOTE(review): this evaluates on the *test* images but is printed
        # as "training accuracy", and "step" is i*BATCH_SIZE (examples
        # seen), not the iteration count -- confirm intended reporting.
        train_accuracy = accuracy.eval(feed_dict={x:mnist.test.images, y_: mnist.test.labels})
        print("step %d, training accuracy %f"%(i*BATCH_SIZE, train_accuracy))
    train_step.run(feed_dict={x: batch[0], y_: batch[1]})
print("test accuracy %g"%accuracy.eval(feed_dict={
    x: mnist.test.images, y_: mnist.test.labels}))
| {
"repo_name": "jameshanlon/convolutional-neural-network",
"path": "extra/conv2.py",
"copies": "1",
"size": "3233",
"license": "mit",
"hash": -8345645666696928000,
"line_mean": 31.9897959184,
"line_max": 90,
"alpha_frac": 0.6637797711,
"autogenerated": false,
"ratio": 2.8917710196779964,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8972540716771507,
"avg_score": 0.0166020148012981,
"num_lines": 98
} |
import direct.directbase.DirectStart
from panda3d.core import CollisionTraverser, CollisionNode
from panda3d.core import CollisionHandlerQueue, CollisionRay
from panda3d.core import AmbientLight, DirectionalLight, LightAttrib
from panda3d.core import TextNode
from panda3d.core import Point3, Vec3, Vec4, BitMask32, Mat4
from direct.gui.OnscreenText import OnscreenText
from direct.showbase.DirectObject import DirectObject
from direct.task.Task import Task
import sys
# First we define some constants for the colors (RGBA vectors).
BLACK = Vec4(0, 0, 0, 1)
WHITE = Vec4(1, 1, 1, 1)
# Cyan highlight applied to the square currently under the mouse.
HIGHLIGHT = Vec4(0, 1, 1, 1)
# Near-black used for the black pieces.
PIECEBLACK = Vec4(.15, .15, .15, 1)
#Now we define some helper functions that we will need later
#This function, given a line (vector plus origin point) and a desired z value,
#will give us the point on the line where the desired z value is what we want.
#This is how we know where to position an object in 3D space based on a 2D mouse
#position. It also assumes that we are dragging in the XY plane.
#
#This is derived from the mathematical equation of a plane, solved for a given point
def PointAtZ(z, point, vec):
    """Return the point on the line (point + t*vec) whose z-coordinate is z.

    Used to place a dragged object in 3-D from a 2-D mouse position,
    assuming the drag happens in the XY plane.
    """
    t = (z - point.getZ()) / vec.getZ()
    return point + vec * t
#A handy little function for getting the proper position for a given square
def SquarePos(i):
    """Map a board index (0-63) to the 3-D centre of that square,
    with the board centred on the origin."""
    column = (i % 8) - 3.5
    row = int(i / 8) - 3.5
    return Point3(column, row, 0)
#Helper function for determining whether a square should be white or black
#The modulo operations (%) generate the every-other pattern of a chess-board
def SquareColor(i):
    """Return BLACK or WHITE for board square *i*, alternating per square
    and flipping parity on each row (standard checkerboard pattern).

    Uses floor division (//) so the row parity stays an integer under
    Python 3; plain / would produce a float there and corrupt the pattern
    (e.g. square 2 would come out BLACK instead of WHITE).
    """
    if (i + ((i // 8) % 2)) % 2:
        return BLACK
    else:
        return WHITE
class World(DirectObject):
    """Main application object: builds the chessboard, the pieces, the
    lighting, and the collision-ray machinery used to pick and drag
    pieces with the mouse."""

    def __init__(self):
        # This code puts the standard title and instruction text on screen
        self.title = OnscreenText(text="Panda3D: Tutorial - Mouse Picking",
                                  style=1, fg=(1, 1, 1, 1),
                                  pos=(0.8, -0.95), scale=.07)
        self.escapeEvent = OnscreenText(
            text="ESC: Quit",
            style=1, fg=(1, 1, 1, 1), pos=(-1.3, 0.95),
            align=TextNode.ALeft, scale=.05)
        self.mouse1Event = OnscreenText(
            text="Left-click and drag: Pick up and drag piece",
            style=1, fg=(1, 1, 1, 1), pos=(-1.3, 0.90),
            align=TextNode.ALeft, scale=.05)
        self.accept('escape', sys.exit)  # Escape quits
        base.disableMouse()  # Disable mouse camera control
        camera.setPosHpr(0, -13.75, 6, 0, -25, 0)  # Set the camera
        self.setupLights()  # Setup default lighting

        # Since we are using collision detection to do picking, we set it up
        # like any other collision detection system with a traverser and a
        # handler
        self.picker = CollisionTraverser()  # Make a traverser
        self.pq = CollisionHandlerQueue()  # Make a handler
        # Make a collision node for our picker ray
        self.pickerNode = CollisionNode('mouseRay')
        # Attach that node to the camera since the ray will need to be
        # positioned relative to it
        self.pickerNP = camera.attachNewNode(self.pickerNode)
        # Everything to be picked will use bit 1. This way if we were doing
        # other collision we could separate it
        self.pickerNode.setFromCollideMask(BitMask32.bit(1))
        self.pickerRay = CollisionRay()  # Make our ray
        self.pickerNode.addSolid(self.pickerRay)  # Add it to the collision node
        # Register the ray as something that can cause collisions
        self.picker.addCollider(self.pickerNP, self.pq)
        #self.picker.showCollisions(render)

        # Now we create the chess board and its pieces

        # We will attach all of the squares to their own root. This way we can
        # do the collision pass just on the squares and save the time of
        # checking the rest of the scene
        self.squareRoot = render.attachNewNode("squareRoot")

        # For each square
        self.squares = [None for i in range(64)]
        self.pieces = dict((i, None) for i in range(64))  #MOD
        for i in range(64):
            # Load, parent, color, and position the model (a single square polygon)
            self.squares[i] = loader.loadModel("models/square")
            self.squares[i].reparentTo(self.squareRoot)
            self.squares[i].setPos(SquarePos(i))
            self.squares[i].setColor(SquareColor(i))
            # Set the model itself to be collideable with the ray. If this
            # model was any more complex than a single polygon, you should set
            # up a collision sphere around it instead. But for single polygons
            # this works fine.
            self.squares[i].find("**/polygon").node().setIntoCollideMask(
                BitMask32.bit(1))
            # Set a tag on the square's node so we can look up what square
            # this is later during the collision pass
            self.squares[i].find("**/polygon").node().setTag('square', str(i))
            # We will use this variable as a pointer to whatever piece is
            # currently in this square

        # The order of pieces on a chessboard from white's perspective. This
        # list contains the constructor functions for the piece classes
        # defined below
        pieceOrder = (Rook, Knight, Bishop, Queen, King, Bishop, Knight, Rook)
        for i in range(8, 16):
            # Load the white pawns
            self.pieces[i] = Pawn(i, WHITE)
        for i in range(48, 56):
            # load the black pawns
            self.pieces[i] = Pawn(i, PIECEBLACK)
        for i in range(8):
            # Load the special pieces for the front row and color them white
            self.pieces[i] = pieceOrder[i](i, WHITE)
            # Load the special pieces for the back row and color them black
            self.pieces[i + 56] = pieceOrder[i](i + 56, PIECEBLACK)

        # This will represent the index of the currently highlighted square
        self.hiSq = False
        # This will represent the index of the square where the currently
        # dragged piece was grabbed from
        self.dragging = False

        # Start the task that handles the picking.
        # NOTE: this rebinds self.mouseTask from the bound method to the
        # Task object returned by taskMgr.add.
        self.mouseTask = taskMgr.add(self.mouseTask, 'mouseTask')
        self.accept("mouse1", self.grabPiece)  # left-click grabs a piece
        self.accept("mouse1-up", self.releasePiece)  # releasing places it

    def swapPieces(self, fr, to):
        """Swap the pieces (if any) on squares *fr* and *to* and move their
        models to the corresponding board positions."""
        temp = self.pieces[fr]
        self.pieces[fr] = self.pieces[to]
        self.pieces[to] = temp
        if self.pieces[fr]:
            self.pieces[fr].square = fr
            self.pieces[fr].obj.setPos(SquarePos(fr))
        if self.pieces[to]:
            self.pieces[to].square = to
            self.pieces[to].obj.setPos(SquarePos(to))

    def mouseTask(self, task):
        """Per-frame task: highlight the square under the mouse and, while
        dragging, keep the dragged piece under the cursor."""
        # This task deals with the highlighting and dragging based on the mouse
        # First, clear the current highlight
        if self.hiSq is not False:
            self.squares[self.hiSq].setColor(SquareColor(self.hiSq))
            self.hiSq = False
        # Check to see if we can access the mouse. We need it to do anything
        # else
        if base.mouseWatcherNode.hasMouse():
            # get the mouse position
            mpos = base.mouseWatcherNode.getMouse()
            # Set the position of the ray based on the mouse position
            self.pickerRay.setFromLens(base.camNode, mpos.getX(), mpos.getY())
            # If we are dragging something, set the position of the object
            # to be at the appropriate point over the plane of the board
            if self.dragging is not False:
                # Gets the point described by pickerRay.getOrigin(), which is
                # relative to camera, relative instead to render
                nearPoint = render.getRelativePoint(camera, self.pickerRay.getOrigin())
                # Same thing with the direction of the ray
                nearVec = render.getRelativeVector(camera, self.pickerRay.getDirection())
                self.pieces[self.dragging].obj.setPos(
                    PointAtZ(.5, nearPoint, nearVec))
            # Do the actual collision pass (Do it only on the squares for
            # efficiency purposes)
            self.picker.traverse(self.squareRoot)
            if self.pq.getNumEntries() > 0:
                # if we have hit something, sort the hits so that the closest
                # is first, and highlight that node
                self.pq.sortEntries()
                i = int(self.pq.getEntry(0).getIntoNode().getTag('square'))
                # Set the highlight on the picked square
                self.squares[i].setColor(HIGHLIGHT)
                self.hiSq = i
        return Task.cont

    def grabPiece(self):
        """Start dragging the piece on the currently highlighted square."""
        # If a square is highlighted and it has a piece, set it to dragging
        # mode
        if (self.hiSq is not False and
            self.pieces[self.hiSq]):
            self.dragging = self.hiSq
            self.hiSq = False

    def releasePiece(self):
        """Drop the dragged piece: snap it back if released off the board,
        otherwise swap it with whatever occupies the target square."""
        # Letting go of a piece. If we are not on a square, return it to its
        # original position. Otherwise, swap it with the piece in the new
        # square
        if self.dragging is not False:  # Make sure we really are dragging something
            # We have let go of the piece, but we are not on a square
            if self.hiSq is False:
                self.pieces[self.dragging].obj.setPos(
                    SquarePos(self.dragging))
            else:
                # Otherwise, swap the pieces
                self.swapPieces(self.dragging, self.hiSq)
            # We are no longer dragging anything
            self.dragging = False

    def setupLights(self):
        """Attach default lighting: a bright ambient fill plus a dim
        directional light."""
        # This function sets up some default lighting
        ambientLight = AmbientLight("ambientLight")
        ambientLight.setColor(Vec4(.8, .8, .8, 1))
        directionalLight = DirectionalLight("directionalLight")
        directionalLight.setDirection(Vec3(0, 45, -45))
        directionalLight.setColor(Vec4(0.2, 0.2, 0.2, 1))
        render.setLight(render.attachNewNode(directionalLight))
        render.setLight(render.attachNewNode(ambientLight))
#Class for a piece. This just handels loading the model and setting initial
#position and color
class Piece:
    """Base chess piece: loads the subclass's model, parents it to the
    scene graph, and drops it on its starting square."""

    def __init__(self, square, color):
        # 'model' is the model path supplied by each concrete subclass.
        piece_model = loader.loadModel(self.model)
        piece_model.reparentTo(render)
        piece_model.setColor(color)
        piece_model.setPos(SquarePos(square))
        self.obj = piece_model
#Classes for each type of chess piece
#Obviously, we could have done this by just passing a string to Piece's init.
#But if you wanted to make rules for how the pieces move, a good place to start
#would be to make an isValidMove(toSquare) method for each piece type
#and then check if the destination square is acceptable during ReleasePiece
class Pawn(Piece):
    """Pawn: supplies the pawn model path to the Piece base class."""
    model = "models/pawn"
class King(Piece):
    """King: supplies the king model path to the Piece base class."""
    model = "models/king"
class Queen(Piece):
    """Queen: supplies the queen model path to the Piece base class."""
    model = "models/queen"
class Bishop(Piece):
    """Bishop: supplies the bishop model path to the Piece base class."""
    model = "models/bishop"
class Knight(Piece):
    """Knight: supplies the knight model path to the Piece base class."""
    model = "models/knight"
class Rook(Piece):
    """Rook: supplies the rook model path to the Piece base class."""
    model = "models/rook"
#Do the main initialization and start 3D rendering
#w = World()
#run()
| {
"repo_name": "agoose77/hivesystem",
"path": "manual/chess/components/TutChessboard.py",
"copies": "1",
"size": "11440",
"license": "bsd-2-clause",
"hash": -4071269278587002400,
"line_mean": 40.7518248175,
"line_max": 89,
"alpha_frac": 0.6462412587,
"autogenerated": false,
"ratio": 3.7300293446364527,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4876270603336453,
"avg_score": null,
"num_lines": null
} |
# Adapted from unittest2/loader.py from the unittest2 plugins branch.
# This module contains some code copied from unittest2/loader.py and other
# code developed in reference to that module and others within unittest2.
# unittest2 is Copyright (c) 2001-2010 Python Software Foundation; All
# Rights Reserved. See: http://docs.python.org/license.html
import logging
import traceback
from nose2 import events
from nose2.compat import unittest
log = logging.getLogger(__name__)
__unittest = True
class PluggableTestLoader(object):
    """Test loader that defers all loading to plugins.

    :param session: Test run session.

    .. attribute :: suiteClass

       Suite class to use. Default: :class:`unittest.TestSuite`.
    """
    # Factory for the suites returned by every load* method; plugins may
    # rely on this attribute.
    suiteClass = unittest.TestSuite

    def __init__(self, session):
        self.session = session

    def loadTestsFromModule(self, module):
        """Load tests from module.

        Fires :func:`loadTestsFromModule` hook, then gives plugins a chance
        to filter or replace the assembled suite via ``moduleLoadedSuite``.
        """
        evt = events.LoadFromModuleEvent(self, module)
        result = self.session.hooks.loadTestsFromModule(evt)
        if evt.handled:
            # A plugin fully handled the event; use its suite (or empty).
            suite = result or self.suiteClass()
        else:
            suite = self.suiteClass(evt.extraTests)
        filterevt = events.ModuleSuiteEvent(self, module, suite)
        result = self.session.hooks.moduleLoadedSuite(filterevt)
        if result:
            # NOTE(review): 'result' is truthy here, so the
            # "or self.suiteClass()" fallback can never trigger; upstream
            # nose2 gates this branch on filterevt.handled -- confirm intent.
            return result or self.suiteClass()
        return filterevt.suite

    def loadTestsFromNames(self, testNames, module=None):
        """Load tests from test names.

        Fires :func:`loadTestsFromNames` hook.
        """
        event = events.LoadFromNamesEvent(
            self, testNames, module)
        result = self.session.hooks.loadTestsFromNames(event)
        log.debug('loadTestsFromNames event %s result %s', event, result)
        if event.handled:
            suites = result or []
        else:
            # No plugin claimed the whole batch; resolve names one by one.
            suites = [self.loadTestsFromName(name, module)
                      for name in event.names]
        if event.extraTests:
            suites.extend(event.extraTests)
        return self.suiteClass(suites)

    def loadTestsFromName(self, name, module=None):
        """Load tests from test name.

        Fires :func:`loadTestsFromName` hook.
        """
        log.debug('loadTestsFromName %s/%s', name, module)
        event = events.LoadFromNameEvent(self, name, module)
        result = self.session.hooks.loadTestsFromName(event)
        if event.handled:
            suite = result or self.suiteClass()
            return suite
        return self.suiteClass(event.extraTests)

    def failedImport(self, name):
        """Make a test case representing a failed import of module *name*."""
        message = 'Failed to import test module: %s' % name
        if hasattr(traceback, 'format_exc'):
            # Python 2.3 compatibility
            # format_exc returns two frames of discover.py as well XXX ?
            message += '\n%s' % traceback.format_exc()
        return self._makeFailedTest(
            'ModuleImportFailure', name, ImportError(message))

    def failedLoadTests(self, name, exception):
        """Make a test case representing a failed test load."""
        return self._makeFailedTest('LoadTestsFailure', name, exception)

    def sortTestMethodsUsing(self, name):
        """Sort key for test case test methods (case-insensitive)."""
        return name.lower()

    def discover(self, start_dir=None, pattern=None):
        """Compatibility shim for load_tests protocol.

        Temporarily points the session at *start_dir*, delegates to
        loadTestsFromNames, and always restores the previous start dir.
        """
        try:
            oldsd = self.session.startDir
            self.session.startDir = start_dir
            return self.loadTestsFromNames([])
        finally:
            self.session.startDir = oldsd

    def _makeFailedTest(self, classname, methodname, exception):
        # Build a synthetic TestCase subclass with a single test method
        # that re-raises the stored exception when the suite is run.
        def testFailure(self):
            raise exception
        attrs = {methodname: testFailure}
        TestClass = type(classname, (unittest.TestCase,), attrs)
        return self.suiteClass((TestClass(methodname),))

    def __repr__(self):
        return '<%s>' % self.__class__.__name__
| {
"repo_name": "leth/nose2",
"path": "nose2/loader.py",
"copies": "1",
"size": "4059",
"license": "bsd-2-clause",
"hash": 6257230855401016000,
"line_mean": 32.825,
"line_max": 74,
"alpha_frac": 0.6385809313,
"autogenerated": false,
"ratio": 4.445783132530121,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 120
} |
# Adapted from @wbond's resource loader.
import sys
import sublime
# Sublime Text build number (e.g. 3126 for ST3).
VERSION = int(sublime.version())
mod_prefix = "advanced_new_file"
# Names of this plugin's modules that are already loaded, collected below so
# they can be reloaded in dependency order.
reload_mods = []
if VERSION > 3000:
    # Sublime Text 3 (Python 3): plugin modules are namespaced under the
    # package directory, and reload must be imported from imp.
    mod_prefix = "AdvancedNewFile." + mod_prefix
    from imp import reload
    for mod in sys.modules:
        if mod[0:15] == 'AdvancedNewFile' and sys.modules[mod] is not None:
            reload_mods.append(mod)
else:
    # Sublime Text 2 (Python 2): module names are bare and reload is a builtin.
    for mod in sorted(sys.modules):
        if mod[0:17] == 'advanced_new_file' and sys.modules[mod] is not None:
            reload_mods.append(mod)
# Submodule suffixes in dependency order: each entry appears after the
# modules it imports, so reloads pick up fresh definitions.
mods_load_order = [
    '.anf_util',
    '.completion_base',
    ".completions",
    '.completions.nix_completion',
    '.completions.windows_completion',
    ".platform",
    ".platform.windows_platform",
    ".platform.nix_platform",
    ".commands",
    ".commands.command_base",
    ".commands.git",
    ".commands.git.git_command_base",
    ".commands.helper_commands",
    '.commands.new_file_command',
    ".commands.move_file_command",
    ".commands.delete_file_command"
]
# Reload only the modules that were actually loaded, in the order above.
for suffix in mods_load_order:
    mod = mod_prefix + suffix
    if mod in reload_mods:
        reload(sys.modules[mod])
| {
"repo_name": "herove/dotfiles",
"path": "sublime/Packages/AdvancedNewFile/advanced_new_file/reloader.py",
"copies": "1",
"size": "1154",
"license": "mit",
"hash": 1257705942804746800,
"line_mean": 23.0416666667,
"line_max": 77,
"alpha_frac": 0.6377816291,
"autogenerated": false,
"ratio": 3.4243323442136497,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.956211397331365,
"avg_score": 0,
"num_lines": 48
} |
# adapted from whoosh.spelling
"""This module contains helper functions for correcting typos in user queries.
"""
from whoosh import query
from whoosh.spelling import QueryCorrector, Correction
# QueryCorrector objects
class MultiFieldQueryCorrector(QueryCorrector):
    """A simple query corrector based on a mapping of field names to
    :class:`Corrector` objects, and a list of ``("fieldname", "text")`` tuples
    to correct. Any terms in the query that appear in the list of term tuples
    are corrected using the appropriate corrector.
    """
    def __init__(self, correctors, terms, prefix=0, maxdist=2):
        """
        :param correctors: a dictionary mapping field names to
            :class:`Corrector` objects.
        :param terms: a sequence of ``("fieldname", "text")`` tuples
            representing terms to be corrected.
        :param prefix: suggested replacement words must share this number of
            initial characters with the original word. Increasing this even to
            just ``1`` can dramatically speed up suggestions, and may be
            justifiable since spelling mistakes rarely involve the first
            letter of a word.
        :param maxdist: the maximum number of "edits" (insertions, deletions,
            substitutions, or transpositions of letters) allowed between the
            original word and any suggestion. Values higher than ``2`` may be
            slow.
        """
        self.correctors = correctors
        self.termset = frozenset(terms)
        self.prefix = prefix
        self.maxdist = maxdist

    def correct_query(self, q, qstring):
        """Return a list of :class:`Correction` objects, one per query field
        that had at least one correctable term.

        :param q: the parsed query tree.
        :param qstring: the original query string.
        """
        correctors = self.correctors
        termset = self.termset
        prefix = self.prefix
        maxdist = self.maxdist

        # Per-field accumulator: fieldname -> corrected query + tokens.
        # (Dead locals corrected_tokens/corrected_q/field_names removed;
        # all state now lives in corrections_data.)
        corrections_data = {}
        for token in q.all_tokens():
            fname = token.fieldname
            if (fname, token.text) in termset:
                sugs = correctors[fname].suggest(token.text, prefix=prefix,
                                                maxdist=maxdist)
                if sugs:
                    if fname not in corrections_data:
                        corrections_data[fname] = {"corrected_q": q,
                                                   "corrected_tokens": []}
                    sug = sugs[0]
                    # Replace the term in this field's running query copy.
                    corrections_data[fname]['corrected_q'] = \
                        corrections_data[fname]['corrected_q'].replace(
                            token.fieldname, token.text, sug)
                    token.text = sug
                    corrections_data[fname]['corrected_tokens'].append(token)

        return [Correction(q, qstring,
                           corrections_data[f]['corrected_q'],
                           corrections_data[f]['corrected_tokens'])
                for f in corrections_data]
| {
"repo_name": "braddockcg/internet-in-a-box",
"path": "iiab/whoosh_multi_field_spelling_correction.py",
"copies": "1",
"size": "2974",
"license": "bsd-2-clause",
"hash": 7493908281082443000,
"line_mean": 42.7352941176,
"line_max": 142,
"alpha_frac": 0.6203765972,
"autogenerated": false,
"ratio": 4.291486291486292,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5411862888686292,
"avg_score": null,
"num_lines": null
} |
# Adapted from Will Bonds Package Control (thread_progress.py)
import sublime
class ProgressNotifier():
    """Shows an animated activity indicator ``[   =   ]`` in the Sublime
    Text status bar until :meth:`stop` is called.

    :param message:
        Text displayed next to the moving indicator.
    :param success_message:
        Text displayed once the animation is stopped.
    """
    def __init__(self, message, success_message = ''):
        self.message = message
        self.success_message = success_message
        self.stopped = False
        self.addend = 1
        self.size = 8
        # Kick off the animation loop shortly after construction.
        sublime.set_timeout(lambda: self.run(0), 100)

    def run(self, i):
        """Draw one animation frame and schedule the next one."""
        if self.stopped:
            return
        lead = i % self.size
        trail = (self.size - 1) - lead
        sublime.status_message('%s [%s=%s]' % (self.message, ' ' * lead, ' ' * trail))
        # Bounce the '=' marker off either end of the bar.
        if not trail:
            self.addend = -1
        if not lead:
            self.addend = 1
        next_i = i + self.addend
        sublime.set_timeout(lambda: self.run(next_i), 100)

    def stop(self):
        """Halt the animation and show the success message (idempotent)."""
        if not self.stopped:
            sublime.status_message(self.success_message)
            self.stopped = True
"repo_name": "NicoSantangelo/sublime-text-trello",
"path": "progress_notifier.py",
"copies": "1",
"size": "1140",
"license": "mit",
"hash": -4004866993503490000,
"line_mean": 25.5348837209,
"line_max": 88,
"alpha_frac": 0.5710526316,
"autogenerated": false,
"ratio": 3.944636678200692,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5015689309800692,
"avg_score": null,
"num_lines": null
} |
# Adapted from Will Bonds Package Control (thread_progress.py)
import sublime
class ProgressNotifier():
    """Shows an animated activity indicator ``[   =   ]`` in the Sublime
    Text status bar until :meth:`stop` is called.

    :param message:
        Text displayed next to the moving indicator.
    :param success_message:
        Text displayed once the animation is stopped.
    """
    def __init__(self, message, success_message=''):
        self.message = message
        self.success_message = success_message
        self.stopped = False
        self.addend = 1
        self.size = 8
        # Kick off the animation loop shortly after construction.
        sublime.set_timeout(lambda: self.run(0), 100)

    def run(self, i):
        """Draw one animation frame and schedule the next one."""
        if self.stopped:
            return
        lead = i % self.size
        trail = (self.size - 1) - lead
        sublime.status_message('%s [%s=%s]' % (self.message, ' ' * lead, ' ' * trail))
        # Bounce the '=' marker off either end of the bar.
        if not trail:
            self.addend = -1
        if not lead:
            self.addend = 1
        next_i = i + self.addend
        sublime.set_timeout(lambda: self.run(next_i), 100)

    def stop(self):
        """Show the success message and halt the animation."""
        sublime.status_message(self.success_message)
        self.stopped = True
| {
"repo_name": "NicoSantangelo/sublime-gulp",
"path": "progress_notifier.py",
"copies": "1",
"size": "1103",
"license": "mit",
"hash": -6655684512976986000,
"line_mean": 24.6511627907,
"line_max": 88,
"alpha_frac": 0.5757026292,
"autogenerated": false,
"ratio": 3.9113475177304964,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9985743640528615,
"avg_score": 0.0002613012803762738,
"num_lines": 43
} |
# adapted from zmq_server_example.py in tinyrpc
import time, sys
import zmq
from tinyrpc.protocols.jsonrpc import JSONRPCProtocol
from tinyrpc.transports.zmq import ZmqServerTransport
from tinyrpc.server import RPCServer
from tinyrpc.dispatch import RPCDispatcher
class Server(object):
    """JSON-RPC server bound to tcp://127.0.0.1:8000 over ZeroMQ.

    Incoming 'request' RPCs are forwarded to *req_callback*.

    NOTE: the constructor ends by calling serve_forever(), so instantiating
    this class blocks the calling thread indefinitely.
    """
    def __init__(self, req_callback):
        # print 'initializing Rpc'
        self.ctx = zmq.Context()
        self.dispatcher = RPCDispatcher()
        self.transport = ZmqServerTransport.create(self.ctx, 'tcp://127.0.0.1:8000')
        self.req_callback = req_callback
        self.rpc_server = RPCServer(
            self.transport,
            JSONRPCProtocol(),
            self.dispatcher
        )
        self.dispatcher.public(self.request) # register this function (replacing the decorator)
        # print 'READYc: '+str(time.clock())
        # sys.exit(0)
        # Blocks here: __init__ never returns while the server is running.
        self.rpc_server.serve_forever()
    # def start(self):
    #     self.rpc_server.serve_forever()
    def request(self, req):
        """Handle the published 'request' RPC by delegating to the callback."""
        return self.req_callback(req)
| {
"repo_name": "dongting/sdnac",
"path": "sdnac/api/rpc.py",
"copies": "1",
"size": "1086",
"license": "apache-2.0",
"hash": 5105035686370608000,
"line_mean": 29.1666666667,
"line_max": 96,
"alpha_frac": 0.6279926335,
"autogenerated": false,
"ratio": 4.037174721189591,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.516516735468959,
"avg_score": null,
"num_lines": null
} |
# Adapted in part from Olivier Hagolle
# https://github.com/olivierhagolle/LANDSAT-Download
import os
import urllib
import urllib2
import time
import re
import sys
import math
import subprocess
from datetime import datetime, timedelta
import web_tools
def connect_earthexplorer_proxy(proxy_info, usgs):
    """Log in to USGS EarthExplorer through an authenticating HTTP(S) proxy.

    :param proxy_info: dict with 'user', 'pass', 'host', 'port' keys
    :param usgs: dict with 'account' and 'passwd' keys

    Side effect: installs a global urllib2 opener (cookies + proxy), so all
    subsequent urllib2 calls reuse the authenticated session.
    """
    print "Establishing connection to Earthexplorer with proxy..."
    # Build an opener that routes through the authenticated proxy and keeps cookies.
    cookies = urllib2.HTTPCookieProcessor()
    proxy_support = urllib2.ProxyHandler({"http": "http://%(user)s:%(pass)s@%(host)s:%(port)s" % proxy_info,
                                          "https": "http://%(user)s:%(pass)s@%(host)s:%(port)s" % proxy_info})
    opener = urllib2.build_opener(proxy_support, cookies)
    # Install the opener globally for all later urllib2 requests.
    urllib2.install_opener(opener)
    # Deal with the CSRF token required by USGS as of 7-20-2016.
    data = urllib2.urlopen("https://ers.cr.usgs.gov").read()
    m = re.search(r'<input .*?name="csrf_token".*?value="(.*?)"', data)
    if m:
        token = m.group(1)
    else:
        # NOTE(review): if the token is missing, 'token' is unbound below and
        # urlencode will raise NameError, since the exit is commented out.
        print "Error : CSRF_Token not found"
        # sys.exit(-3)
    # Login parameters: credentials plus the CSRF token.
    params = urllib.urlencode(dict(username=usgs['account'], password=usgs['passwd'], csrf_token=token))
    # Submit the login form.
    request = urllib2.Request("https://ers.cr.usgs.gov", params, headers={})
    f = urllib2.urlopen(request)
    data = f.read()
    f.close()
    if data.find('You must sign in as a registered user to download data or place orders for USGS EROS products') > 0:
        print "Authentification failed"
        # sys.exit(-1)
    return
def connect_earthexplorer_no_proxy(usgs):
    """Log in to USGS EarthExplorer with a direct (proxy-less) connection.

    usgs : dict with 'account' and 'passwd' credentials.

    Side effect: installs a global cookie-carrying urllib2 opener so later
    downloads in this process are authenticated.
    NOTE: Python 2 only (urllib2, print statements).
    """
    # mkmitchel (https://github.com/mkmitchell) solved the token issue
    cookies = urllib2.HTTPCookieProcessor()
    opener = urllib2.build_opener(cookies)
    urllib2.install_opener(opener)
    # Scrape the CSRF token that USGS requires on the login form.
    data = urllib2.urlopen("https://ers.cr.usgs.gov").read()
    m = re.search(r'<input .*?name="csrf_token".*?value="(.*?)"', data)
    if m:
        token = m.group(1)
    else:
        # NOTE(review): `token` stays unbound here; urlencode below would
        # raise NameError -- the original exit was commented out.
        print "Error : CSRF_Token not found"
        # sys.exit(-3)
    params = urllib.urlencode(dict(username=usgs['account'], password=usgs['passwd'], csrf_token=token))
    request = urllib2.Request("https://ers.cr.usgs.gov/login", params, headers={})
    f = urllib2.urlopen(request)
    data = f.read()
    f.close()
    if data.find('You must sign in as a registered user to download data or place orders for USGS EROS products') > 0:
        print "Authentification failed"
        # sys.exit(-1)
    return
def download_chunks(url, rep, nom_fic):
    """ Downloads large files in pieces
    inspired by http://josh.gourneau.com

    Parameters: url to fetch, `rep` destination directory, `nom_fic`
    output file name. Returns (rep, nom_fic) on success, False on HTTP/URL
    errors. Requires a logged-in urllib2 opener (see the connect_* helpers).
    NOTE: Python 2 only (urllib2, `except E, e` syntax, time.clock).
    """
    try:
        req = urllib2.urlopen(url)
        # if downloaded file is html
        if req.info().gettype() == 'text/html':
            print "error : file is in html and not an expected binary file"
            lines = req.read()
            if lines.find('Download Not Found') > 0:
                raise TypeError
            else:
                with open("error_output.html", "w") as f:
                    f.write(lines)
                print "result saved in ./error_output.html"
                # sys.exit(-1)
        # if file too small
        total_size = int(req.info().getheader('Content-Length').strip())
        if (total_size < 50000):
            print "Error: The file is too small to be a Landsat Image"
            print url
            # sys.exit(-1)
        print nom_fic, total_size
        total_size_fmt = sizeof_fmt(total_size)
        # download
        downloaded = 0
        CHUNK = 1024 * 1024 * 8  # 8 MiB per read
        with open(rep + '/' + nom_fic, 'wb') as fp:
            start = time.clock()
            print('Downloading {0} ({1}):'.format(nom_fic, total_size_fmt))
            while True:
                chunk = req.read(CHUNK)
                downloaded += len(chunk)
                done = int(50 * downloaded / total_size)
                # Progress bar: 50-char gauge, percent and a rough rate.
                # NOTE(review): divides by (time.clock() - start) which can
                # be zero on the first pass, and the //8 rate unit looks
                # suspect -- confirm intended units.
                sys.stdout.write('\r[{1}{2}]{0:3.0f}% {3}ps'
                                 .format(math.floor((float(downloaded)
                                                     / total_size) * 100),
                                         '=' * done,
                                         ' ' * (50 - done),
                                         sizeof_fmt((downloaded // (time.clock() - start)) / 8)))
                sys.stdout.flush()
                if not chunk: break
                fp.write(chunk)
    except urllib2.HTTPError, e:
        if e.code == 500:
            pass  # File doesn't exist
        else:
            print "HTTP Error:", e.code, url
        return False
    except urllib2.URLError, e:
        print "URL Error:", e.reason, url
        return False
    return rep, nom_fic
def sizeof_fmt(num):
    """Return *num* (a byte count) as a human-readable string, e.g. '3.5 MB'.

    Bug fix: the unit-scaling division was commented out in the original,
    so any value >= 1024 fell through every unit without shrinking and the
    function silently returned None (which then appeared as 'None' in the
    download progress output).
    """
    for unit in ['bytes', 'KB', 'MB', 'GB', 'TB']:
        if num < 1024.0:
            return "%3.1f %s" % (num, unit)
        # Scale down to the next unit (float division, also under Python 2).
        num /= 1024.0
    # Larger than 1024 TB: report petabytes instead of returning None.
    return "%3.1f %s" % (num, 'PB')
def unzip_image(tgzfile, outputdir):
    """Extract `<outputdir>/<tgzfile>.tgz` into `<outputdir>/<tgzfile>` and
    delete the archive. Returns 1 on success, 0 otherwise.

    SECURITY NOTE(review): paths are interpolated into shell=True commands;
    a hostile file name could inject shell commands. Prefer an argument
    list with shell=False (or the tarfile module).
    NOTE: Python 2 only (print statements).
    """
    success = 0
    if os.path.exists(outputdir + '/' + tgzfile + '.tgz'):
        print "\nunzipping..."
        try:
            if sys.platform.startswith('linux'):
                subprocess.call('mkdir ' + outputdir + '/' + tgzfile, shell=True)  # Unix
                subprocess.call('tar zxvf ' + outputdir + '/' + tgzfile + '.tgz -C ' + outputdir + '/' + tgzfile,
                                shell=True)  # Unix
            elif sys.platform.startswith('win'):
                subprocess.call('tartool ' + outputdir + '/' + tgzfile + '.tgz ' + outputdir + '/' + tgzfile,
                                shell=True)  # W32
            success = 1
        except TypeError:
            print 'Failed to unzip %s' % tgzfile
        # Remove the archive whether or not extraction succeeded.
        os.remove(outputdir + '/' + tgzfile + '.tgz')
    return success
def get_credentials(usgs_path):
    """Read USGS credentials from a text file.

    The file's first line must be "<account> <password>"; a trailing
    newline on the password is stripped. Returns {'account': ..,
    'passwd': ..}.
    NOTE: Python 2 only -- uses the removed `file()` builtin and print
    statements.
    """
    print 'USGS txt path: {}'.format(usgs_path)
    with file(usgs_path) as f:
        (account, passwd) = f.readline().split(' ')
        if passwd.endswith('\n'):
            passwd = passwd[:-1]
        usgs = {'account': account, 'passwd': passwd}
    return usgs
def get_station_list_identifier(product):
    """Map a Landsat product-ID prefix to its EarthExplorer dataset
    identifier and the ground stations that may have archived the scene.

    Returns (identifier, stations) for prefixes 'LC8', 'LE7' and 'LT5';
    raises NotImplementedError for anything else.
    """
    # Dataset id and candidate ground stations, keyed by sensor prefix.
    catalog = {
        'LC8': ('4923', ['LGN']),
        'LE7': ('3373', ['EDC', 'SGS', 'AGS', 'ASN', 'SG1', 'CUB', 'COA']),
        'LT5': ('3119', ['GLC', 'ASA', 'KIR', 'MOR', 'KHC', 'PAC',
                         'KIS', 'CHM', 'LGS', 'MGR', 'COA', 'MPS', 'CUB']),
    }
    for prefix, (identifier, stations) in catalog.items():
        if product.startswith(prefix):
            return identifier, stations
    raise NotImplementedError('Must provide valid product string...')
def assemble_scene_id_list(ref_time, prow, end_date, sat, delta=16):
    """Build the list of Landsat scene IDs between ref_time and end_date.

    First probes station/archive-version combinations (via web_tools) until
    one exists, then steps through dates every `delta` days generating IDs
    of the form <sat><path><row><YYYYDDD><station><version>.

    Parameters: `ref_time`/`end_date` datetimes, `prow` a (path, row) pair,
    `sat` one of 'LC8'/'LE7'/'LT5', `delta` overpass period in days.
    NOTE: Python 2 only (print statements).
    """
    scene_id_list = []
    archive_found = False
    possible_l7_stations = ['EDC', 'SGS', 'AGS', 'ASN', 'SG1', 'CUB', 'COA']
    possible_l8_stations = ['LGN']
    possible_l5_stations = ['GLC', 'ASA', 'KIR', 'MOR', 'KHC', 'PAC',
                            'KIS', 'CHM', 'LGS', 'MGR', 'COA', 'MPS', 'CUB']
    if sat == 'LC8':
        station_list = possible_l8_stations
    elif sat == 'LE7':
        station_list = possible_l7_stations
    elif sat == 'LT5':
        station_list = possible_l5_stations
    else:
        raise ValueError('Must provide valid satellite...')
    while ref_time < end_date:
        date_part = datetime.strftime(ref_time, '%Y%j')
        padded_pr = '{}{}'.format(str(prow[0]).zfill(3), str(prow[1]).zfill(3))
        if not archive_found:
            # Search pass: try every archive version x station until a
            # scene is confirmed to exist. ref_time is NOT advanced here,
            # so the found date is emitted on the next loop iteration.
            print 'Looking for correct station/version combination.............'
            for archive in ['00', '01', '02']:
                for location in station_list:
                    scene_str = '{}{}{}{}{}'.format(sat, padded_pr, date_part, location, archive)
                    if web_tools.verify_landsat_scene_exists(scene_str):
                        version = archive
                        grnd_stn = location
                        archive_found = True
                        print 'using version: {}, location: {}'.format(version, location)
                        break
                if archive_found:
                    break
            # NOTE(review): if no combination exists, archive_found stays
            # False and this loop never advances ref_time -- infinite loop.
        elif archive_found:
            scene_str = '{}{}{}{}{}'.format(sat, padded_pr, date_part, grnd_stn, version)
            print 'add scene: {}, for {}'.format(scene_str,
                                                 datetime.strftime(ref_time, '%Y-%m-%d'))
            scene_id_list.append(scene_str)
            ref_time += timedelta(days=delta)
        else:
            # NOTE(review): unreachable -- the two branches above cover both
            # truth values of archive_found.
            raise NotImplementedError('Did not complete scene listing...')
    return scene_id_list
def get_candidate_scenes_list(path_row, sat_name, start_date, end_date=None):
    """
    :param path_row: path, datetime obj
    :param sat_name: 'LT5', 'LE7', or 'LC8'
    :param start_date: datetime object start image search
    :param end_date: datetime object finish image search
    :param max_cloud_cover: percent cloud cover according to USGS image metadata, float
    :param limit_scenes: max number scenese, int
    :return: reference overpass = str('YYYYDOY'), station str('XXX') len=3

    NOTE(review): max_cloud_cover/limit_scenes are documented but not
    accepted by this signature; the actual return is the scene-id list
    from assemble_scene_id_list. Python 2 only (print statements).
    """
    print '\nsat: {}\n'.format(sat_name)
    # Anchor the search on the first actual overpass at/after start_date.
    reference_overpass = web_tools.landsat_overpass_time(path_row,
                                                         start_date, sat_name)
    print 'ref time: {}'.format(reference_overpass)
    scene_list = assemble_scene_id_list(reference_overpass, path_row,
                                        end_date, sat_name)
    return scene_list
def down_usgs_by_list(scene_list, output_dir, usgs_creds_txt):
    """Download and unpack every scene in `scene_list` from EarthExplorer.

    Logs in once (global opener side effect), then for each product builds
    the standard download URL, streams the .tgz into `output_dir` and
    extracts it. Returns None.
    """
    usgs_creds = get_credentials(usgs_creds_txt)
    connect_earthexplorer_no_proxy(usgs_creds)
    for product in scene_list:
        identifier, stations = get_station_list_identifier(product)
        base_url = 'https://earthexplorer.usgs.gov/download/'
        tail_string = '{}/{}/STANDARD/EE'.format(identifier, product)
        url = '{}{}'.format(base_url, tail_string)
        tgz_file = '{}.tgz'.format(product)
        download_chunks(url, output_dir, tgz_file)
        # NOTE(review): unzip_image appends '.tgz' itself, but tgz_file
        # already carries the extension -- it will look for
        # '<product>.tgz.tgz'; confirm intended naming.
        unzip_image(tgz_file, output_dir)
    return None
if __name__ == '__main__':
    # Ad-hoc driver: list LT5 candidate scenes for path/row 37/27,
    # May 2007. Python 2 only (print statement).
    home = os.path.expanduser('~')
    start = datetime(2007, 5, 1)
    end = datetime(2007, 5, 30)
    satellite = 'LT5'
    output = os.path.join(home, 'images', satellite)  # NOTE(review): unused
    usgs_creds = os.path.join(home, 'images', 'usgs.txt')  # NOTE(review): unused
    pathrow = 37, 27
    print get_candidate_scenes_list(pathrow, satellite, start, end)
# ===============================================================================
| {
"repo_name": "dgketchum/MT_Rsense",
"path": "utils/usgs_download.py",
"copies": "1",
"size": "10603",
"license": "apache-2.0",
"hash": 5956426135794001000,
"line_mean": 34.9423728814,
"line_max": 118,
"alpha_frac": 0.5497500707,
"autogenerated": false,
"ratio": 3.673943173943174,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9716701998589384,
"avg_score": 0.0013982492107579454,
"num_lines": 295
} |
# Adaptee (source) interface
class EuropeanSocketInterface:
    """Adaptee interface: the contract of a European (230 V) wall socket.

    Every method is a no-op placeholder returning None; concrete sockets
    override them.
    """

    def voltage(self):
        pass

    def live(self):
        pass

    def neutral(self):
        pass

    def earth(self):
        pass
# Adaptee
class Socket(EuropeanSocketInterface):
    """Adaptee: a concrete European wall socket delivering 230 V mains."""

    def voltage(self):
        # European mains voltage.
        return 230

    def live(self):
        # Live wire reading.
        return 1

    def neutral(self):
        # Neutral wire reading.
        return -1

    def earth(self):
        # Earth (ground) reading.
        return 0
# Target interface
class USASocketInterface:
    """Target interface: what US appliances expect from a power source."""

    def voltage(self):
        pass

    def live(self):
        pass

    def neutral(self):
        pass
# The Adapter
class Adapter(USASocketInterface):
    """Adapter: presents a European socket through the US interface.

    Wraps an adaptee implementing live()/neutral() and reports the US
    mains voltage while delegating the wire readings.
    """

    __socket = None  # the wrapped European socket (set in __init__)

    def __init__(self, socket):
        self.__socket = socket

    def voltage(self):
        # The adapter steps the supply down to the US standard.
        return 110

    def live(self):
        return self.__socket.live()

    def neutral(self):
        return self.__socket.neutral()
# Client
class ElectricKettle:
    """Client: a kettle that only tolerates a 110 V supply.

    `boil` inspects the power source: over-voltage burns the kettle,
    a correctly wired 110 V supply makes coffee, anything else is dead.
    """

    __power = None  # power source exposing voltage()/live()/neutral()

    def __init__(self, power):
        self.__power = power

    def boil(self):
        supply = self.__power
        if supply.voltage() > 110:
            # Over-voltage: the kettle catches fire.
            print ("Tetera en llamas!")
            return
        if supply.live() == 1 and supply.neutral() == -1:
            print ("Coffee time!")
        else:
            print ("No power.")
def main():
    """Wire the adaptee to the client through the adapter and boil."""
    # Plug the EU socket into the US-interface adapter, then power the kettle.
    kettle = ElectricKettle(Adapter(Socket()))
    # Make coffee.
    kettle.boil()
    return 0


if __name__ == "__main__":
    main()
| {
"repo_name": "AnhellO/DAS_Sistemas",
"path": "Ago-Dic-2017/Emilio Almazan/Practica2/adapter.py",
"copies": "1",
"size": "1436",
"license": "mit",
"hash": 8000793049349686000,
"line_mean": 17.8947368421,
"line_max": 48,
"alpha_frac": 0.5577994429,
"autogenerated": false,
"ratio": 3.778947368421053,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9787813482887306,
"avg_score": 0.009786665686749133,
"num_lines": 76
} |
""" Adapted from Colin Raffel's git repo https://github.com/craffel/"""
import numpy as np
import theano
from theano import tensor as T
import lasagne
import nnet_utils
def set_trace():
    """Drop into an IPython debugger at the caller's frame.

    Imports lazily so IPython is only required when actually debugging.
    """
    from IPython.core.debugger import Pdb
    import sys
    # f_back targets the caller, not this helper.
    Pdb(color_scheme='Linux').set_trace(sys._getframe().f_back)
def train(data, layers, updates_fn, batch_size=16, epoch_size=128,
          initial_patience=1000, improvement_threshold=0.99,
          patience_increase=5, max_iter=100000):
    """Train a lasagne network, yielding per-epoch statistics.

    Generator: compiles theano train/validate functions for `layers`, then
    iterates mini-batches from data['train'] (last column is the target)
    and, every `epoch_size` iterations, yields a dict with iteration,
    train_cost, validate_cost, validate_objective and patience. Stops when
    patience is exhausted or a NaN cost appears.
    NOTE: Python 2 only (print statements); requires theano/lasagne.
    """
    # specify input and target theano data types
    input_var = T.fmatrix()
    target_var = T.ivector()
    # create a cost expression for training
    prediction = lasagne.layers.get_output(layers, input_var)
    cost = lasagne.objectives.categorical_crossentropy(
        prediction, target_var)
    cost = cost.mean()
    # create parameter update expressions for training
    params = lasagne.layers.get_all_params(layers, trainable=True)
    updates = updates_fn(cost, params)
    # compile functions for performing training step and returning
    # corresponding training cost
    train_fn = theano.function(inputs=[input_var, target_var],
                               outputs=cost,
                               updates=updates,
                               allow_input_downcast=True,
                               on_unused_input='warn')
    # create cost expression for validation
    # deterministic forward pass to disable droupout layers
    val_prediction = lasagne.layers.get_output(layers, input_var,
                                               deterministic=True)
    val_cost = lasagne.objectives.categorical_crossentropy(
        val_prediction, target_var)
    val_cost = val_cost.mean()
    # Validation objective: mean misclassification rate.
    val_obj_fn = T.mean(T.neq(T.argmax(val_prediction, axis=1),
                              target_var), dtype=theano.config.floatX)
    # compile a function to compute the validation cost and objective function
    validate_fn = theano.function(inputs=[input_var, target_var],
                                  outputs=[val_cost, val_obj_fn],
                                  allow_input_downcast=True)
    # create data iterators (features = all but last column, target = last)
    train_data_iterator = nnet_utils.get_next_batch(
        data['train'][:, :-1], data['train'][:, -1], batch_size, max_iter)
    patience = initial_patience
    current_val_cost = np.inf
    train_cost = 0.0
    for n, (x_batch, y_batch) in enumerate(train_data_iterator):
        train_cost += train_fn(x_batch, y_batch)
        # Stop training if NaN is encountered
        if not np.isfinite(train_cost):
            print 'Bad training er {} at iteration {}'.format(train_cost, n)
            break
        if n and not (n % epoch_size):
            # NOTE(review): train_cost is never reset, so the reported
            # per-epoch average accumulates across epochs -- confirm intent.
            epoch_result = {'iteration': n,
                            'train_cost': train_cost / float(epoch_size),
                            'validate_cost': 0.0,
                            'validate_objective': 0.0}
            # compute validation cost and objective
            cost, obj = validate_fn(data['validate'][:, :-1],
                                    data['validate'][:, -1])
            epoch_result['validate_cost'] = float(cost)
            epoch_result['validate_objective'] = float(obj)
            # Test whether this validate cost is the new smallest
            if epoch_result['validate_cost'] < current_val_cost:
                # To update patience, we must be smaller than
                # improvement_threshold*(previous lowest validation cost)
                patience_cost = improvement_threshold*current_val_cost
                if epoch_result['validate_cost'] < patience_cost:
                    # Increase patience by the supplied about
                    patience += epoch_size*patience_increase
                # Even if we didn't increase patience, update lowest valid cost
                current_val_cost = epoch_result['validate_cost']
            # Store patience after this epoch
            epoch_result['patience'] = patience
            if n > patience:
                break
            yield epoch_result
def build_general_network(input_shape, n_layers, widths,
                          non_linearities, drop_out=True):
    """
    Build a fully-connected lasagne network and return its output layer.

    Parameters
    ----------
    input_shape : tuple of int or None (batchsize, rows, cols)
        Shape of the input. Any element can be set to None to indicate that
        dimension is not fixed at compile time
    n_layers : int
        Total layer count, including the input layer (index 0).
    widths : sequence of int
        Units per layer; widths[0] is unused (input layer).
    non_linearities : sequence
        Activation per layer; non_linearities[0] is unused.
    drop_out : bool
        When True, add p=0.5 dropout after every layer except the last.
        NOTE(review): with indentation lost in extraction it is ambiguous
        whether dropout also follows the input layer -- confirm against the
        upstream repo.
    """
    # GlorotUniform is the default mechanism for initializing weights
    for i in range(n_layers):
        if i == 0:  # input layer
            layers = lasagne.layers.InputLayer(shape=input_shape)
        else:  # hidden and output layers
            layers = lasagne.layers.DenseLayer(layers,
                                               num_units=widths[i],
                                               nonlinearity=non_linearities[i])
        if drop_out and i < n_layers-1:  # output layer has no dropout
            layers = lasagne.layers.DropoutLayer(layers, p=0.5)
    return layers
| {
"repo_name": "rafaelvalle/MDI",
"path": "neural_networks.py",
"copies": "1",
"size": "5022",
"license": "mit",
"hash": -2063686782664809500,
"line_mean": 39.176,
"line_max": 79,
"alpha_frac": 0.583831143,
"autogenerated": false,
"ratio": 4.241554054054054,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5325385197054054,
"avg_score": null,
"num_lines": null
} |
""" Adapted from Colin Raffel's git repo https://github.com/craffel/
Shared utility functions for downsampled hash sequence experiments.
"""
import os
import numpy as np
import lasagne
import deepdish
import traceback
import functools
import glob
import sys
import simple_spearmint
import neural_networks
def run_trial(data, nnet_params, hyperparameter_space, train_function):
    """Train a network given the task and hyperparameters and return the result.
    Parameters
    ----------
    data: np.ndarray
        dataset from which train and validate set will be built using k-fold
        cross validation. Last column must be target variable.
    nnet_params: dict
        Hyperparameter values that are not going to be optimized but parametrize
        the neural network.
    hyperparameter_space : dict
        Dictionary of model hyperparameters
    train_function : callable
        This function will be called with the constructed network, training
        data, and hyperparameters to create a model.
    Returns
    -------
    best_objective : float
        Lowest objective value achieved.
    best_epoch : dict
        Statistics about the epoch during which the lowest objective value was
        achieved.
    best_params : dict
        Parameters of the model for the best-objective epoch.

    Returns (np.nan, {}, {}) on any training error or non-convergence.
    NOTE: Python 2 only (print statements).
    """
    # We will be modifying params, so make a copy of it
    hyperparameter_space = dict(hyperparameter_space)
    print ',\n'.join(['\t{} : {}'.format(k, v)
                      for k, v in hyperparameter_space.items()])
    # data is standardized during preprocessing step
    # Get training set statistics for standardization
    # input_mean = np.mean(np.concatenate(data[:, :-1], axis=1), axis=1)
    # input_std = np.std(np.concatenate(data[:, :-1], axis=1), axis=1)
    # create train and validation indices (random ~70/30 split)
    train_ids = np.random.binomial(1, .7, len(data)).astype(bool)
    data = {'train': data[train_ids], 'validate': data[~train_ids]}
    # Choose network structure based on network param
    if hyperparameter_space['network'] == 'general_network':
        build_network_layers = neural_networks.build_general_network
    else:
        raise ValueError('Unknown network {}'.format(
            hyperparameter_space['network']))
    layers = build_network_layers(
        (nnet_params['batch_size'], data['train'].shape[1]-1),  # last is target
        nnet_params['n_layers'],
        nnet_params['widths'],
        nnet_params['non_linearities'],
        drop_out=hyperparameter_space['dropout'])
    # Generate updates-creating function
    updates_function = functools.partial(
        nnet_params['update_func'],
        learning_rate=hyperparameter_space['learning_rate'],
        rho=hyperparameter_space['momentum'])
    # Create a list of epochs
    epochs = []
    # Keep track of lowest objective found so far
    best_objective = np.inf
    try:
        for epoch in train_function(data, layers, updates_function,
                                    nnet_params['batch_size'],
                                    nnet_params['epoch_size']):
            # Stop training if a nan training cost is encountered
            if not np.isfinite(epoch['train_cost']):
                break
            epochs.append(epoch)
            if epoch['validate_objective'] < best_objective:
                best_objective = epoch['validate_objective']
                best_epoch = epoch
                # Snapshot weights at the best epoch so far.
                best_model = lasagne.layers.get_all_param_values(layers)
            print "{}: {}, ".format(epoch['iteration'],
                                    epoch['validate_objective']),
            sys.stdout.flush()
    # If there was an error while training, report it to whetlab
    except Exception:
        print "ERROR: "
        print traceback.format_exc()
        return np.nan, {}, {}
    print
    # Check that all training costs were not NaN; return NaN if any were.
    success = np.all([np.isfinite(e['train_cost']) for e in epochs])
    if np.isinf(best_objective) or len(epochs) == 0 or not success:
        print ' Failed to converge.'
        print
        return np.nan, {}, {}
    else:
        for k, v in best_epoch.items():
            print "\t{:>35} | {}".format(k, v)
        print
        return best_objective, best_epoch, best_model
def parameter_search(data, nnet_params, hyperparameter_space, trial_directory,
                     model_directory, train_function, model_name='best_model',
                     n_models=10):
    """Run parameter optimization given some train function, writing out results
    Parameters
    ----------
    data: np.ndarray
        Matrix where rows are observations and columns are feature values.
        Last column must be target value.
        The data will be use to create a randomized train and validate set.
    nnet_params: dict
        Hyperparameter values that are not going to be optimized but parametrize
        the neural network.
    hyperparameter_space : dict
        Hyperparameter space (in the format used by `simple_spearmint`) to
        optimize over.
    trial_directory : str
        Directory where parameter optimization trial results will be written.
    model_directory : str
        Directory where the best-performing model will be written
    train_function : callable
        This function will be called with the constructed network, training
        data, and hyperparameters to create a model.
    model_name : str
        String to be used when saving models to file
    n_models_to_save : int
        Number of best models to save

    Never returns: loops forever, keeping the `n_models` best models on
    disk (slots are overwritten at the current worst index).
    """
    # Create parameter trials directory if it doesn't exist
    if not os.path.exists(trial_directory):
        os.makedirs(trial_directory)
    # Create model directory if it doesn't exist
    if not os.path.exists(model_directory):
        os.makedirs(model_directory)
    # Create SimpleSpearmint suggester instance
    ss = simple_spearmint.SimpleSpearmint(hyperparameter_space)
    # Load in previous results for "warm start"
    for trial_file in glob.glob(os.path.join(trial_directory, '*.h5')):
        trial = deepdish.io.load(trial_file)
        ss.update(trial['hyperparameters'], trial['best_objective'])
    # Run parameter optimization forever
    # best_scores[i] holds the objective of the model saved in slot i.
    best_scores = np.empty((n_models,))
    best_scores[:] = np.inf
    while True:
        # Get a new suggestion
        suggestion = ss.suggest()
        # Train a network with these hyperparameters
        best_objective, best_epoch, best_model = run_trial(
            data, nnet_params, suggestion, train_function)
        # Update spearmint on the result
        ss.update(suggestion, best_objective)
        # Write out a result file
        trial_filename = ','.join('{}={}'.format(k, v)
                                  for k, v in suggestion.items()) + '.h5'
        deepdish.io.save(os.path.join(trial_directory, trial_filename),
                         {'hyperparameters': suggestion,
                          'best_objective': best_objective,
                          'best_epoch': best_epoch})
        # We will write the N best models
        # Replace the current worst saved model when we beat it.
        idx_max = np.argmax(best_scores)
        if (not np.isnan(best_objective) and (best_objective <
                                              best_scores[idx_max])):
            best_scores[idx_max] = best_objective
            deepdish.io.save(os.path.join(model_directory,
                                          "{}_{}.h5".format(model_name,
                                                            idx_max)),
                             best_model)
        """
        # Also write out the entire model when the objective is the smallest
        # We don't want to write all models; they are > 100MB each
        if (not np.isnan(best_objective) and
                best_objective == np.nanmin(ss.objective_values)):
            deepdish.io.save(
                os.path.join(model_directory, model_name+'.h5'), best_model)
        """
| {
"repo_name": "rafaelvalle/MDI",
"path": "bayesian_parameter_optimization.py",
"copies": "1",
"size": "7953",
"license": "mit",
"hash": 7062951554247341000,
"line_mean": 41.0793650794,
"line_max": 80,
"alpha_frac": 0.6161197033,
"autogenerated": false,
"ratio": 4.364983534577387,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5481103237877387,
"avg_score": null,
"num_lines": null
} |
"""Adapter - Converts django models to Front-end format.
Calls on the model's get_field_order method to know which fields that
are available for display and what order. Returns a config json
that dumps the field config and the json data for model/table.
"""
import datetime
import json
import string
from django.db.models import Model
from django.db.models.query import QuerySet
from main.constants import UNDEFINED_STRING
from main.models import ExperimentSample
OBJ_LIST = 'obj_list'
# Fields that are not explicitly displayed but are used on the frontend
# to update views of particular columns.
# TODO: Make each field encapsulate its own special display logic.
HIDDEN_PAIR_FIELDS = [
'href',
'uid',
]
def adapt_model_to_frontend(model, filters={}, obj_list=None, **kwargs):
    """Converts django models to frontend format.
    Calls on the model's get_field_order method to know which fields that
    are available for display.
    Args:
        model: The django model that we are adapting
        filters: The filter conditions on the model (i.e. select
            which instances of the model to select.)
            NOTE: mutable default is safe here -- only read, never mutated.
        obj_list: If provided, use this as the set of objects to filter.
            NOTE: I actually want to permanently change the interface to this
            rather than doing filtering here.
    Returns:
        A config json that dumps the field config and the json data for
        model/table display.
    """
    # Fetch all objects in this model and package as json to be rendered into
    # the dom. The data will be displayed to the user, e.g. via the javascript
    # DataTables component.
    # Get all objects that pass the filter.
    if obj_list is None:
        obj_list = model.objects.filter(**filters)
    # A list of dicts with object data, where each dict is one object
    # and all the fields required for front-end display.
    fe_obj_list = [
        adapt_model_instance_to_frontend(obj, **kwargs)
        for obj in obj_list]
    # Get a list of fields required for displaying the objects, in the order
    # in which they should be displayed.
    field_dict_list = model.get_field_order(**kwargs)
    # Each field is a dict with two keys, 'field' for field name and 'verbose'
    # for display name. Get each. If 'verbose' is missing, then make verbose
    # be the field with _'s turned to spaces and Title Cased.
    field_list = [fdict['field'] for fdict in field_dict_list]
    # Get the verbose field names, which will be used as column headers.
    def _get_verbose(fdict):
        if 'verbose' in fdict:
            return fdict['verbose']
        else:
            return string.capwords(fdict['field'],'_').replace('_',' ')
    field_verbose_names = [_get_verbose(fdict) for fdict in field_dict_list]
    # A list of dicts containing the order of each column and the field titles
    # for each column, used for configuring jquery.datatables.js
    obj_field_config = [{
        'mData': name,
        'sTitle': verbose_name
    } for (name, verbose_name) in zip(field_list, field_verbose_names)]
    # Package the result.
    return json.dumps({
        OBJ_LIST: fe_obj_list,
        'field_config': obj_field_config
    })
def adapt_experiment_samples_to_frontend(filters={}, obj_list=None, **kwargs):
    """ The sample metadata fields require their own custom adapter.

    Like adapt_model_to_frontend but specialized for ExperimentSample:
    the per-sample free-form `data` dict contributes extra dynamic columns
    (union across all samples; missing values default to '').
    Returns the same JSON envelope: {OBJ_LIST, 'field_config'}.
    """
    # Get all objects that pass the filter.
    if obj_list is None:
        obj_list = ExperimentSample.objects.filter(**filters).order_by('label')
    # Union of all dynamic metadata keys across the selected samples.
    json_fields = {}
    for obj in obj_list:
        json_field_dicts = dict(
            [(key,{'field':key}) for key in obj.data.keys()])
        json_fields.update(json_field_dicts)
    # A list of dicts with object data, where each dict is one object
    # and all the fields required for front-end display.
    fe_obj_list = []
    for obj in obj_list:
        # default to empty string
        obj_json_fields = dict((field, '') for field in json_fields)
        obj_json_fields.update(obj.data)
        fe_obj_list.append(adapt_model_instance_to_frontend(obj,
                field_info= obj_json_fields,
                **kwargs))
    # Get a list of fields required for displaying the objects, in the order
    # in which they should be displayed.
    field_dict_list = ExperimentSample.get_field_order(**kwargs)
    field_dict_list.extend(json_fields.values())
    # Each field is a dict with two keys, 'field' for field name and 'verbose'
    # for display name. Get each. If 'verbose' is missing, then make verbose
    # be the field with _'s turned to spaces and Title Cased.
    field_list = [fdict['field'] for fdict in field_dict_list]
    # Get the verbose field names, which will be used as column headers.
    def _get_verbose(fdict):
        if 'verbose' in fdict:
            return fdict['verbose']
        else:
            return string.capwords(fdict['field'],'_').replace('_',' ')
    field_verbose_names = [_get_verbose(fdict) for fdict in field_dict_list]
    # A list of dicts containing the order of each column and the field titles
    # for each column, used for configuring jquery.datatables.js
    obj_field_config = [{
        'mData': name,
        'sTitle': verbose_name
    } for (name, verbose_name) in zip(field_list, field_verbose_names)]
    # Package the result.
    return json.dumps({
        OBJ_LIST: fe_obj_list,
        'field_config': obj_field_config
    })
def adapt_model_instance_to_frontend(model_instance, field_info={}, **kwargs):
    """Adapts a single model instance to the frontend representation.
    Args:
        model_instance: An instance of a Model object. The model class must
            implement a get_field_order() method.
        field_info: If called recursively from
            get_model_field_fe_representation(), function also be passed
            with field_info keys from parent_model.get_field_order(). This
            can decorate the serialized model with information like CSS class,
            state, instructions on how to render in datatable_component.js,
            etc. NOTE: mutable default is read-only here, never mutated.
    Returns:
        A dictionary representation of the model. May contained nested
        objects.
    """
    # The model class.
    model_type = type(model_instance)
    # The visible fields of the model.
    visible_field_names = [f['field']
            for f in model_type.get_field_order(**kwargs)]
    visible_field_dict = {f['field']: f
            for f in model_type.get_field_order(**kwargs)}
    # Get (key, value) pairs for visible fields.
    visible_field_pairs = [
        (field, get_model_field_fe_representation(
                model_instance, field, visible_field_dict[field],
                **kwargs))
        for field in visible_field_names]
    # Other values.
    other_pairs = []
    for key in HIDDEN_PAIR_FIELDS:
        if hasattr(model_instance, 'custom_getattr'):
            hidden_value = model_instance.custom_getattr(key)
            # Skip sentinel "undefined" values entirely.
            if hidden_value == UNDEFINED_STRING:
                continue
            other_pairs.append((key, hidden_value))
        elif hasattr(model_instance, key):
            other_pairs.append((key, getattr(model_instance, key)))
    # Add in keys from field_info, which are inherited from parent model, if
    # this function is called recursively from
    # get_model_field_fe_representation().
    if field_info:
        other_pairs.extend(field_info.items())
    # Wrap the results in a dictionary.
    return dict(visible_field_pairs + other_pairs)
def get_model_field_fe_representation(model_obj, field, field_info={},
        **kwargs):
    """Returns the best frontend representation for a model field that is
    implemented.
    This method allows recursively diving into models.

    Dispatch: nested Model -> recursive dict; related manager / QuerySet ->
    list of dicts; datetime -> formatted string; everything else -> str().
    """
    # Prefer the model's own accessor when it defines one.
    if hasattr(model_obj, 'custom_getattr'):
        model_field = model_obj.custom_getattr(field)
    else:
        model_field = getattr(model_obj, field)
    # Maybe special handling if ModelField is of special type.
    if isinstance(model_field, Model):
        return adapt_model_instance_to_frontend(model_field, field_info)
    elif model_field.__class__.__name__ == 'ManyRelatedManager':
        # Name check because the manager class is generated dynamically
        # by Django and cannot be imported for isinstance().
        return [adapt_model_instance_to_frontend(m, field_info)
                for m in model_field.all()]
    elif isinstance(model_field, QuerySet):
        return [adapt_model_instance_to_frontend(m, field_info)
                for m in model_field]
    elif isinstance(model_field, datetime.datetime):
        return model_field.strftime("%Y-%m-%d %H:%M:%S")
    # Default. No further special handling needed.
    return str(model_field)
| {
"repo_name": "woodymit/millstone_accidental_source",
"path": "genome_designer/main/adapters.py",
"copies": "1",
"size": "8660",
"license": "mit",
"hash": -7724443117070356000,
"line_mean": 36.8165938865,
"line_max": 79,
"alpha_frac": 0.6538106236,
"autogenerated": false,
"ratio": 4.020427112349118,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0012205903589838244,
"num_lines": 229
} |
'''Adapter for access to S3 filesystem.'''
import os
from abc import ABCMeta, abstractmethod
from conans.errors import NotFoundException
from conans.util.files import relative_dirs, rmdir, md5sum, decode_text
from conans.util.files import path_exists
from conans.paths import SimplePaths
class ServerStorageAdapter(object):
    """Abstract interface for server-side storage backends.

    Defines the operations a conan server storage backend must provide:
    signed download/upload URL generation, snapshots (path -> md5), and
    folder deletion/cleanup.

    NOTE(review): `__metaclass__` is Python 2 syntax; on Python 3 it is
    ignored, so @abstractmethod is not enforced there -- confirm the
    supported interpreter versions.
    """
    __metaclass__ = ABCMeta

    @abstractmethod
    def get_download_urls(self, paths, user=None):
        raise NotImplementedError()

    @abstractmethod
    def get_upload_urls(self, paths_sizes, user=None):
        raise NotImplementedError()

    @abstractmethod
    def get_snapshot(self, absolute_path="", files_subset=None):
        raise NotImplementedError()

    @abstractmethod
    def delete_folder(self, path):
        raise NotImplementedError()

    @abstractmethod
    def delete_empty_dirs(self, deleted_refs):
        raise NotImplementedError()
class ServerDiskAdapter(ServerStorageAdapter):
    '''Manage access to disk files with common methods required
    for conan operations'''

    def __init__(self, base_url, base_storage_path, updown_auth_manager):
        """
        :param: base_url Base url for generate urls to download and upload operations"""
        self.base_url = base_url
        # URLs are generated removing this base path
        self.updown_auth_manager = updown_auth_manager
        self._store_folder = base_storage_path

    def get_download_urls(self, paths, user=None):
        '''Get the urls for download the specified files using s3 signed request.
        returns a dict with this structure: {"filepath": "http://..."}

        paths is a list of path files '''
        assert isinstance(paths, list)
        ret = {}
        for filepath in paths:
            url_path = os.path.relpath(filepath, self._store_folder)
            # Normalize Windows separators so the URL is portable.
            url_path = url_path.replace("\\", "/")
            # Missing: the file size for the upload URL!
            signature = self.updown_auth_manager.get_token_for(url_path, user)
            url = "%s/%s?signature=%s" % (self.base_url, url_path, decode_text(signature))
            ret[filepath] = url
        return ret

    def get_upload_urls(self, paths_sizes, user=None):
        '''Get the urls for upload the specified files using s3 signed request.
        returns a dict with this structure: {"filepath": "http://..."}

        paths_sizes is a dict of {path: size_in_bytes} '''
        assert isinstance(paths_sizes, dict)
        ret = {}
        for filepath, filesize in paths_sizes.items():
            url_path = os.path.relpath(filepath, self._store_folder)
            url_path = url_path.replace("\\", "/")
            # Token embeds the allowed size so uploads are bounded.
            signature = self.updown_auth_manager.get_token_for(url_path, user, filesize)
            url = "%s/%s?signature=%s" % (self.base_url, url_path, decode_text(signature))
            ret[filepath] = url
        return ret

    def get_snapshot(self, absolute_path="", files_subset=None):
        """returns a dict with the filepaths and md5"""
        if not path_exists(absolute_path, self._store_folder):
            raise NotFoundException("")
        paths = relative_dirs(absolute_path)
        if files_subset is not None:
            paths = set(paths).intersection(set(files_subset))
        abs_paths = [os.path.join(absolute_path, relpath) for relpath in paths]
        return {filepath: md5sum(filepath) for filepath in abs_paths}

    def delete_folder(self, path):
        '''Delete folder from disk. Path already contains base dir'''
        if not path_exists(path, self._store_folder):
            raise NotFoundException("")
        rmdir(path)

    def delete_file(self, path):
        '''Delete files from bucket. Path already contains base dir'''
        if not path_exists(path, self._store_folder):
            raise NotFoundException("")
        os.remove(path)

    def delete_empty_dirs(self, deleted_refs):
        # Walk up to 4 ancestor levels for each deleted reference,
        # removing directories that became empty.
        paths = SimplePaths(self._store_folder)
        for ref in deleted_refs:
            ref_path = paths.conan(ref)
            for _ in range(4):
                if os.path.exists(ref_path):
                    try:  # Take advantage that os.rmdir does not delete non-empty dirs
                        os.rmdir(ref_path)
                    except OSError:
                        break  # not empty
                ref_path = os.path.dirname(ref_path)
| {
"repo_name": "mropert/conan",
"path": "conans/server/store/disk_adapter.py",
"copies": "6",
"size": "4396",
"license": "mit",
"hash": -3717030601240870400,
"line_mean": 37.9026548673,
"line_max": 90,
"alpha_frac": 0.6253412193,
"autogenerated": false,
"ratio": 4.123827392120075,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0006018468177454639,
"num_lines": 113
} |
""" Adapter for Clova Extensions Kit """
import traceback
from cek import (
Clova,
URL,
IntentRequest
)
from ..serializer import dumps
from .base import Adapter
from ..models import Message
class ClovaAdapter(Adapter):
    """
    Adapter for Clova Extensions Kit

    Attributes
    ----------
    bot : minette.Minette
        Instance of Minette
    application_id : str
        Application ID of Clova Skill
    default_language : str
        Default language of Clova Skill
    clova : Clova
        Clova Extensions Kit API
    config : minette.Config
        Configuration
    timezone : pytz.timezone
        Timezone
    logger : logging.Logger
        Logger
    debug : bool
        Debug mode
    """

    def __init__(self, bot=None, *, debug=False,
                 application_id=None, default_language=None, **kwargs):
        """
        Parameters
        ----------
        bot : minette.Minette, default None
            Instance of Minette.
            If None, create new instance of Minette by using `**kwargs`
        application_id : str or None, default None
            Application ID for your Clova Skill
        default_language : str or None, default None
            Default language. ("en" / "ja" / "ko")
            If None, "ja" is set to Clova Extensions Kit API object
        debug : bool, default False
            Debug mode
        """
        super().__init__(bot=bot, threads=0, debug=debug, **kwargs)
        # fall back to the [clova_cek] config section when arguments are omitted
        self.application_id = application_id or \
            self.config.get(section="clova_cek", key="application_id")
        self.default_language = default_language or \
            self.config.get(section="clova_cek", key="default_language") or "ja"
        self.clova = Clova(application_id=self.application_id,
                           default_language=self.default_language,
                           debug_mode=debug)

        # handler for all types of request: pass the parsed request object
        # straight through so handle_event() does the actual processing
        @self.clova.handle.default
        def default(clova_request):
            return clova_request

    def handle_http_request(self, request_data, request_headers):
        """
        Interface to chat with Clova Skill

        Parameters
        ----------
        request_data : bytes
            Request data from Clova as bytes
        request_headers : dict
            Request headers from Clova as dict

        Returns
        -------
        response : Response
            Response from chatbot. Send back `json` attribute to Clova API
        """
        # route() validates the request and parses it into a cek request object
        clova_request = self.clova.route(request_data, request_headers)
        return self.handle_event(clova_request)

    def handle_event(self, clova_request):
        """Run the bot on a parsed Clova request and serialize the response."""
        # execute bot
        channel_messages, _ = super().handle_event(clova_request)

        # print response for debug
        for msg in channel_messages:
            if self.debug:
                self.logger.info(msg)
            else:
                self.logger.info("Minette> {}".format(msg["speech_value"]))

        # build response message
        # NOTE(review): assumes channel_messages is non-empty — an empty list
        # would raise IndexError below; confirm the bot always returns at
        # least one message.
        speech_values = [msg["speech_value"] for msg in channel_messages]
        end_session = channel_messages[-1]["end_session"]
        reprompt = channel_messages[-1]["reprompt"]
        if len(speech_values) == 1:
            return dumps(self.clova.response(
                speech_values[0], end_session=end_session, reprompt=reprompt))
        else:
            return dumps(self.clova.response(
                speech_values, end_session=end_session, reprompt=reprompt))

    @staticmethod
    def _to_minette_message(clova_request):
        """
        Convert ClovaRequest object to Minette Message object

        Parameters
        ----------
        clova_request : cek.Request
            Request from clova

        Returns
        -------
        message : minette.Message
            Request converted into Message object
        """
        msg = Message(
            type=clova_request.type,
            channel="LINE",
            channel_detail="Clova",
            # sessions without stored data carry no user id; use empty string
            channel_user_id=clova_request.session.user.id if clova_request.session._session else "",
            channel_message=clova_request
        )

        # Set intent and entities when IntentRequest
        if isinstance(clova_request, IntentRequest):
            msg.intent = clova_request.name
            # if clova_request.slots: <- Error occures when no slot values
            if clova_request._request["intent"]["slots"]:
                msg.entities = clova_request.slots
        return msg

    @staticmethod
    def _to_channel_message(message):
        """
        Convert a Minette Message object to a Clova speech payload dict

        Parameters
        ----------
        message : minette.Message
            Response message object

        Returns
        -------
        payload : dict
            dict with speech_value / end_session / reprompt keys consumed
            by handle_event()
        """
        return {
            "speech_value": URL(message.text) if message.type == "url" else message.text,
            "end_session": message.entities.get("end_session", True),
            "reprompt": message.entities.get("reprompt", None)
        }
| {
"repo_name": "uezo/minette-python",
"path": "minette/adapter/clovaadapter.py",
"copies": "1",
"size": "5119",
"license": "apache-2.0",
"hash": -8874002624017316000,
"line_mean": 30.99375,
"line_max": 100,
"alpha_frac": 0.5780425864,
"autogenerated": false,
"ratio": 4.397766323024055,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5475808909424056,
"avg_score": null,
"num_lines": null
} |
"""Adapter for RunSnakeRun to load coldshot profiles"""
import wx, sys, os, logging
log = logging.getLogger( __name__ )
from squaremap import squaremap
from coldshot import stack,loader
class BaseColdshotAdapter(squaremap.DefaultAdapter):
    """Shared behaviour for the coldshot squaremap adapters."""
    percentageView = False
    total = 0
    # lazily-built cache: node.key -> wx.Colour
    color_mapping = None

    def filename(self, node):
        """Path of the node's source file, if it has one."""
        return getattr(node, 'path', None)

    def background_color(self, node, depth):
        """Create a (unique-ish) background color for each node"""
        if self.color_mapping is None:
            self.color_mapping = {}
        cached = self.color_mapping.get(node.key)
        if cached is None:
            # derive a distinct colour from the cache size at insertion time
            index = len(self.color_mapping)
            cached = wx.Colour(
                (index * 10) % 255,
                200 - ((index * 5) % 200),
                (index * 25) % 200,
            )
            self.color_mapping[node.key] = cached
        return cached

    def SetPercentage(self, percent, total):
        """Set whether to display percentage values (and total for doing so)"""
        self.percentageView = percent
        self.total = total

    def parents(self, node):
        return getattr(node, 'parents', [])

    def label(self, node):
        """Display label: name plus time (percentage or seconds)."""
        if self.percentageView and self.total:
            time = '%0.2f%%' % round(node.cumulative * 100.0 / self.total, 2)
        else:
            time = '%0.3fs' % round(node.cumulative, 3)
        if hasattr(node, 'line'):
            return '%s@%s:%s [%s]' % (node.name, node.filename, node.line, time)
        return '%s [%s]' % (node.name, time)
class ColdshotAdapter(BaseColdshotAdapter):
    """Adapts a coldshot.loader.Loader into a Squaremap-compatible structure"""

    def value(self, node, parent=None):
        """Node weight: its share of *parent* when given, else its cumulative time."""
        return parent.child_cumulative_time(node) if parent else node.cumulative

    def empty(self, node):
        """Calculate percentage of "empty" time"""
        return node.empty
#
#class ColdshotCallsAdapter( BaseColdshotAdapter ):
# def value(self, node, parent=None):
# return node.cumulative / parent.cumulative
#
# def empty(self, node):
# """Calculate percentage of "empty" time"""
# return node.empty
class FunctionLineWrapper(object):
    """Presents a single (function, line) pair as a leaf node for squaremap."""

    def __init__(self, function_info, line_info):
        self.function_info = function_info
        self.line_info = line_info

    def _seconds(self):
        # raw line timer ticks scaled by the loader's timer resolution
        return self.line_info.time * self.function_info.loader.timer_unit

    @property
    def children(self):
        return []

    @property
    def parents(self):
        return [self.function_info]

    @property
    def cumulative(self):
        return self._seconds()

    @property
    def empty(self):
        return 0.0

    @property
    def local(self):
        # a single line has no callees, so local time equals cumulative time
        return self._seconds()

    @property
    def key(self):
        return self.function_info.key

    @property
    def name(self):
        # NOTE(review): rendered as "<line>:<filename>" — confirm that the
        # line-before-filename ordering is intentional
        return '%s:%s' % (self.line_info.line, self.function_info.filename)

    @property
    def calls(self):
        return self.line_info.calls
class ModuleAdapter(ColdshotAdapter):
    """Squaremap adapter that groups functions under their modules.

    Bug fix: the original class defined ``label`` twice; the second
    definition silently shadowed the first, so module-grouping nodes never
    received their "<key> [<time>]" label.  The two definitions are merged
    into a single method here.
    """

    def label(self, node):
        """Display label for *node*.

        FunctionLineWrapper -> its own name; FunctionInfo -> the standard
        function label; anything else (a module grouping) -> its key (or
        'PYTHONPATH' for the root) plus the formatted time.
        """
        if isinstance(node, FunctionLineWrapper):
            return node.name
        if isinstance(node, stack.FunctionInfo):
            return super(ModuleAdapter, self).label(node)
        if self.percentageView and self.total:
            time = '%0.2f%%' % round(node.cumulative * 100.0 / self.total, 2)
        else:
            time = '%0.3fs' % round(node.cumulative, 3)
        return '%s [%s]' % (node.key or 'PYTHONPATH', time)

    def parents(self, node):
        """Parent nodes: a function's module, a line's function, or the
        node's own ``parents`` attribute."""
        if isinstance(node, stack.FunctionInfo):
            parent = node.loader.modules.get(node.module)
            if parent:
                return [parent]
            return []
        elif isinstance(node, stack.FunctionLineInfo):
            return [node.function]
        else:
            return getattr(node, 'parents', [])

    def children(self, node):
        """FunctionInfo nodes expand to per-line wrappers, sorted by line."""
        if isinstance(node, stack.FunctionInfo):
            return [
                FunctionLineWrapper(node, line)
                for lineno, line in sorted(node.line_map.items())
            ]
        return ColdshotAdapter.children(self, node)
class Loader(loader.Loader):
    """Coldshot loader subclass with knowledge of squaremap adapters"""

    ROOTS = ['functions', 'location']  # ,'thread','calls']

    def functions_rows(self):
        """Get cProfile-like function metadata rows

        returns an ID: function mapping
        """
        return self.info.functions

    def location_rows(self):
        """Get our location records (finalized)

        returns an module-name: Grouping mapping
        """
        self.info.finalize_modules()
        return self.info.modules

    def get_root(self, key):
        """Retrieve the given root by type-key"""
        return self.info.roots[key]

    def get_rows(self, key):
        """Get the set of rows for the type-key"""
        rows_method = getattr(self, '%s_rows' % (key,))
        return rows_method()

    def get_adapter(self, key):
        """Get an adapter for our given key"""
        adapter_factories = {
            'functions': ColdshotAdapter,
            'location': ModuleAdapter,
        }
        factory = adapter_factories.get(key)
        if factory is None:
            raise KeyError("""Unknown root type %s""" % (key,))
        return factory()
| {
"repo_name": "ktan2020/legacy-automation",
"path": "win/Lib/site-packages/runsnakerun/coldshotadapter.py",
"copies": "3",
"size": "5580",
"license": "mit",
"hash": 4717340373897047000,
"line_mean": 33.6583850932,
"line_max": 80,
"alpha_frac": 0.5853046595,
"autogenerated": false,
"ratio": 4.046410442349528,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.03375968394440034,
"num_lines": 161
} |
"""Adapter for the Requests Library"""
import json
import requests
import requests.auth
from emma import exceptions as ex
from emma.adapter import AbstractAdapter
def process_response(response):
    """Takes a :class:`Response` and produces python built-ins"""
    status = response.status_code
    # 404 is treated as "no such object" rather than an error
    if status == 404:
        return None
    if status == 400:
        raise ex.ApiRequest400(response)
    # any other non-2xx-success status is a hard failure
    if status > 200:
        raise ex.ApiRequestFailed(response)
    return response.json()
class RequestsAdapter(AbstractAdapter):
    """
    Emma API Adapter for the `Requests Library
    <http://docs.python-requests.org/>`_

    :param auth: A dictionary with keys for your account id and public/private
                 keys
    :type auth: :class:`dict`

    Usage::

        >>> from emma.adapter.requests_adapter import RequestsAdapter
        >>> adptr = RequestsAdapter({
        ...     "account_id": "1234",
        ...     "public_key": "08192a3b4c5d6e7f",
        ...     "private_key": "f7e6d5c4b3a29180"})
        >>> adptr
        <RequestsAdapter>
    """

    def __init__(self, auth):
        super(RequestsAdapter, self).__init__()
        # HTTP basic auth is built once from the public/private key pair
        self.auth = requests.auth.HTTPBasicAuth(
            auth['public_key'],
            auth['private_key'])
        self.url = "https://api.e2ma.net/%s" % auth['account_id']

    def _full_url(self, path):
        # join the per-account base URL with the endpoint path
        return self.url + "%s" % path

    def post(self, path, data=None):
        """
        Takes an effective path (portion after https://api.e2ma.net/:account_id)
        and a parameter dictionary, then passes these to :func:`requests.post`

        :param path: The path portion of a URL
        :type path: :class:`str`
        :param data: The content to encode
        :type data: :class:`object`
        :rtype: JSON-encoded value or None (if 404)

        Usage::

            >>> from emma.adapter.requests_adapter import RequestsAdapter
            >>> adptr = RequestsAdapter({
            ...     "account_id": "1234",
            ...     "public_key": "08192a3b4c5d6e7f",
            ...     "private_key": "f7e6d5c4b3a29180"})
            >>> adptr.post('/members', {...})
            {'import_id': 2001}
        """
        response = requests.post(
            self._full_url(path),
            data=json.dumps(data),
            auth=self.auth)
        return process_response(response)

    def get(self, path, params=None):
        """
        Takes an effective path (portion after https://api.e2ma.net/:account_id)
        and a parameter dictionary, then passes these to :func:`requests.get`

        :param path: The path portion of a URL
        :type path: :class:`str`
        :param params: The dictionary of HTTP parameters to encode
        :type params: :class:`dict`
        :rtype: JSON-encoded value or None (if 404)

        Usage::

            >>> from emma.adapter.requests_adapter import RequestsAdapter
            >>> adptr = RequestsAdapter({
            ...     "account_id": "1234",
            ...     "public_key": "08192a3b4c5d6e7f",
            ...     "private_key": "f7e6d5c4b3a29180"})
            >>> adptr.get('/members', {...})
            [{...}, {...}, ...] # first 500 only
            >>> adptr.count_only = True
            >>> adptr.get('/members', {...})
            999
            >>> adptr.start = 500
            >>> adptr.end = 1000
            >>> adptr.get('/members', {...})
            [{...}, {...}, ...] # 500-999
        """
        params = params or {}
        # pagination state (start/end/count_only) lives on the adapter
        params.update(self.pagination_add_ons())
        response = requests.get(
            self._full_url(path),
            params=params,
            auth=self.auth)
        return process_response(response)

    def put(self, path, data=None):
        """
        Takes an effective path (portion after https://api.e2ma.net/:account_id)
        and a parameter dictionary, then passes these to :func:`requests.put`

        :param path: The path portion of a URL
        :type path: :class:`str`
        :param data: The content to encode
        :type data: :class:`object`
        :rtype: JSON-encoded value or None (if 404)

        Usage::

            >>> from emma.adapter.requests_adapter import RequestsAdapter
            >>> adptr = RequestsAdapter({
            ...     "account_id": "1234",
            ...     "public_key": "08192a3b4c5d6e7f",
            ...     "private_key": "f7e6d5c4b3a29180"})
            >>> adptr.put('/members/email/optout/test@example.com')
            True
        """
        response = requests.put(
            self._full_url(path),
            data=json.dumps(data),
            auth=self.auth)
        return process_response(response)

    def delete(self, path, params=None):
        """
        Takes an effective path (portion after https://api.e2ma.net/:account_id)
        and a parameter dictionary, then passes these to :func:`requests.delete`

        :param path: The path portion of a URL
        :type path: :class:`str`
        :param params: The dictionary of HTTP parameters to encode
        :type params: :class:`dict`
        :rtype: JSON-encoded value or None (if 404)

        Usage::

            >>> from emma.adapter.requests_adapter import RequestsAdapter
            >>> adptr = RequestsAdapter({
            ...     "account_id": "1234",
            ...     "public_key": "08192a3b4c5d6e7f",
            ...     "private_key": "f7e6d5c4b3a29180"})
            >>> adptr.delete('/members/123')
            True
        """
        response = requests.delete(
            self._full_url(path),
            params=params,
            auth=self.auth)
        return process_response(response)
| {
"repo_name": "myemma/EmmaPython",
"path": "emma/adapter/requests_adapter.py",
"copies": "1",
"size": "5610",
"license": "mit",
"hash": 7003252425094027000,
"line_mean": 32.7951807229,
"line_max": 80,
"alpha_frac": 0.5374331551,
"autogenerated": false,
"ratio": 3.920335429769392,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9955909295861509,
"avg_score": 0.00037185780157667706,
"num_lines": 166
} |
"""Adapter for using Jinja2 with Django."""
from __future__ import unicode_literals
import functools
import imp
import logging
import re
from django.conf import settings
from django.template.base import Origin, TemplateDoesNotExist
from django.template.context import get_standard_processors
from django.template.loader import BaseLoader
from django.utils.importlib import import_module
import jinja2
VERSION = (0, 7, 0)
__version__ = '.'.join(map(str, VERSION))

# Apps whose templates should NOT be rendered by Jinja (they fall back to
# Django's loader); can be overridden via settings.JINGO_EXCLUDE_APPS.
EXCLUDE_APPS = (
    'admin',
    'admindocs',
    'registration',
    'context_processors',
)

log = logging.getLogger('jingo')

# module-level guard so app helpers are imported at most once (see load_helpers)
_helpers_loaded = False
class Environment(jinja2.Environment):
    """jinja2 Environment that registers jingo's app helpers before any
    template is looked up or compiled."""

    def get_template(self, name, parent=None, globals=None):
        """Make sure our helpers get loaded before any templates."""
        load_helpers()
        template = super(Environment, self).get_template(name, parent, globals)
        return template

    def from_string(self, source, globals=None, template_class=None):
        """Load helpers first, then compile *source* into a template."""
        load_helpers()
        template = super(Environment, self).from_string(
            source, globals, template_class)
        return template
def get_env():
    """Configure and return a jinja2 Environment."""
    # Mimic Django's setup: templates come from the TEMPLATE_DIRS
    # directories and from each INSTALLED_APPS package.
    loaders = []
    for loader_cls, places in ((jinja2.FileSystemLoader, settings.TEMPLATE_DIRS),
                               (jinja2.PackageLoader, settings.INSTALLED_APPS)):
        loaders.extend(loader_cls(place) for place in places)

    opts = {
        'trim_blocks': True,
        'extensions': ['jinja2.ext.i18n'],
        'autoescape': True,
        'auto_reload': settings.DEBUG,
        'loader': jinja2.ChoiceLoader(loaders),
    }
    # JINJA_CONFIG may be a dict or a callable returning a dict
    if hasattr(settings, 'JINJA_CONFIG'):
        jinja_config = settings.JINJA_CONFIG
        if hasattr(jinja_config, '__call__'):
            jinja_config = jinja_config()
        opts.update(jinja_config)

    environment = Environment(**opts)
    # Install null translations since gettext isn't always loaded up during
    # testing.
    if ('jinja2.ext.i18n' in environment.extensions or
            'jinja2.ext.InternationalizationExtension' in environment.extensions):
        environment.install_null_translations()
    return environment
def render_to_string(request, template, context=None):
    """
    Render a template into a string.
    """
    # A string argument is a template path; resolve it through our env first.
    if not isinstance(template, jinja2.environment.Template):
        template = env.get_template(template)

    # Merge the caller's context with Django's standard context processors.
    merged = {} if context is None else context.copy()
    for processor in get_standard_processors():
        merged.update(processor(request))
    return template.render(merged)
def load_helpers():
    """Try to import ``helpers.py`` from each app in INSTALLED_APPS."""
    # Deferred as long as possible so there aren't any weird circular
    # imports with jingo itself.
    global _helpers_loaded
    if _helpers_loaded:
        return
    _helpers_loaded = True

    from jingo import helpers  # noqa

    for app in settings.INSTALLED_APPS:
        try:
            app_path = import_module(app).__path__
        except AttributeError:
            # plain module, not a package: nothing to scan
            continue
        try:
            imp.find_module('helpers', app_path)
        except ImportError:
            # the app simply has no helpers module
            continue
        import_module('%s.helpers' % app)
class Register(object):
    """Decorators to add filters and functions to the template Environment."""

    def __init__(self, env):
        self.env = env

    def filter(self, f=None, override=True):
        """Adds the decorated function to Jinja's filter library."""
        if not f:
            # Called with arguments: build a decorator that wraps the
            # target and re-enters with the chosen override flag.
            def decorator(func):
                @functools.wraps(func)
                def wrapper(*args, **kw):
                    return func(*args, **kw)
                return self.filter(wrapper, override)
            return decorator
        if override or f.__name__ not in self.env.filters:
            self.env.filters[f.__name__] = f
        return f

    def function(self, f=None, override=True):
        """Adds the decorated function to Jinja's global namespace."""
        if not f:
            def decorator(func):
                @functools.wraps(func)
                def wrapper(*args, **kw):
                    return func(*args, **kw)
                return self.function(wrapper, override)
            return decorator
        if override or f.__name__ not in self.env.globals:
            self.env.globals[f.__name__] = f
        return f

    def inclusion_tag(self, template):
        """Adds a function to Jinja, but like Django's @inclusion_tag."""
        def decorator(func):
            @functools.wraps(func)
            def wrapper(*args, **kw):
                # the wrapped function returns the context for *template*
                rendered = env.get_template(template).render(func(*args, **kw))
                return jinja2.Markup(rendered)
            return self.function(wrapper)
        return decorator
# Module-level singletons: the configured Jinja environment and the
# decorator registry bound to it.
env = get_env()
register = Register(env)
class Template(jinja2.Template):
    def render(self, context=None):
        """Render's a template, context can be a Django Context or a
        dictionary.

        Fixed: the mutable default argument ``context={}`` is replaced by a
        ``None`` sentinel (shared-default pitfall); ``None`` now renders
        with an empty context instead of failing downstream.
        """
        context = {} if context is None else context
        # flatten the Django Context into a single dictionary.
        context_dict = {}
        if hasattr(context, 'dicts'):
            for d in context.dicts:
                context_dict.update(d)
        else:
            context_dict = context

        # Django Debug Toolbar needs a RequestContext-like object in order
        # to inspect context.
        class FakeRequestContext:
            dicts = [context]
        context = FakeRequestContext()

        # Used by debug_toolbar.
        if settings.TEMPLATE_DEBUG:
            from django.test import signals
            self.origin = Origin(self.filename)
            signals.template_rendered.send(sender=self, template=self,
                                           context=context)
        return super(Template, self).render(context_dict)
class Loader(BaseLoader):
    is_usable = True
    # route every template loaded through jingo to our Template subclass
    env.template_class = Template

    def __init__(self):
        super(Loader, self).__init__()
        pattern = getattr(settings, 'JINGO_INCLUDE_PATTERN', None)
        self.include_re = re.compile(pattern) if pattern else None

    def _valid_template(self, template_name):
        """False when the name fails the include pattern or belongs to an
        excluded app; True otherwise."""
        if self.include_re and not self.include_re.search(template_name):
            return False
        if hasattr(template_name, 'split'):
            app = template_name.split('/')[0]
            if app in getattr(settings, 'JINGO_EXCLUDE_APPS', EXCLUDE_APPS):
                return False
        return True

    def load_template(self, template_name, template_dirs=None):
        """Return (template, origin) or raise TemplateDoesNotExist."""
        if not self._valid_template(template_name):
            raise TemplateDoesNotExist(template_name)
        try:
            template = env.get_template(template_name)
        except jinja2.TemplateNotFound:
            raise TemplateDoesNotExist(template_name)
        return template, template.filename

    def load_template_source(self, template_name, template_dirs=None):
        """Return (source, origin) for debugging/inspection tools."""
        if not self._valid_template(template_name):
            raise TemplateDoesNotExist(template_name)
        try:
            template = env.get_template(template_name)
        except jinja2.TemplateNotFound:
            raise TemplateDoesNotExist(template_name)
        with open(template.filename, 'rb') as fp:
            return (fp.read().decode(settings.FILE_CHARSET), template.filename)
| {
"repo_name": "jsocol/jingo",
"path": "jingo/__init__.py",
"copies": "1",
"size": "7562",
"license": "bsd-3-clause",
"hash": -2789610080519373000,
"line_mean": 30.2479338843,
"line_max": 79,
"alpha_frac": 0.60711452,
"autogenerated": false,
"ratio": 4.328563251287922,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5435677771287921,
"avg_score": null,
"num_lines": null
} |
# adapter from audio stream to asynchronous graph
import time
import warnings
import numpy as np
from typing import Tuple
import queue
from . import AudioHardware as AH
from ..graph import Port, Block, UnderrunWarning
from ..graph.typing import Array
from .stream import IOStream
class InStream_SourceBlock(IOStream, Block):
    """Bridges an audio input stream into the async graph as a source block."""
    inputs = []
    outputs = [Port(Tuple[Array[[None, 'nChannelsPerFrame'], np.float32], float, float])]

    def __init__(self, ios):
        self.ios = ios
        self.nChannelsPerFrame = self.ios.nChannelsPerFrame(
            AH.kAudioObjectPropertyScopeInput)
        super().__init__()

    # IOStream methods
    def write(self, frames, inputTime, now):
        """Forward captured frames and their timestamps into the graph,
        unless the downstream queue has already been closed."""
        if self.output_queues[0].closed:
            return
        self.output(((frames, inputTime, now),))
        self.notify()

    def inDone(self):
        """Capture is finished once the downstream queue closes."""
        return self.output_queues[0].closed
class OutStream_SinkBlock(IOStream, Block):
    # Sink block that feeds audio frames from the graph to an output stream
    # on demand, stitching queued fragments into fixed-size reads.
    inputs = [Port(Array[[None, 'nChannelsPerFrame'], np.float32])]
    outputs = []

    def __init__(self):
        super().__init__()
        # tail of an input fragment only partially consumed by the last read()
        self.outFragment = None
        self.warnOnUnderrun = True

    # IOStream methods
    def read(self, nFrames, outputTime, now):
        """Return exactly nFrames frames; pads with zeros on underrun."""
        result = np.empty((nFrames, self.nChannelsPerFrame), np.float32)
        i = 0
        # first drain any fragment left over from the previous read
        if self.outFragment is not None:
            n = min(self.outFragment.shape[0], nFrames)
            result[:n] = self.outFragment[:n]
            i += n
            if n < self.outFragment.shape[0]:
                self.outFragment = self.outFragment[n:]
            else:
                self.outFragment = None
        # then consume fragments from the input queue until full
        while i < nFrames:
            try:
                fragment = self.input1(0)
            except queue.Empty:
                # underrun: no input available, fill the rest with silence
                result[i:] = 0
                if self.warnOnUnderrun:
                    warnings.warn('%s underrun' % self.__class__.__name__, UnderrunWarning)
                break
            if fragment.ndim != 2 or fragment.shape[1] != self.nChannelsPerFrame:
                raise ValueError('shape mismatch')
            n = min(nFrames-i, fragment.shape[0])
            result[i:i+n] = fragment[:n]
            i += n
            if fragment.shape[0] > n:
                # keep the unconsumed tail for the next read
                self.outFragment = fragment[n:]
        return result

    def outDone(self):
        # playback is finished once the upstream queue closes
        return self.input_queues[0].closed
class IOSession_Block(Block):
    # Ties an IOSession's lifetime to the graph: the audio session starts
    # when the block starts and stops when the block is stopped.
    inputs = []
    outputs = []

    def __init__(self, ios):
        super().__init__()
        self.ios = ios

    def start(self):
        # start the graph block first, then bring up audio I/O
        super().start()
        self.ios.start()

    def stopped(self):
        # tear down audio I/O before completing block shutdown
        self.ios.stop()
        super().stopped()
# software loopback
class AudioBypass_Block(Block):
    """Software loopback: replays incoming frames as timestamped output."""
    inputs = [Port(Array[[None, 'nChannelsPerFrame'], np.float32])]
    outputs = [Port(Tuple[Array[[None, 'nChannelsPerFrame'], np.float32], float, float])]

    def process(self):
        """Stamp each input frame batch with the current time and emit it."""
        for frames, in self.iterinput():
            now = time.monotonic()
            self.output1(0, (frames, now, now))

    def injectSilence(self):
        """Emit 1000 frames of silence (e.g. to prime downstream consumers)."""
        # NOTE(review): np.zeros defaults to float64 while the output port
        # declares float32 — confirm downstream tolerates the wider dtype.
        now = time.monotonic()
        self.output1(0, (np.zeros((1000, self.nChannelsPerFrame)), now, now))
        self.notify()
| {
"repo_name": "piannucci/blurt",
"path": "blurt_py_80211/streaming/blurt/audio/graph_adapter.py",
"copies": "1",
"size": "3122",
"license": "mit",
"hash": 7913129483973551000,
"line_mean": 29.3106796117,
"line_max": 94,
"alpha_frac": 0.5800768738,
"autogenerated": false,
"ratio": 3.8590852904820765,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49391621642820766,
"avg_score": null,
"num_lines": null
} |
"""Adapter layer to distributed job manager server api
"""
import os
import os.path
import logging
import datetime
from fabric.api import *
from fabric.operations import put
import fabric.network
from file_locator import FileLocator
import utils
from dist_job_mgr.client import get_local_connection
from dist_job_mgr.version import VERSION
import dist_job_mgr.common as common
class DjmAdapterError(Exception):
    """Raised when the distributed-job-manager adapter hits an error."""
logger = logging.getLogger(__name__)
def get_djm_connection():
    """Return a DJM client connection rooted at the local server directory."""
    locator = FileLocator()
    return get_local_connection(locator.get_djm_server_dir())
class DjmJob(object):
    """Wraps a running DJM job: node lookup, fabric env setup and shutdown."""

    def __init__(self, c, job_id, nodes):
        """c is the DJM client connection, job_id the started job's id, and
        nodes the list of node dicts allocated to the job.  Also populates
        the module-global fabric ``env`` (hosts, roledefs, hostdefs)."""
        self.c = c
        self.job_id = job_id
        self.nodes = nodes
        self.nodes_by_name = {}
        for node in nodes:
            # prefer the private ip for intra-cluster traffic
            if node["private_ip"]!=None:
                ip_address = node["private_ip"]
            else:
                ip_address = node["public_ip"]
            if not ip_address:
                raise Exception("Neither public ip address nor private ip address specified for node %s, need to specify at least one" % node["name"])
            node["datablox_ip_address"] = ip_address
            self.nodes_by_name[node["name"]] = node
        self.nodes_except_master = filter(lambda name: name!="master",
                                          [node["name"] for node in self.nodes])
        # set up the fabric nodes
        env.hosts = [node["name"] for node in self.nodes]
        env.roledefs['workers'] = self.nodes_except_master
        for node in nodes:
            env.hostdefs[node["name"]] = "%s@%s" % (node["os_username"],
                                                    node["contact_address"])
            logger.debug("Node %s defined as %s" % (node["name"],
                                                    env.hostdefs[node["name"]]))
        if "master" in env.hosts:
            env.roledefs['master'] = ['master',]
        else:
            env.roledefs['master'] = []

    def has_node(self, node_name):
        # True when a node with this name was allocated to the job
        return self.nodes_by_name.has_key(node_name)

    def get_node(self, node_name):
        # raises KeyError when the node was not allocated to this job
        return self.nodes_by_name[node_name]

    def stop_job(self, successful=True, msg=None):
        """Mark the DJM job finished and tear down all fabric connections."""
        if successful:
            self.c.stop_job(self.job_id,
                            common.JobStatus.JOB_SUCCESSFUL,
                            comment=msg)
            logger.debug("Stopped job %s, status=JOB_SUCESSFUL" % self.job_id)
        else:
            self.c.stop_job(self.job_id,
                            common.JobStatus.JOB_FAILED,
                            comment=msg)
            logger.debug("Stopped job %s, status=FAILED" % self.job_id)
        fabric.network.disconnect_all()
@task
@parallel
@roles("workers")
def setup_worker_node(reuse_existing_installs):
    """Fabric task: install (or reuse) the datablox caretaker on a worker.

    Runs in parallel on every host in the 'workers' role.
    """
    logger.info("setup_worker_node: reuse_existing_installs = %s" % reuse_existing_installs)
    fl = FileLocator()
    dist_path = fl.get_engage_distribution_file()
    # todo: don't copy engage if existing install can be reused
    setup_script = os.path.join(fl.get_sw_packages_dir(), "setup_caretaker.sh")
    put(setup_script, "~/setup_caretaker.sh")
    run("chmod 755 ~/setup_caretaker.sh")
    if reuse_existing_installs:
        run("~/setup_caretaker.sh --reuse-existing-install")
    else:
        # fresh install: ship the engage distribution before running setup
        put(dist_path, "~/" + os.path.basename(dist_path))
        run("~/setup_caretaker.sh")
def start_job_and_get_nodes(node_list, config_file_name, total_nodes=None,
                            reuse_existing_installs=True):
    """Given a node list and optional number of nodes, try to get the
    requested nodes and start a job.

    Returns a DjmJob wrapping the allocated nodes.  On any failure during
    node initialization the DJM job is marked JOB_FAILED before re-raising.
    """
    if not total_nodes:
        total_nodes = len(node_list)
    if total_nodes<1:
        raise DjmAdapterError("Must have at least one node")
    c = get_djm_connection()
    # make sure there aren't any dead jobs laying around
    c.cleanup_dead_coordinators()
    # all requested nodes must come from a single pool
    pool = None
    for node_name in node_list:
        n = c.find_node_by_name(node_name)
        if not n:
            raise DjmAdapterError("Node '%s' not defined" % node_name)
        if n["pool"]:
            if pool and pool!=n["pool"]:
                raise DjmAdapterError("Cannot take nodes from both pool %s and pool %s"%
                                      (pool, n["pool"]))
            pool = n["pool"]
    start_time = datetime.datetime.now().strftime("%Y-%m-%d %H:%M")
    j = c.start_job(config_file_name, common.JobType.ONE_TIME_JOB,
                    total_nodes, "Datablox job started %s" % start_time,
                    node_pool_name=pool, requested_nodes=node_list)
    logger.info("Started DJM job %s" % j)
    try:
        fl = FileLocator()
        allocated_nodes = c.query_nodes(job_id=j)
        djm_job = DjmJob(c, j, allocated_nodes)
        logger.info("Setting up nodes")
        # for all the non-master nodes, we setup the caretaker
        nodes_except_master = djm_job.nodes_except_master
        if len(nodes_except_master)>0:
            execute(setup_worker_node, reuse_existing_installs)
        # make sure the master node has the caretaker running
        if djm_job.has_node("master"):
            utils.run_svcctl(fl, ["start", "all"])
        return djm_job
    except KeyboardInterrupt:
        logger.exception("Got keyboard interrupt in node initialization")
        c.stop_job(j, common.JobStatus.JOB_FAILED,
                   comment="Got keyboard interrupt in node initialization")
        raise
    except Exception, e:
        logger.exception("DJM problem in node initialization: %s" % e)
        c.stop_job(j, common.JobStatus.JOB_FAILED,
                   comment="DJM problem in node initialization: %s" % e)
        raise
| {
"repo_name": "mpi-sws-rse/datablox",
"path": "engage/adapter_pkg/datablox_engage_adapter/djm_server.py",
"copies": "1",
"size": "5697",
"license": "apache-2.0",
"hash": 2887419267799295500,
"line_mean": 37.7551020408,
"line_max": 150,
"alpha_frac": 0.5875021941,
"autogenerated": false,
"ratio": 3.708984375,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47964865691,
"avg_score": null,
"num_lines": null
} |
import requests
from hs_restclient import HydroShare, DEFAULT_HOSTNAME, HydroShareNotAuthorized, HydroShareNotFound, \
HydroShareHTTPException, default_progress_callback
from requests_toolbelt import MultipartEncoder, MultipartEncoderMonitor
from oauthlib.oauth2.rfc6749.errors import TokenExpiredError
class HydroShareAdapter(HydroShare):
def __init__(self, hostname=DEFAULT_HOSTNAME, port=None, use_https=True, verify=True,
auth=None, default_headers=None):
self._default_headers = default_headers
super(HydroShareAdapter, self).__init__(hostname=hostname, port=port, use_https=use_https, verify=verify,
auth=auth)
def _build_params(self, params): # type: (dict) -> str
param_vals = ['{param}={val}'.format(param=p, val=v) for p, v in params.iteritems()]
return "?{params}".format(params="&".join(param_vals))
def _request(self, method, url, params=None, data=None, files=None, headers=None, stream=False, **kwargs):
timeout = None
if 'timeout' in kwargs:
timeout = kwargs.get('timeout', None)
if self._default_headers and headers:
headers.update(self._default_headers)
elif self._default_headers:
headers = self._default_headers
return self.session.request(method, url, params=params, data=data, files=files, headers=headers,
stream=stream, verify=self.verify, timeout=timeout, **kwargs)
def getSystemMetadata(self, pid, **kwargs):
"""
Returns system metadata for a resource which includes the dublin core elements
Note: HydroShareAdapter overrides it's super class method, getSystemMetadata(), so 'timeout' can be
included in **kwargs. By default, the requests library does not have timeout limit for HTTP requests, and
some views wait for getSystemMetadata() to complete an HTTP request to hydroshare.org before the views are
rendered.
"""
timeout = None
if 'timeout' in kwargs:
timeout = kwargs.get('timeout', None)
url = "{url_base}/resource/{pid}/sysmeta/".format(url_base=self.url_base,
pid=pid)
headers = self._default_headers
access_token = self.auth.token.get('access_token', None)
if access_token is not None:
headers['Authorization'] = 'Bearer {0}'.format(access_token)
req = requests.get(url, headers=headers, timeout=timeout)
if req.status_code != 200:
if req.status_code == 403:
raise HydroShareNotAuthorized((req.request.method, url))
elif req.status_code == 404:
raise HydroShareNotFound((pid,))
else:
raise HydroShareHTTPException((url, req.request.method, req.status_code))
return req.json()
def addResourceFile(self, pid, resource_file, resource_filename=None, progress_callback=None):
url = "{url_base}/resource/{pid}/files/".format(url_base=self.url_base,
pid=pid)
params = {}
close_fd = self._prepareFileForUpload(params, resource_file, resource_filename)
encoder = MultipartEncoder(params)
if progress_callback is None:
progress_callback = default_progress_callback
monitor = MultipartEncoderMonitor(encoder, progress_callback)
r = self._request('POST', url, data=monitor, headers={'Content-Type': monitor.content_type,
'Connection': 'keep-alive',
'Keep-Alive': 'timeout=10, max=1000'})
if close_fd:
fd = params['file'][1]
fd.close()
if r.status_code != 201:
if r.status_code == 403:
raise HydroShareNotAuthorized(('POST', url))
elif r.status_code == 404:
raise HydroShareNotFound((pid,))
else:
raise HydroShareHTTPException((url, 'POST', r.status_code))
response = r.json()
assert (response['resource_id'] == pid)
return response['resource_id']
def getAccessRules(self, pid):
    """Fetch the access rules for the resource identified by *pid*.

    :raises HydroShareNotAuthorized: on HTTP 403.
    :raises HydroShareNotFound: on HTTP 404.
    :raises HydroShareHTTPException: on any other non-200 status.
    """
    url = "{url_base}/resource/{pid}/access/".format(url_base=self.url_base, pid=pid)
    response = self._request('GET', url)
    if response.status_code == 200:
        return response.json()
    if response.status_code == 403:
        raise HydroShareNotAuthorized(('GET', url))
    if response.status_code == 404:
        raise HydroShareNotFound((pid,))
    raise HydroShareHTTPException((url, 'GET', response.status_code))
def updateKeywords(self, pid, keywords):  # type: (str, set) -> object
    """Replace the resource's subject keywords with *keywords*.

    :raises HydroShareNotAuthorized: on HTTP 403.
    :raises HydroShareNotFound: on HTTP 404.
    :raises HydroShareHTTPException: on any other non-202 status.
    """
    url = "{url_base}/resource/{pid}/scimeta/elements/".format(url_base=self.url_base, pid=pid)
    payload = {"subjects": [{"value": keyword} for keyword in keywords]}
    r = self.session.request('PUT', url, json=payload)
    if r.status_code == 202:
        return r.json().get('subjects', dict())
    if r.status_code == 403:
        raise HydroShareNotAuthorized(('PUT', url))
    if r.status_code == 404:
        raise HydroShareNotFound((pid,))
    raise HydroShareHTTPException((url, 'PUT', r.status_code, keywords))
| {
"repo_name": "ODM2/ODM2WebSDL",
"path": "src/hydroshare_util/adapter.py",
"copies": "1",
"size": "5639",
"license": "bsd-3-clause",
"hash": -5919317772670111000,
"line_mean": 42.0458015267,
"line_max": 114,
"alpha_frac": 0.5829047703,
"autogenerated": false,
"ratio": 4.26228269085412,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0017218256302632873,
"num_lines": 131
} |
"""Adapters for connecting time-series data to Bokeh visualizations
To use Bokeh visualizations, do the following
1. Create an instance of BokehPlotManager
2. Register all plots with the BokehPlotManager using BokehPlot objects.
BokehPlot objects provide the basic formatting of the plots.
3. Call BokehPlotManager's start() routine to start the visualization
4. Make BokehPlotManager subscribe to the event streams.
TODO: Step 2 and step 4 should be combined into one
TODO: Currently, we do not support BokehPlot's with multiple plots
TODO: formatting, etc
"""
"""Define an event type for time-series data from sensors.
from collections import namedtuple
# Define a sensor event as a tuple of sensor id, timestamp, and value.
# A 'sensor' is just a generator of sensor events.
SensorEvent = namedtuple('SensorEvent', ['sensor_id', 'ts', 'val'])
"""
import datetime
import logging
import functools
from math import pi
import threading, queue
from bokeh.charts import TimeSeries, show, output_file, output_server
from bokeh.plotting import figure, curdoc
from bokeh.layouts import column # to show two or more plots arranged in a column
import numpy as np
from bokeh.models import ColumnDataSource
from bokeh.models import DatetimeTickFormatter
from bokeh.client import push_session
from antevents.base import Filter, filtermethod
logger = logging.getLogger(__name__)
TOOLS="pan,wheel_zoom,box_zoom,reset,save"
tooltips=[
("Open", "@Open"),
("Close", "@Close"),
("High", "@High"),
("Low", "@Low"),
("Volume", "@Volume")
]
def bokeh_timeseries_mapper(events):
    """Map a sequence of sensor events to a Bokeh column-data dict.

    Each event must expose ``ts`` (timestamp, coercible to float) and
    ``val`` (the sensor reading).

    :return: ``{'timestamp': [floats], 'value': [readings]}`` suitable
        for a ColumnDataSource.
    """
    # The original kept an unused 'dttm' list and a debug print per event;
    # both removed.
    timestamps = []
    values = []
    for event in events:
        timestamps.append(float(event.ts))
        values.append(event.val)
    return {'timestamp': timestamps, 'value': values}
def bokeh_default_mapper(csv):
    """Default mapper: return the given CSV data unchanged."""
    result = csv
    return result
class BokehPlotWorker(threading.Thread):
    """Background thread that renders all registered plots in one Bokeh
    server session and periodically drains each plot's queue of new data.

    ``plotters`` maps plot name -> {'queue': queue.Queue,
    'plot_specs': BokehPlot}.
    """

    def __init__(self, plotters):
        threading.Thread.__init__(self)
        # name -> {'queue': ..., 'plot_specs': ...}
        self.plotters = plotters

    def update(self, name):
        """Periodic callback: move at most one queued event for *name*
        into its ColumnDataSource; no-op when the queue is empty."""
        print("In update")
        whichqueue = self.plotters[name]['queue']
        whichsource = self.plotters[name]['plot_specs'].source
        try:
            data = whichqueue.get_nowait()
            if data:
                # ts = datetime.datetime.fromtimestamp(data.ts)
                ts = (data.ts)
                val = data.val
                print('data = ', data)
                new_data = dict(timestamp=[ts], value=[val])
                print('newdata = ', new_data)
                whichsource.stream(new_data)
        except queue.Empty:
            pass

    def make_fig(self, plot_source):
        """Build a line+scatter figure for one plot spec and register its
        periodic update callback with the current document."""
        plot_specs = plot_source['plot_specs']
        p = figure(plot_height=400, tools=TOOLS, y_axis_location='left', title=plot_specs.name)
        p.xaxis.axis_label = plot_specs.x_axis_label
        p.yaxis.axis_label = plot_specs.y_axis_label
        # Keep the view pinned to the newest streamed points.
        p.x_range.follow = "end"
        p.x_range.follow_interval = 10
        p.x_range.range_padding = 0
        # p.xaxis.formatter=DatetimeTickFormatter(dict(seconds=["%S"],minutes=["%M"],hours=["%d %B %Y"],days=["%d %B %Y"],months=["%d %B %Y"],years=["%d %B %Y"]))
        p.xaxis.major_label_orientation = pi/4
        p.line(x=plot_specs.x_axis_label, y=plot_specs.y_axis_label, color="blue", source=plot_specs.source)
        p.circle(x=plot_specs.x_axis_label, y=plot_specs.y_axis_label, color="red", source=plot_specs.source)
        curdoc().add_periodic_callback(functools.partial(self.update, name=plot_specs.name), plot_specs.update_period)  # period in ms
        return p

    def run(self):
        """Thread entry point: build all figures, push a server session,
        and block until the browser session is closed."""
        print("In thread.run")
        self.figs = [self.make_fig(self.plotters[name]) for name in self.plotters]
        self.session = push_session(curdoc())
        self.session.show(column(self.figs))
        curdoc().title = 'AntEvent Streams'
        self.session.loop_until_closed()
class BokehPlot(object):
    """Specification for one time-series plot: axis labels, refresh period,
    and the ColumnDataSource the worker thread streams data into."""

    def __init__(self, name, y_axis_label="", x_axis_label="timestamp", update_period_in_ms=500):
        self.name = name
        self.x_axis_label = x_axis_label
        self.y_axis_label = y_axis_label
        # How often (in milliseconds) the worker polls this plot's queue.
        self.update_period = update_period_in_ms
        # Empty columns keyed by the axis labels; filled via .stream().
        self.source = ColumnDataSource({x_axis_label: [], y_axis_label: []})
class BokehPlotManager(Filter):
    """Fan events out to registered BokehPlot queues.

    Usage: register() all plots, then start(), then feed (plot_name, data)
    tuples via on_next().
    """

    def __init__(self):
        self.plotters = { }
        self.open_for_registration = True
        self.started = False

    def register(self, plot):
        """Register a BokehPlot before start(); raises afterwards."""
        if self.open_for_registration:
            self.plotters[plot.name] = { 'queue' : queue.Queue(), 'plot_specs' : plot }
        else:
            raise Exception("Bokeh Adapter: Plot manager does not dynamically add registrations.")

    def start(self):
        """Freeze registrations and launch the rendering thread."""
        self.open_for_registration = False
        self.bokeh_plot_worker = BokehPlotWorker(self.plotters)
        self.bokeh_plot_worker.start()
        self.started = True

    def on_next(self, t):
        """Accept a (plot_name, data) tuple and enqueue it for its plot."""
        whichplot, data = t
        assert self.started, "BokehPlotManager: Data sent without initialization"
        if whichplot in self.plotters:
            self.plotters[whichplot]['queue'].put(data)
        else:
            # Bug fix: the message was built logging-style ("msg", arg) and
            # was never interpolated; use %-formatting.
            raise Exception("Plot %s not found among registered plots" % whichplot)

    def on_completed(self):
        # NOTE(review): terminates the process on stream completion.
        exit(1)

    def on_error(self, e):
        # Bug fix: on_error previously took no error argument, unlike
        # BokehStreamer.on_error and the dispatchers that call it.
        pass
# The following is deprecated. Use BokehPlotManager
class BokehOutputWorker(threading.Thread):
    """Deprecated single-plot worker: drains one queue into one figure.

    NOTE(review): ``source`` is a class attribute, so every instance shares
    the same ColumnDataSource — confirm this is intentional before reuse.
    """
    source = ColumnDataSource(dict(timestamp=[], value=[]))

    def __init__(self, sensor_id, datasource):
        threading.Thread.__init__(self)
        self.q = datasource
        self.title = sensor_id
        # Counts consecutive polls; reset whenever data arrives.
        self.counter = 0

    def update(self):
        """Periodic callback: stream one queued event into the plot;
        terminates the process after 10 polls without fresh data."""
        print("In update")
        try:
            data = self.q.get_nowait()
            if data:
                print('data = ', data)
                ts = data.ts
                val = data.val
                new_data = dict(timestamp=[ts], value=[val])
                # Keep a rolling window of the latest 300 points.
                self.source.stream(new_data, 300)
                self.counter = 0
        except queue.Empty:
            pass
        self.counter = self.counter + 1
        if self.counter == 10:
            # NOTE(review): exits the whole process when idle — confirm.
            exit(0)

    def run(self):
        """Thread entry point: build the figure, push a server session,
        and block until the browser session is closed."""
        print("In thread.run")
        self.p = figure(plot_height=500, tools=TOOLS, y_axis_location='left', title=self.title)
        self.p.x_range.follow = "end"
        self.p.xaxis.axis_label = "Timestamp"
        self.p.x_range.follow_interval = 100
        self.p.x_range.range_padding = 0
        self.p.line(x="timestamp", y="value", color="blue", source=self.source)
        self.p.circle(x="timestamp", y="value", color="red", source=self.source)
        self.session = push_session(curdoc())
        curdoc().add_periodic_callback(self.update, 100)  # period in ms
        self.session.show(column(self.p))
        curdoc().title = 'Sensor'
        self.session.loop_until_closed()

    # def register(self, d, sourceq):
    #     source = ColumnDataSource(dict(d))
    #     self.p.line(x=d[0], y=d[1], color="orange", source=source)
    #     curdoc().add_periodic_callback(self.update, 100) #period in ms
class BokehStreamer(Filter):
    """Deprecated: feed an event stream into a single BokehOutputWorker."""

    def __init__(self, initial_csv, io_loop=None):
        # NOTE(review): initial_csv and io_loop are accepted but unused here.
        self.q = queue.Queue()
        self.bokeh_worker = BokehOutputWorker("Sensor", self.q)
        self.bokeh_worker.start()

    def on_next(self, x):
        # Enqueue the event; the worker thread renders it.
        print("next:", x)
        self.q.put(x)

    def on_completed(self):
        self.q.join()
        # NOTE(review): threading.Thread has no stop(); this raises
        # AttributeError unless BokehOutputWorker gains a stop() method.
        self.bokeh_worker.stop()
        self._dispatch_completed()

    def on_error(self, e):
        self.q.join()
        self._dispatch_error(e)
def bokeh_output_streaming(csv):
    """Write an event stream to a Bokeh visualizer.

    :return: the BokehStreamer instance so the caller can hold a reference
        or subscribe it (the original created the instance and dropped it).
    """
    return BokehStreamer(csv)
| {
"repo_name": "mpi-sws-rse/antevents-python",
"path": "antevents/adapters/bokeh.py",
"copies": "1",
"size": "7891",
"license": "apache-2.0",
"hash": 3266741370045244000,
"line_mean": 31.8791666667,
"line_max": 162,
"alpha_frac": 0.6146242555,
"autogenerated": false,
"ratio": 3.6164069660861595,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9659789650089945,
"avg_score": 0.014248314299242787,
"num_lines": 240
} |
"""Adapters for converting to and from a type's value according to an
IConvertible protocol.
"""
from datetime import date, time
# decimal is treated as optional (absent in very old Pythons); converters
# that need it are only defined when haveDecimal is True.
try:
    import decimal
    haveDecimal = True
except ImportError:
    haveDecimal = False
from formal import iformal, validation
from zope.interface import implements
class _Adapter(object):
def __init__(self, original):
self.original = original
class NullConverter(_Adapter):
    """Pass-through converter: values cross the boundary unchanged
    (None included)."""
    implements( iformal.IStringConvertible )

    def fromType(self, value):
        # Identity: None maps to None, everything else to itself.
        return value

    def toType(self, value):
        return value
class NumberToStringConverter(_Adapter):
    """Convert numbers <-> strings; subclasses choose the numeric type."""
    implements( iformal.IStringConvertible )
    # Subclasses set this to the numeric constructor (int, float, Decimal).
    cast = None

    def fromType(self, value):
        return None if value is None else str(value)

    def toType(self, value):
        if value is None:
            return None
        stripped = value.strip()
        if not stripped:
            return None
        # "Cast" the value to the correct type. For some strange reason,
        # Python's decimal.Decimal type raises an ArithmeticError when it's
        # given a dodgy value.
        try:
            return self.cast(stripped)
        except (ValueError, ArithmeticError):
            raise validation.FieldValidationError("Not a valid number")
# Concrete numeric converters: each just picks the cast applied by
# NumberToStringConverter.toType().
class IntegerToStringConverter(NumberToStringConverter):
    cast = int

class FloatToStringConverter(NumberToStringConverter):
    cast = float

# Only defined when the decimal module imported successfully.
if haveDecimal:
    class DecimalToStringConverter(NumberToStringConverter):
        cast = decimal.Decimal
class BooleanToStringConverter(_Adapter):
    """Convert booleans <-> the strings 'True'/'False'."""
    implements( iformal.IStringConvertible )

    def fromType(self, value):
        if value is None:
            return None
        return 'True' if value else 'False'

    def toType(self, value):
        if value is not None:
            value = value.strip()
        # None or blank input means "no value".
        if not value:
            return None
        if value not in ('True', 'False'):
            raise validation.FieldValidationError('%r should be either True or False'%value)
        return value == 'True'
class DateToStringConverter(_Adapter):
    """Convert datetime.date <-> ISO 'YYYY-MM-DD' strings."""
    implements( iformal.IStringConvertible )

    def fromType(self, value):
        # date -> ISO string; None passes through.
        if value is None:
            return None
        return value.isoformat()

    def toType(self, value):
        # string -> date; None or blank -> None.
        if value is not None:
            value = value.strip()
        if not value:
            return None
        return self.parseDate(value)

    def parseDate(self, value):
        """Parse 'Y-M-D'; raise FieldValidationError on malformed input."""
        try:
            y, m, d = [int(p) for p in value.split('-')]
        except ValueError:
            raise validation.FieldValidationError('Invalid date')
        try:
            value = date(y, m, d)
        except ValueError, e:
            # Out-of-range component (e.g. month 13).
            raise validation.FieldValidationError('Invalid date: '+str(e))
        return value
class TimeToStringConverter(_Adapter):
    """Convert datetime.time <-> 'HH:MM[:SS[.ms]]' strings."""
    implements( iformal.IStringConvertible )

    def fromType(self, value):
        if value is None:
            return None
        return value.isoformat()

    def toType(self, value):
        # None or blank -> None; otherwise parse.
        if value is not None:
            value = value.strip()
        if not value:
            return None
        return self.parseTime(value)

    def parseTime(self, value):
        """Parse 'H:M', 'H:M:S' or 'H:M:S.ms'; raise FieldValidationError
        on anything else."""
        # Split off fractional seconds first.
        if '.' in value:
            value, ms = value.split('.')
        else:
            ms = 0
        try:
            parts = value.split(':')
            if len(parts)<2 or len(parts)>3:
                raise ValueError()
            if len(parts) == 2:
                h, m = parts
                s = 0
            else:
                h, m, s = parts
            h, m, s, ms = int(h), int(m), int(s), int(ms)
        except:
            # NOTE(review): bare except also hides unexpected errors;
            # catching ValueError would suffice here.
            raise validation.FieldValidationError('Invalid time')
        try:
            value = time(h, m, s, ms)
        except ValueError, e:
            # Out-of-range component (e.g. hour 25).
            raise validation.FieldValidationError('Invalid time: '+str(e))
        return value
class DateToDateTupleConverter(_Adapter):
    """Convert date <-> (year, month, day) tuples."""
    implements( iformal.IDateTupleConvertible )

    def fromType(self, value):
        # None becomes a tuple of three Nones so unpacking still works.
        if value is None:
            return None, None, None
        return value.year, value.month, value.day

    def toType(self, value):
        if value is None:
            return None
        try:
            value = date(*value)
        except (TypeError, ValueError), e:
            # Wrong arity or out-of-range component.
            raise validation.FieldValidationError('Invalid date: '+str(e))
        return value
class SequenceToStringConverter(_Adapter):
    """Convert a sequence <-> a single CSV-encoded line (Python 2 only:
    uses cStringIO and csv reader .next())."""
    implements( iformal.IStringConvertible)

    def fromType(self, value):
        if value is None:
            return None
        # Local imports defer the cost until a conversion actually happens.
        import cStringIO as StringIO
        import csv
        sf = StringIO.StringIO()
        writer = csv.writer(sf)
        writer.writerow(value)
        sf.seek(0,0)
        # strip() drops the trailing newline the csv writer appends.
        return sf.read().strip()

    def toType(self, value):
        if not value:
            return None
        import cStringIO as StringIO
        import csv
        sf = StringIO.StringIO()
        # The reader is lazy, so creating it before writing is safe.
        csvReader = csv.reader(sf)
        sf.write(value)
        sf.seek(0,0)
        # Return the first parsed row as a list of fields.
        return csvReader.next()
| {
"repo_name": "emgee/formal",
"path": "formal/converters.py",
"copies": "1",
"size": "5383",
"license": "mit",
"hash": -5309029155154310000,
"line_mean": 25.1310679612,
"line_max": 92,
"alpha_frac": 0.5652981609,
"autogenerated": false,
"ratio": 4.604790419161676,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5670088580061676,
"avg_score": null,
"num_lines": null
} |
"""Adapters for IPython msg spec versions."""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
import json
from IPython.core.release import kernel_protocol_version_info
from IPython.utils.tokenutil import token_at_cursor
def code_to_line(code, cursor_pos):
    """Turn a multiline code block and cursor position into a single line
    and new cursor position.

    For adapting ``complete_`` and ``object_info_request``.
    """
    if not code:
        return "", 0
    line = ""
    for line in code.splitlines(True):
        length = len(line)
        if cursor_pos <= length:
            break
        cursor_pos -= length
    return line, cursor_pos
class Adapter(object):
    """Base class for adapting messages.

    Override message_type(msg) methods to create adapters.
    """
    # Old message-type name -> new name; empty in the base class.
    msg_type_map = {}

    def update_header(self, msg):
        return msg

    def update_metadata(self, msg):
        return msg

    def update_msg_type(self, msg):
        """Rename the message type per msg_type_map, in both the header
        and the top-level key."""
        header = msg['header']
        renamed = self.msg_type_map.get(header['msg_type'])
        if renamed is not None:
            msg['msg_type'] = header['msg_type'] = renamed
        return msg

    def handle_reply_status_error(self, msg):
        """This will be called *instead of* the regular handler
        on any reply with status != ok
        """
        return msg

    def __call__(self, msg):
        for transform in (self.update_header, self.update_metadata, self.update_msg_type):
            msg = transform(msg)
        handler = getattr(self, msg['header']['msg_type'], None)
        if handler is None:
            return msg
        # handle status=error replies separately (no change, at present)
        if msg['content'].get('status', None) in {'error', 'aborted'}:
            return self.handle_reply_status_error(msg)
        return handler(msg)
def _version_str_to_list(version):
"""convert a version string to a list of ints
non-int segments are excluded
"""
v = []
for part in version.split('.'):
try:
v.append(int(part))
except ValueError:
pass
return v
class V5toV4(Adapter):
    """Adapt msg protocol v5 to v4"""

    version = '4.1'

    # v5 message-type names -> their v4 equivalents.
    msg_type_map = {
        'execute_result' : 'pyout',
        'execute_input' : 'pyin',
        'error' : 'pyerr',
        'inspect_request' : 'object_info_request',
        'inspect_reply' : 'object_info_reply',
    }

    def update_header(self, msg):
        # v4 headers carry no 'version' key.
        msg['header'].pop('version', None)
        return msg

    # shell channel

    def kernel_info_reply(self, msg):
        """Downgrade kernel_info_reply: version strings become int lists
        and language_info is flattened into top-level keys."""
        v4c = {}
        content = msg['content']
        for key in ('language_version', 'protocol_version'):
            if key in content:
                v4c[key] = _version_str_to_list(content[key])
        if content.get('implementation', '') == 'ipython' \
            and 'implementation_version' in content:
            v4c['ipython_version'] = _version_str_to_list(content['implementation_version'])
        language_info = content.get('language_info', {})
        language = language_info.get('name', '')
        v4c.setdefault('language', language)
        if 'version' in language_info:
            v4c.setdefault('language_version', _version_str_to_list(language_info['version']))
        msg['content'] = v4c
        return msg

    def execute_request(self, msg):
        # v4 expects a user_variables list in the request.
        content = msg['content']
        content.setdefault('user_variables', [])
        return msg

    def execute_reply(self, msg):
        content = msg['content']
        content.setdefault('user_variables', {})
        # TODO: handle payloads
        return msg

    def complete_request(self, msg):
        # v4 completion is line-based; v5 sends whole code + cursor offset.
        content = msg['content']
        code = content['code']
        cursor_pos = content['cursor_pos']
        line, cursor_pos = code_to_line(code, cursor_pos)
        new_content = msg['content'] = {}
        new_content['text'] = ''
        new_content['line'] = line
        new_content['block'] = None
        new_content['cursor_pos'] = cursor_pos
        return msg

    def complete_reply(self, msg):
        # Reconstruct v4 matched_text from v5 cursor_start/cursor_end.
        content = msg['content']
        cursor_start = content.pop('cursor_start')
        cursor_end = content.pop('cursor_end')
        match_len = cursor_end - cursor_start
        content['matched_text'] = content['matches'][0][:match_len]
        content.pop('metadata', None)
        return msg

    def object_info_request(self, msg):
        content = msg['content']
        code = content['code']
        cursor_pos = content['cursor_pos']
        # NOTE(review): 'line' is computed but unused; only oname is kept.
        line, _ = code_to_line(code, cursor_pos)
        new_content = msg['content'] = {}
        new_content['oname'] = token_at_cursor(code, cursor_pos)
        new_content['detail_level'] = content['detail_level']
        return msg

    def object_info_reply(self, msg):
        """inspect_reply can't be easily backward compatible"""
        msg['content'] = {'found' : False, 'oname' : 'unknown'}
        return msg

    # iopub channel

    def stream(self, msg):
        # v4 used 'data' where v5 uses 'text'.
        content = msg['content']
        content['data'] = content.pop('text')
        return msg

    def display_data(self, msg):
        content = msg['content']
        content.setdefault("source", "display")
        data = content['data']
        if 'application/json' in data:
            # v4 transported JSON mime data as a serialized string.
            try:
                data['application/json'] = json.dumps(data['application/json'])
            except Exception:
                # warn?
                pass
        return msg

    # stdin channel

    def input_request(self, msg):
        # The 'password' flag did not exist in v4.
        msg['content'].pop('password', None)
        return msg
class V4toV5(Adapter):
    """Convert msg spec V4 to V5"""

    version = '5.0'

    # invert message renames above
    msg_type_map = {v:k for k,v in V5toV4.msg_type_map.items()}

    def update_header(self, msg):
        # v5 headers carry the protocol version.
        msg['header']['version'] = self.version
        return msg

    # shell channel

    def kernel_info_reply(self, msg):
        """Upgrade kernel_info_reply: int-list versions become dotted
        strings and language details move into language_info."""
        content = msg['content']
        for key in ('protocol_version', 'ipython_version'):
            if key in content:
                content[key] = '.'.join(map(str, content[key]))
        content.setdefault('protocol_version', '4.1')
        if content['language'].startswith('python') and 'ipython_version' in content:
            content['implementation'] = 'ipython'
            content['implementation_version'] = content.pop('ipython_version')
        language = content.pop('language')
        language_info = content.setdefault('language_info', {})
        language_info.setdefault('name', language)
        if 'language_version' in content:
            language_version = '.'.join(map(str, content.pop('language_version')))
            language_info.setdefault('version', language_version)
        content['banner'] = ''
        return msg

    def execute_request(self, msg):
        # v5 user_expressions supersedes v4 user_variables.
        content = msg['content']
        user_variables = content.pop('user_variables', [])
        user_expressions = content.setdefault('user_expressions', {})
        for v in user_variables:
            user_expressions[v] = v
        return msg

    def execute_reply(self, msg):
        content = msg['content']
        user_expressions = content.setdefault('user_expressions', {})
        user_variables = content.pop('user_variables', {})
        if user_variables:
            user_expressions.update(user_variables)
        # Pager payloads became a mime bundle
        for payload in content.get('payload', []):
            if payload.get('source', None) == 'page' and ('text' in payload):
                if 'data' not in payload:
                    payload['data'] = {}
                payload['data']['text/plain'] = payload.pop('text')
        return msg

    def complete_request(self, msg):
        # v5 sends the whole line as 'code' plus the cursor offset.
        old_content = msg['content']
        new_content = msg['content'] = {}
        new_content['code'] = old_content['line']
        new_content['cursor_pos'] = old_content['cursor_pos']
        return msg

    def complete_reply(self, msg):
        # complete_reply needs more context than we have to get cursor_start and end.
        # use special end=null to indicate current cursor position and negative offset
        # for start relative to the cursor.
        # start=None indicates that start == end (accounts for no -0).
        content = msg['content']
        new_content = msg['content'] = {'status' : 'ok'}
        new_content['matches'] = content['matches']
        if content['matched_text']:
            new_content['cursor_start'] = -len(content['matched_text'])
        else:
            # no -0, use None to indicate that start == end
            new_content['cursor_start'] = None
        new_content['cursor_end'] = None
        new_content['metadata'] = {}
        return msg

    def inspect_request(self, msg):
        content = msg['content']
        name = content['oname']
        new_content = msg['content'] = {}
        new_content['code'] = name
        new_content['cursor_pos'] = len(name)
        new_content['detail_level'] = content['detail_level']
        return msg

    def inspect_reply(self, msg):
        """inspect_reply can't be easily backward compatible"""
        content = msg['content']
        new_content = msg['content'] = {'status' : 'ok'}
        found = new_content['found'] = content['found']
        new_content['data'] = data = {}
        new_content['metadata'] = {}
        if found:
            lines = []
            # Prefer the most specific definition and docstring available.
            for key in ('call_def', 'init_definition', 'definition'):
                if content.get(key, False):
                    lines.append(content[key])
                    break
            for key in ('call_docstring', 'init_docstring', 'docstring'):
                if content.get(key, False):
                    lines.append(content[key])
                    break
            if not lines:
                lines.append("<empty docstring>")
            data['text/plain'] = '\n'.join(lines)
        return msg

    # iopub channel

    def stream(self, msg):
        # v5 uses 'text' where v4 used 'data'.
        content = msg['content']
        content['text'] = content.pop('data')
        return msg

    def display_data(self, msg):
        content = msg['content']
        content.pop("source", None)
        data = content['data']
        if 'application/json' in data:
            # v5 transports JSON mime data as parsed structures.
            try:
                data['application/json'] = json.loads(data['application/json'])
            except Exception:
                # warn?
                pass
        return msg

    # stdin channel

    def input_request(self, msg):
        msg['content'].setdefault('password', False)
        return msg
def adapt(msg, to_version=kernel_protocol_version_info[0]):
    """Adapt a single message to a target version

    Parameters
    ----------
    msg : dict
        An IPython message.
    to_version : int, optional
        The target major version.
        If unspecified, adapt to the current version for IPython.

    Returns
    -------
    msg : dict
        An IPython message appropriate in the new version.
    """
    header = msg['header']
    if 'version' in header:
        from_version = int(header['version'].split('.')[0])
    else:
        # The 'version' header key was introduced in protocol v5.
        from_version = 4
    adapter = adapters.get((from_version, to_version))
    return msg if adapter is None else adapter(msg)
# one adapter per major version from,to
adapters = {
(5,4) : V5toV4(),
(4,5) : V4toV5(),
}
| {
"repo_name": "wolfram74/numerical_methods_iserles_notes",
"path": "venv/lib/python2.7/site-packages/IPython/kernel/adapter.py",
"copies": "2",
"size": "11788",
"license": "mit",
"hash": -8934437121141680000,
"line_mean": 30.6032171582,
"line_max": 94,
"alpha_frac": 0.5563284696,
"autogenerated": false,
"ratio": 4.1875666074600355,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0279152833403134,
"num_lines": 373
} |
"""Adapters for Jupyter msg spec versions."""
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
import re
import json
from datetime import datetime
from jupyter_client import protocol_version_info
def code_to_line(code, cursor_pos):
    """Turn a multiline code block and cursor position into a single line
    and new cursor position.

    For adapting ``complete_`` and ``object_info_request``.
    """
    if not code:
        return "", 0
    line = ""
    for line in code.splitlines(True):
        width = len(line)
        if cursor_pos <= width:
            break
        cursor_pos -= width
    return line, cursor_pos
# Heuristics for finding the identifier under the cursor (v4 object_info):
# _match_bracket strips innermost balanced "(...)" groups,
# _end_bracket drops a trailing unclosed "(...",
# _identifier matches dotted Python-style identifiers (case-insensitive).
_match_bracket = re.compile(r'\([^\(\)]+\)', re.UNICODE)
_end_bracket = re.compile(r'\([^\(]*$', re.UNICODE)
_identifier = re.compile(r'[a-z_][0-9a-z._]*', re.I|re.UNICODE)
def extract_oname_v4(code, cursor_pos):
    """Reimplement token-finding logic from IPython 2.x javascript
    for adapting object_info_request from v5 to v4
    """
    line, _ = code_to_line(code, cursor_pos)
    # Repeatedly strip innermost balanced bracket groups until stable.
    stripped = _match_bracket.sub(u'', line)
    while stripped != line:
        line = stripped
        stripped = _match_bracket.sub(u'', line)
    # remove everything after last open bracket
    remainder = _end_bracket.sub('', stripped)
    names = _identifier.findall(remainder)
    return names[-1] if names else ''
class Adapter(object):
    """Base class for adapting messages.

    Override message_type(msg) methods to create adapters.
    """
    # Old message-type name -> new name; empty in the base class.
    msg_type_map = {}

    def update_header(self, msg):
        return msg

    def update_metadata(self, msg):
        return msg

    def update_msg_type(self, msg):
        """Rename the message type per msg_type_map, in both the header
        and the top-level key."""
        header = msg['header']
        replacement = self.msg_type_map.get(header['msg_type'])
        if replacement is not None:
            msg['msg_type'] = header['msg_type'] = replacement
        return msg

    def handle_reply_status_error(self, msg):
        """This will be called *instead of* the regular handler
        on any reply with status != ok
        """
        return msg

    def __call__(self, msg):
        for step in (self.update_header, self.update_metadata, self.update_msg_type):
            msg = step(msg)
        handler = getattr(self, msg['header']['msg_type'], None)
        if handler is None:
            return msg
        # handle status=error replies separately (no change, at present)
        if msg['content'].get('status', None) in {'error', 'aborted'}:
            return self.handle_reply_status_error(msg)
        return handler(msg)
def _version_str_to_list(version):
"""convert a version string to a list of ints
non-int segments are excluded
"""
v = []
for part in version.split('.'):
try:
v.append(int(part))
except ValueError:
pass
return v
class V5toV4(Adapter):
    """Adapt msg protocol v5 to v4"""

    version = '4.1'

    # v5 message-type names -> their v4 equivalents.
    msg_type_map = {
        'execute_result' : 'pyout',
        'execute_input' : 'pyin',
        'error' : 'pyerr',
        'inspect_request' : 'object_info_request',
        'inspect_reply' : 'object_info_reply',
    }

    def update_header(self, msg):
        # v4 headers (and parent headers) carry no 'version' key.
        msg['header'].pop('version', None)
        msg['parent_header'].pop('version', None)
        return msg

    # shell channel

    def kernel_info_reply(self, msg):
        """Downgrade kernel_info_reply: version strings become int lists
        and language_info is flattened into top-level keys."""
        v4c = {}
        content = msg['content']
        for key in ('language_version', 'protocol_version'):
            if key in content:
                v4c[key] = _version_str_to_list(content[key])
        if content.get('implementation', '') == 'ipython' \
            and 'implementation_version' in content:
            v4c['ipython_version'] = _version_str_to_list(content['implementation_version'])
        language_info = content.get('language_info', {})
        language = language_info.get('name', '')
        v4c.setdefault('language', language)
        if 'version' in language_info:
            v4c.setdefault('language_version', _version_str_to_list(language_info['version']))
        msg['content'] = v4c
        return msg

    def execute_request(self, msg):
        # v4 expects a user_variables list in the request.
        content = msg['content']
        content.setdefault('user_variables', [])
        return msg

    def execute_reply(self, msg):
        content = msg['content']
        content.setdefault('user_variables', {})
        # TODO: handle payloads
        return msg

    def complete_request(self, msg):
        # v4 completion is line-based; v5 sends whole code + cursor offset.
        content = msg['content']
        code = content['code']
        cursor_pos = content['cursor_pos']
        line, cursor_pos = code_to_line(code, cursor_pos)
        new_content = msg['content'] = {}
        new_content['text'] = ''
        new_content['line'] = line
        new_content['block'] = None
        new_content['cursor_pos'] = cursor_pos
        return msg

    def complete_reply(self, msg):
        # Reconstruct v4 matched_text from v5 cursor_start/cursor_end.
        content = msg['content']
        cursor_start = content.pop('cursor_start')
        cursor_end = content.pop('cursor_end')
        match_len = cursor_end - cursor_start
        content['matched_text'] = content['matches'][0][:match_len]
        content.pop('metadata', None)
        return msg

    def object_info_request(self, msg):
        content = msg['content']
        code = content['code']
        cursor_pos = content['cursor_pos']
        # NOTE(review): 'line' is computed but unused; only oname is kept.
        line, _ = code_to_line(code, cursor_pos)
        new_content = msg['content'] = {}
        new_content['oname'] = extract_oname_v4(code, cursor_pos)
        new_content['detail_level'] = content['detail_level']
        return msg

    def object_info_reply(self, msg):
        """inspect_reply can't be easily backward compatible"""
        msg['content'] = {'found' : False, 'oname' : 'unknown'}
        return msg

    # iopub channel

    def stream(self, msg):
        # v4 used 'data' where v5 uses 'text'.
        content = msg['content']
        content['data'] = content.pop('text')
        return msg

    def display_data(self, msg):
        content = msg['content']
        content.setdefault("source", "display")
        data = content['data']
        if 'application/json' in data:
            # v4 transported JSON mime data as a serialized string.
            try:
                data['application/json'] = json.dumps(data['application/json'])
            except Exception:
                # warn?
                pass
        return msg

    # stdin channel

    def input_request(self, msg):
        # The 'password' flag did not exist in v4.
        msg['content'].pop('password', None)
        return msg
class V4toV5(Adapter):
    """Convert msg spec V4 to V5"""

    version = '5.0'

    # invert message renames above
    msg_type_map = {v:k for k,v in V5toV4.msg_type_map.items()}

    def update_header(self, msg):
        # v5 headers (and non-empty parent headers) carry the version.
        msg['header']['version'] = self.version
        if msg['parent_header']:
            msg['parent_header']['version'] = self.version
        return msg

    # shell channel

    def kernel_info_reply(self, msg):
        """Upgrade kernel_info_reply: int-list versions become dotted
        strings and language details move into language_info."""
        content = msg['content']
        for key in ('protocol_version', 'ipython_version'):
            if key in content:
                content[key] = '.'.join(map(str, content[key]))
        content.setdefault('protocol_version', '4.1')
        if content['language'].startswith('python') and 'ipython_version' in content:
            content['implementation'] = 'ipython'
            content['implementation_version'] = content.pop('ipython_version')
        language = content.pop('language')
        language_info = content.setdefault('language_info', {})
        language_info.setdefault('name', language)
        if 'language_version' in content:
            language_version = '.'.join(map(str, content.pop('language_version')))
            language_info.setdefault('version', language_version)
        content['banner'] = ''
        return msg

    def execute_request(self, msg):
        # v5 user_expressions supersedes v4 user_variables.
        content = msg['content']
        user_variables = content.pop('user_variables', [])
        user_expressions = content.setdefault('user_expressions', {})
        for v in user_variables:
            user_expressions[v] = v
        return msg

    def execute_reply(self, msg):
        content = msg['content']
        user_expressions = content.setdefault('user_expressions', {})
        user_variables = content.pop('user_variables', {})
        if user_variables:
            user_expressions.update(user_variables)
        # Pager payloads became a mime bundle
        for payload in content.get('payload', []):
            if payload.get('source', None) == 'page' and ('text' in payload):
                if 'data' not in payload:
                    payload['data'] = {}
                payload['data']['text/plain'] = payload.pop('text')
        return msg

    def complete_request(self, msg):
        # v5 sends the whole line as 'code' plus the cursor offset.
        old_content = msg['content']
        new_content = msg['content'] = {}
        new_content['code'] = old_content['line']
        new_content['cursor_pos'] = old_content['cursor_pos']
        return msg

    def complete_reply(self, msg):
        # complete_reply needs more context than we have to get cursor_start and end.
        # use special end=null to indicate current cursor position and negative offset
        # for start relative to the cursor.
        # start=None indicates that start == end (accounts for no -0).
        content = msg['content']
        new_content = msg['content'] = {'status' : 'ok'}
        new_content['matches'] = content['matches']
        if content['matched_text']:
            new_content['cursor_start'] = -len(content['matched_text'])
        else:
            # no -0, use None to indicate that start == end
            new_content['cursor_start'] = None
        new_content['cursor_end'] = None
        new_content['metadata'] = {}
        return msg

    def inspect_request(self, msg):
        content = msg['content']
        name = content['oname']
        new_content = msg['content'] = {}
        new_content['code'] = name
        new_content['cursor_pos'] = len(name)
        new_content['detail_level'] = content['detail_level']
        return msg

    def inspect_reply(self, msg):
        """inspect_reply can't be easily backward compatible"""
        content = msg['content']
        new_content = msg['content'] = {'status' : 'ok'}
        found = new_content['found'] = content['found']
        new_content['data'] = data = {}
        new_content['metadata'] = {}
        if found:
            lines = []
            # Prefer the most specific definition and docstring available.
            for key in ('call_def', 'init_definition', 'definition'):
                if content.get(key, False):
                    lines.append(content[key])
                    break
            for key in ('call_docstring', 'init_docstring', 'docstring'):
                if content.get(key, False):
                    lines.append(content[key])
                    break
            if not lines:
                lines.append("<empty docstring>")
            data['text/plain'] = '\n'.join(lines)
        return msg

    # iopub channel

    def stream(self, msg):
        # v5 uses 'text' where v4 used 'data'.
        content = msg['content']
        content['text'] = content.pop('data')
        return msg

    def display_data(self, msg):
        content = msg['content']
        content.pop("source", None)
        data = content['data']
        if 'application/json' in data:
            # v5 transports JSON mime data as parsed structures.
            try:
                data['application/json'] = json.loads(data['application/json'])
            except Exception:
                # warn?
                pass
        return msg

    # stdin channel

    def input_request(self, msg):
        msg['content'].setdefault('password', False)
        return msg
def adapt(msg, to_version=protocol_version_info[0]):
    """Adapt a single message to a target version

    Parameters
    ----------
    msg : dict
        A Jupyter message.
    to_version : int, optional
        The target major version.
        If unspecified, adapt to the current version.

    Returns
    -------
    msg : dict
        A Jupyter message appropriate in the new version.
    """
    header = msg['header']
    # stamp messages from older clients that omitted the date field
    if 'date' not in header:
        header['date'] = datetime.now().isoformat()
    try:
        version_str = header['version']
    except KeyError:
        # assume the last protocol version before the key was added
        from_version = 4
    else:
        from_version = int(version_str.split('.')[0])
    # pass through unchanged when no adapter is registered for this pair
    adapter = adapters.get((from_version, to_version))
    return msg if adapter is None else adapter(msg)
# one adapter per major version from,to
adapters = {
    (5,4) : V5toV4(),  # downgrade v5 messages for v4 peers
    (4,5) : V4toV5(),  # upgrade v4 messages to the current v5 spec
}
| {
"repo_name": "lancezlin/ml_template_py",
"path": "lib/python2.7/site-packages/jupyter_client/adapter.py",
"copies": "3",
"size": "12462",
"license": "mit",
"hash": 2297094977251245800,
"line_mean": 29.6191646192,
"line_max": 94,
"alpha_frac": 0.5692505216,
"autogenerated": false,
"ratio": 4.025193798449612,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.003029495630408625,
"num_lines": 407
} |
"""Adapters for Jupyter msg spec versions."""
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
import re
import json
from jupyter_client import protocol_version_info
def code_to_line(code, cursor_pos):
    """Turn a multiline code block and cursor position into a single line
    and new cursor position.

    For adapting ``complete_`` and ``object_info_request``.
    """
    if not code:
        return "", 0
    offset = cursor_pos
    # keepends=True so line lengths (incl. newlines) sum to the full offset
    for line in code.splitlines(True):
        if offset <= len(line):
            break
        offset -= len(line)
    return line, offset
# an innermost fully-closed bracket group, e.g. the ``(a, b)`` in ``f(a, b)``
_match_bracket = re.compile(r'\([^\(\)]+\)', re.UNICODE)
# an unclosed trailing ``(`` and everything after it
_end_bracket = re.compile(r'\([^\(]*$', re.UNICODE)
# a dotted identifier, case-insensitive
_identifier = re.compile(r'[a-z_][0-9a-z._]*', re.I|re.UNICODE)
def extract_oname_v4(code, cursor_pos):
    """Reimplement token-finding logic from IPython 2.x javascript
    for adapting object_info_request from v5 to v4
    """
    line, _ = code_to_line(code, cursor_pos)
    # repeatedly strip fully-closed bracket groups until the line is stable
    while True:
        stripped = _match_bracket.sub(u'', line)
        if stripped == line:
            break
        line = stripped
    # drop anything after a still-open bracket
    line = _end_bracket.sub('', line)
    idents = _identifier.findall(line)
    return idents[-1] if idents else ''
class Adapter(object):
    """Base class for adapting messages.

    Override message_type(msg) methods to create adapters.
    """

    # msg_type renames applied by update_msg_type
    msg_type_map = {}

    def update_header(self, msg):
        """Hook: adjust the message header. Default is a no-op."""
        return msg

    def update_metadata(self, msg):
        """Hook: adjust the message metadata. Default is a no-op."""
        return msg

    def update_msg_type(self, msg):
        """Rename the message type according to ``msg_type_map``."""
        header = msg['header']
        new_type = self.msg_type_map.get(header['msg_type'])
        if new_type is not None:
            msg['msg_type'] = header['msg_type'] = new_type
        return msg

    def handle_reply_status_error(self, msg):
        """This will be called *instead of* the regular handler
        on any reply with status != ok
        """
        return msg

    def __call__(self, msg):
        for step in (self.update_header,
                     self.update_metadata,
                     self.update_msg_type):
            msg = step(msg)
        # dispatch to the method named after the (renamed) msg_type
        handler = getattr(self, msg['header']['msg_type'], None)
        if handler is None:
            return msg
        # handle status=error replies separately (no change, at present)
        if msg['content'].get('status') in {'error', 'aborted'}:
            return self.handle_reply_status_error(msg)
        return handler(msg)
def _version_str_to_list(version):
"""convert a version string to a list of ints
non-int segments are excluded
"""
v = []
for part in version.split('.'):
try:
v.append(int(part))
except ValueError:
pass
return v
class V5toV4(Adapter):
    """Adapt msg protocol v5 to v4"""
    version = '4.1'

    # v5 -> v4 message type renames
    msg_type_map = {
        'execute_result' : 'pyout',
        'execute_input' : 'pyin',
        'error' : 'pyerr',
        'inspect_request' : 'object_info_request',
        'inspect_reply' : 'object_info_reply',
    }

    def update_header(self, msg):
        # v4 headers have no version field
        msg['header'].pop('version', None)
        return msg

    # shell channel

    def kernel_info_reply(self, msg):
        """Flatten the v5 kernel_info structure into v4's fields."""
        v4c = {}
        content = msg['content']
        for key in ('language_version', 'protocol_version'):
            if key in content:
                # v4 stored versions as lists of ints, not strings
                v4c[key] = _version_str_to_list(content[key])
        if content.get('implementation', '') == 'ipython' \
                and 'implementation_version' in content:
            v4c['ipython_version'] = _version_str_to_list(content['implementation_version'])
        language_info = content.get('language_info', {})
        language = language_info.get('name', '')
        v4c.setdefault('language', language)
        if 'version' in language_info:
            v4c.setdefault('language_version', _version_str_to_list(language_info['version']))
        msg['content'] = v4c
        return msg

    def execute_request(self, msg):
        # v4 expects a user_variables list in the request
        content = msg['content']
        content.setdefault('user_variables', [])
        return msg

    def execute_reply(self, msg):
        content = msg['content']
        content.setdefault('user_variables', {})
        # TODO: handle payloads
        return msg

    def complete_request(self, msg):
        """Reduce a v5 (code, cursor_pos) completion to v4's single line."""
        content = msg['content']
        code = content['code']
        cursor_pos = content['cursor_pos']
        line, cursor_pos = code_to_line(code, cursor_pos)
        new_content = msg['content'] = {}
        new_content['text'] = ''
        new_content['line'] = line
        new_content['block'] = None
        new_content['cursor_pos'] = cursor_pos
        return msg

    def complete_reply(self, msg):
        """Reconstruct v4 matched_text from the v5 cursor offsets."""
        content = msg['content']
        cursor_start = content.pop('cursor_start')
        cursor_end = content.pop('cursor_end')
        match_len = cursor_end - cursor_start
        matches = content['matches']
        # Guard against an empty completion list: ``matches[0]`` previously
        # raised IndexError when the kernel returned no matches.
        content['matched_text'] = matches[0][:match_len] if matches else ''
        content.pop('metadata', None)
        return msg

    def object_info_request(self, msg):
        """Convert a v5 inspect_request into a v4 object_info_request."""
        content = msg['content']
        code = content['code']
        cursor_pos = content['cursor_pos']
        new_content = msg['content'] = {}
        new_content['oname'] = extract_oname_v4(code, cursor_pos)
        new_content['detail_level'] = content['detail_level']
        return msg

    def object_info_reply(self, msg):
        """inspect_reply can't be easily backward compatible"""
        msg['content'] = {'found' : False, 'oname' : 'unknown'}
        return msg

    # iopub channel

    def stream(self, msg):
        # v4 called the payload field 'data', not 'text'
        content = msg['content']
        content['data'] = content.pop('text')
        return msg

    def display_data(self, msg):
        content = msg['content']
        # v4 requires a 'source' field
        content.setdefault("source", "display")
        data = content['data']
        if 'application/json' in data:
            try:
                # v4 transported JSON data as a serialized string
                data['application/json'] = json.dumps(data['application/json'])
            except Exception:
                # warn?
                pass
        return msg

    # stdin channel

    def input_request(self, msg):
        # v4 has no 'password' flag
        msg['content'].pop('password', None)
        return msg
class V4toV5(Adapter):
    """Convert msg spec V4 to V5"""
    version = '5.0'

    # invert message renames above
    msg_type_map = {v:k for k,v in V5toV4.msg_type_map.items()}

    def update_header(self, msg):
        # v5 messages carry their protocol version in the header
        msg['header']['version'] = self.version
        return msg

    # shell channel

    def kernel_info_reply(self, msg):
        # Rebuild the v5 kernel_info structure (implementation,
        # language_info, banner) from the flat v4 fields.
        content = msg['content']
        for key in ('protocol_version', 'ipython_version'):
            if key in content:
                # v4 stored versions as lists of ints; v5 uses strings
                content[key] = '.'.join(map(str, content[key]))
        content.setdefault('protocol_version', '4.1')
        # NOTE(review): assumes the v4 reply always carries a 'language'
        # key - a missing key would raise KeyError here; confirm upstream.
        if content['language'].startswith('python') and 'ipython_version' in content:
            content['implementation'] = 'ipython'
            content['implementation_version'] = content.pop('ipython_version')
        language = content.pop('language')
        language_info = content.setdefault('language_info', {})
        language_info.setdefault('name', language)
        if 'language_version' in content:
            language_version = '.'.join(map(str, content.pop('language_version')))
            language_info.setdefault('version', language_version)
        content['banner'] = ''
        return msg

    def execute_request(self, msg):
        # v4 user_variables become identity user_expressions in v5
        content = msg['content']
        user_variables = content.pop('user_variables', [])
        user_expressions = content.setdefault('user_expressions', {})
        for v in user_variables:
            user_expressions[v] = v
        return msg

    def execute_reply(self, msg):
        content = msg['content']
        user_expressions = content.setdefault('user_expressions', {})
        user_variables = content.pop('user_variables', {})
        if user_variables:
            user_expressions.update(user_variables)
        # Pager payloads became a mime bundle
        for payload in content.get('payload', []):
            if payload.get('source', None) == 'page' and ('text' in payload):
                if 'data' not in payload:
                    payload['data'] = {}
                payload['data']['text/plain'] = payload.pop('text')
        return msg

    def complete_request(self, msg):
        # v4 only had the current line; present it to v5 as the full code
        old_content = msg['content']
        new_content = msg['content'] = {}
        new_content['code'] = old_content['line']
        new_content['cursor_pos'] = old_content['cursor_pos']
        return msg

    def complete_reply(self, msg):
        # complete_reply needs more context than we have to get cursor_start and end.
        # use special end=null to indicate current cursor position and negative offset
        # for start relative to the cursor.
        # start=None indicates that start == end (accounts for no -0).
        content = msg['content']
        new_content = msg['content'] = {'status' : 'ok'}
        new_content['matches'] = content['matches']
        if content['matched_text']:
            new_content['cursor_start'] = -len(content['matched_text'])
        else:
            # no -0, use None to indicate that start == end
            new_content['cursor_start'] = None
        new_content['cursor_end'] = None
        new_content['metadata'] = {}
        return msg

    def inspect_request(self, msg):
        # v5 inspects (code, cursor_pos): use the v4 oname as the code,
        # with the cursor at the end of the name
        content = msg['content']
        name = content['oname']
        new_content = msg['content'] = {}
        new_content['code'] = name
        new_content['cursor_pos'] = len(name)
        new_content['detail_level'] = content['detail_level']
        return msg

    def inspect_reply(self, msg):
        """inspect_reply can't be easily backward compatible"""
        content = msg['content']
        new_content = msg['content'] = {'status' : 'ok'}
        found = new_content['found'] = content['found']
        new_content['data'] = data = {}
        new_content['metadata'] = {}
        if found:
            lines = []
            # take the most specific definition and docstring available
            for key in ('call_def', 'init_definition', 'definition'):
                if content.get(key, False):
                    lines.append(content[key])
                    break
            for key in ('call_docstring', 'init_docstring', 'docstring'):
                if content.get(key, False):
                    lines.append(content[key])
                    break
            if not lines:
                lines.append("<empty docstring>")
            data['text/plain'] = '\n'.join(lines)
        return msg

    # iopub channel

    def stream(self, msg):
        # v5 renamed the payload field from 'data' to 'text'
        content = msg['content']
        content['text'] = content.pop('data')
        return msg

    def display_data(self, msg):
        content = msg['content']
        # v5 dropped the 'source' field
        content.pop("source", None)
        data = content['data']
        if 'application/json' in data:
            try:
                # v4 serialized JSON payloads; v5 expects the parsed object
                data['application/json'] = json.loads(data['application/json'])
            except Exception:
                # warn?
                pass
        return msg

    # stdin channel

    def input_request(self, msg):
        # v5 adds a 'password' flag; default to a plain (echoed) prompt
        msg['content'].setdefault('password', False)
        return msg
def adapt(msg, to_version=protocol_version_info[0]):
    """Adapt a single message to a target version

    Parameters
    ----------
    msg : dict
        A Jupyter message.
    to_version : int, optional
        The target major version.
        If unspecified, adapt to the current version.

    Returns
    -------
    msg : dict
        A Jupyter message appropriate in the new version.
    """
    header = msg['header']
    try:
        version_str = header['version']
    except KeyError:
        # assume the last protocol version before the key was added
        from_version = 4
    else:
        from_version = int(version_str.split('.')[0])
    # pass through unchanged when no adapter is registered for this pair
    adapter = adapters.get((from_version, to_version))
    return msg if adapter is None else adapter(msg)
# one adapter per major version from,to
adapters = {
    (5,4) : V5toV4(),  # downgrade v5 messages for v4 peers
    (4,5) : V4toV5(),  # upgrade v4 messages to the current v5 spec
}
| {
"repo_name": "bdh1011/wau",
"path": "venv/lib/python2.7/site-packages/jupyter_client/adapter.py",
"copies": "1",
"size": "12208",
"license": "mit",
"hash": 484897025171666100,
"line_mean": 29.52,
"line_max": 94,
"alpha_frac": 0.5686435125,
"autogenerated": false,
"ratio": 4.022405271828665,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5091048784328664,
"avg_score": null,
"num_lines": null
} |
"""Adapters for registering models with django-watson."""
from __future__ import unicode_literals
import sys, json
from itertools import chain, islice
from threading import local
from functools import wraps
from weakref import WeakValueDictionary
from compat import force_text
from django.conf import settings
from django.core.signals import request_finished
from django.core.exceptions import ImproperlyConfigured, ObjectDoesNotExist
from django.contrib.contenttypes.models import ContentType
from django.db import models
from django.db.models import Q
from django.db.models.query import QuerySet
from django.db.models.signals import post_save, pre_delete
from django.utils.html import strip_tags
try:
from importlib import import_module
except ImportError:
from django.utils.importlib import import_module
from watson.models import SearchEntry, has_int_pk
# Raised by SearchAdapter._resolve_field when a field cannot be resolved.
class SearchAdapterError(Exception):
    """Something went wrong with a search adapter."""
class SearchAdapter(object):
    """An adapter for performing a full-text search on a model.

    Subclass and set ``fields``/``exclude``/``store`` (or override the
    ``get_*`` methods) to control what gets indexed for a model.
    """

    # Use to specify the fields that should be included in the search.
    fields = ()
    # Use to exclude fields from the search.
    exclude = ()
    # Use to specify object properties to be stored in the search index.
    store = ()

    def __init__(self, model):
        """Initializes the search adapter for the given model class."""
        self.model = model

    def _resolve_field(self, obj, name):
        """Resolves the content of the given model field.

        ``name`` may be a related lookup using Django's double-underscore
        syntax (e.g. ``"author__name"``); each segment is resolved in turn,
        either on the object itself or, as a fallback, on this adapter.
        """
        name_parts = name.split("__", 1)
        prefix = name_parts[0]
        # If we're at the end of the resolve chain, return.
        if obj is None:
            return ""
        # Try to get the attribute from the object.
        try:
            value = getattr(obj, prefix)
        except ObjectDoesNotExist:
            # A broken related object resolves to empty content.
            return ""
        except AttributeError:
            # Try to get the attribute from the search adapter.
            try:
                value = getattr(self, prefix)
            except AttributeError:
                raise SearchAdapterError("Could not find a property called {name!r} on either {obj!r} or {search_adapter!r}".format(
                    name = prefix,
                    obj = obj,
                    search_adapter = self,
                ))
            else:
                # Run the attribute on the search adapter, if it's callable.
                if not isinstance(value, (QuerySet, models.Manager)):
                    if callable(value):
                        value = value(obj)
        else:
            # Run the attribute on the object, if it's callable.
            if not isinstance(value, (QuerySet, models.Manager)):
                if callable(value):
                    value = value()
        # Look up recursive fields.
        if len(name_parts) == 2:
            if isinstance(value, (QuerySet, models.Manager)):
                # Resolve the remainder of the lookup on every related object.
                return " ".join(force_text(self._resolve_field(obj, name_parts[1])) for obj in value.all())
            return self._resolve_field(value, name_parts[1])
        # Resolve querysets.
        if isinstance(value, (QuerySet, models.Manager)):
            value = " ".join(force_text(related) for related in value.all())
        # Resolution complete!
        return value

    def prepare_content(self, content):
        """Sanitizes the given content string for better parsing by the search engine."""
        # Strip out HTML tags.
        content = strip_tags(content)
        return content

    def get_title(self, obj):
        """
        Returns the title of this search result. This is given high priority in search result ranking.

        You can access the title of the search entry as `entry.title` in your search results.

        The default implementation returns `force_text(obj)`.
        """
        return force_text(obj)

    def get_description(self, obj):
        """
        Returns the description of this search result. This is given medium priority in search result ranking.

        You can access the description of the search entry as `entry.description` in your search results. Since
        this should contain a short description of the search entry, it's excellent for providing a summary
        in your search results.

        The default implementation returns `""`.
        """
        return ""

    def get_content(self, obj):
        """
        Returns the content of this search result. This is given low priority in search result ranking.

        You can access the content of the search entry as `entry.content` in your search results, although
        this field generally contains a big mess of search data so is less suitable for frontend display.

        The default implementation returns all the registered fields in your model joined together.
        """
        # Get the field names to look up.
        field_names = self.fields or (field.name for field in self.model._meta.fields if isinstance(field, (models.CharField, models.TextField)))
        # Exclude named fields.
        field_names = (field_name for field_name in field_names if field_name not in self.exclude)
        # Create the text.
        return self.prepare_content(" ".join(
            force_text(self._resolve_field(obj, field_name))
            for field_name in field_names
        ))

    def get_url(self, obj):
        """Return the URL of the given obj."""
        if hasattr(obj, "get_absolute_url"):
            return obj.get_absolute_url()
        return ""

    def get_meta(self, obj):
        """Returns a dictionary of meta information about the given obj."""
        return dict(
            (field_name, self._resolve_field(obj, field_name))
            for field_name in self.store
        )

    def get_live_queryset(self):
        """
        Returns the queryset of objects that should be considered live.

        If this returns None, then all objects should be considered live, which is more efficient.
        """
        return None
# Base class for search-engine failures; RegistrationError extends it.
class SearchEngineError(Exception):
    """Something went wrong with a search engine."""
# Raised by SearchEngine.register/unregister/get_adapter for bad registrations.
class RegistrationError(SearchEngineError):
    """Something went wrong when registering a model with a search engine."""
# Raised by SearchContextManager._assert_active when no context is open.
class SearchContextError(Exception):
    """Something went wrong with the search context management."""
def _bulk_save_search_entries(search_entries, batch_size=100):
"""Creates the given search entry data in the most efficient way possible."""
if search_entries:
if hasattr(SearchEntry.objects, "bulk_create"):
search_entries = iter(search_entries)
while True:
search_entry_batch = list(islice(search_entries, 0, batch_size))
if not search_entry_batch:
break
SearchEntry.objects.bulk_create(search_entry_batch)
else:
for search_entry in search_entries:
search_entry.save()
class SearchContextManager(local):
    """A thread-local context manager used to manage saving search data."""

    def __init__(self):
        """Initializes the search context."""
        # Each stack entry is a (set of (engine, obj) pairs, is_invalid) tuple.
        self._stack = []
        # Connect to the signalling framework.
        request_finished.connect(self._request_finished_receiver)

    def is_active(self):
        """Checks that this search context is active."""
        return bool(self._stack)

    def _assert_active(self):
        """Ensures that the search context is active."""
        if not self.is_active():
            raise SearchContextError("The search context is not active.")

    def start(self):
        """Starts a level in the search context."""
        self._stack.append((set(), False))

    def add_to_context(self, engine, obj):
        """Adds an object to the current context, if active."""
        self._assert_active()
        objects, _ = self._stack[-1]
        objects.add((engine, obj))

    def invalidate(self):
        """Marks this search context as broken, so should not be committed."""
        self._assert_active()
        objects, _ = self._stack[-1]
        self._stack[-1] = (objects, True)

    def is_invalid(self):
        """Checks whether this search context is invalid."""
        self._assert_active()
        _, is_invalid = self._stack[-1]
        return is_invalid

    def end(self):
        """Ends a level in the search context."""
        self._assert_active()
        # Save all the models, unless this level was invalidated.
        tasks, is_invalid = self._stack.pop()
        if not is_invalid:
            _bulk_save_search_entries(list(chain.from_iterable(engine._update_obj_index_iter(obj) for engine, obj in tasks)))

    # Context management.

    def update_index(self):
        """
        Marks up a block of code as requiring the search indexes to be updated.

        The returned context manager can also be used as a decorator.
        """
        return SearchContext(self)

    def skip_index_update(self):
        """
        Marks up a block of code as not requiring a search index update.

        Like update_index, the returned context manager can also be used as a decorator.
        """
        return SkipSearchContext(self)

    # Signalling hooks.

    def _request_finished_receiver(self, **kwargs):
        """
        Called at the end of a request, ensuring that any open contexts
        are closed. Not closing all active contexts can cause memory leaks
        and weird behaviour.
        """
        while self.is_active():
            self.end()
class SearchContext(object):
    """An individual context for a search index update."""

    def __init__(self, context_manager):
        """Initializes the search index context."""
        self._context_manager = context_manager

    def __enter__(self):
        """Enters a block of search index management."""
        self._context_manager.start()

    def __exit__(self, exc_type, exc_value, traceback):
        """Leaves a block of search index management."""
        try:
            if exc_type is not None:
                # An exception occurred inside the block: do not commit.
                self._context_manager.invalidate()
        finally:
            # Always pop the context level, even if invalidate() raised.
            self._context_manager.end()

    def __call__(self, func):
        """Allows this search index context to be used as a decorator."""
        @wraps(func)
        def do_search_context(*args, **kwargs):
            self.__enter__()
            # Track whether __exit__ has already run via the except branch,
            # so the finally clause does not call it a second time.
            exception = False
            try:
                return func(*args, **kwargs)
            except:
                exception = True
                # __exit__ returns a falsy value here, so re-raise.
                if not self.__exit__(*sys.exc_info()):
                    raise
            finally:
                if not exception:
                    self.__exit__(None, None, None)
        return do_search_context
class SkipSearchContext(SearchContext):
    """A context that skips over index updating"""

    def __exit__(self, exc_type, exc_value, traceback):
        """Mark it as invalid and exit"""
        try:
            # Invalidate unconditionally so no index update is committed.
            self._context_manager.invalidate()
        finally:
            self._context_manager.end()
# The shared, thread-safe search context manager.
# (Thread safety comes from SearchContextManager subclassing threading.local.)
search_context_manager = SearchContextManager()
class SearchEngine(object):
    """A search engine capable of performing multi-table searches."""

    # Weak registry of every engine, keyed by slug; entries disappear
    # automatically when an engine is garbage-collected.
    _created_engines = WeakValueDictionary()

    @classmethod
    def get_created_engines(cls):
        """Returns all created search engines as (slug, engine) pairs."""
        return list(cls._created_engines.items())

    def __init__(self, engine_slug, search_context_manager=search_context_manager):
        """Initializes the search engine."""
        # Check the slug is unique for this project.
        if engine_slug in SearchEngine._created_engines:
            raise SearchEngineError("A search engine has already been created with the slug {engine_slug!r}".format(
                engine_slug = engine_slug,
            ))
        # Initialize the engine.
        self._registered_models = {}
        self._engine_slug = engine_slug
        # Store the search context.
        self._search_context_manager = search_context_manager
        # Store a reference to this engine.
        self.__class__._created_engines[engine_slug] = self

    def is_registered(self, model):
        """Checks whether the given model is registered with this search engine."""
        return model in self._registered_models

    def register(self, model, adapter_cls=SearchAdapter, **field_overrides):
        """
        Registers the given model with this search engine.

        If the given model is already registered with this search engine, a
        RegistrationError will be raised.
        """
        # Add in custom live filters.
        if isinstance(model, QuerySet):
            live_queryset = model
            model = model.model
            field_overrides["get_live_queryset"] = lambda self_: live_queryset.all()
        # Check for existing registration.
        if self.is_registered(model):
            raise RegistrationError("{model!r} is already registered with this search engine".format(
                model = model,
            ))
        # Perform any customization.
        if field_overrides:
            # Conversion to str is needed because Python 2 doesn't accept unicode for class name
            adapter_cls = type(str("Custom") + adapter_cls.__name__, (adapter_cls,), field_overrides)
        # Perform the registration.
        adapter_obj = adapter_cls(model)
        self._registered_models[model] = adapter_obj
        # Connect to the signalling framework.
        post_save.connect(self._post_save_receiver, model)
        pre_delete.connect(self._pre_delete_receiver, model)

    def unregister(self, model):
        """
        Unregisters the given model with this search engine.

        If the given model is not registered with this search engine, a RegistrationError
        will be raised.
        """
        # Add in custom live filters.
        if isinstance(model, QuerySet):
            model = model.model
        # Check for registration.
        if not self.is_registered(model):
            raise RegistrationError("{model!r} is not registered with this search engine".format(
                model = model,
            ))
        # Perform the unregistration.
        del self._registered_models[model]
        # Disconnect from the signalling framework.
        post_save.disconnect(self._post_save_receiver, model)
        pre_delete.disconnect(self._pre_delete_receiver, model)

    def get_registered_models(self):
        """Returns a sequence of models that have been registered with this search engine."""
        return list(self._registered_models.keys())

    def get_adapter(self, model):
        """Returns the adapter associated with the given model."""
        if self.is_registered(model):
            return self._registered_models[model]
        raise RegistrationError("{model!r} is not registered with this search engine".format(
            model = model,
        ))

    def _get_entries_for_obj(self, obj):
        """Returns a queryset of entries associated with the given obj."""
        model = obj.__class__
        content_type = ContentType.objects.get_for_model(model)
        object_id = force_text(obj.pk)
        # Get the basic list of search entries.
        search_entries = SearchEntry.objects.filter(
            content_type = content_type,
            engine_slug = self._engine_slug,
        )
        if has_int_pk(model):
            # Do a fast indexed lookup.
            object_id_int = int(obj.pk)
            search_entries = search_entries.filter(
                object_id_int = object_id_int,
            )
        else:
            # Alas, have to do a slow unindexed lookup.
            object_id_int = None
            search_entries = search_entries.filter(
                object_id = object_id,
            )
        return object_id_int, search_entries

    def _update_obj_index_iter(self, obj):
        """Either updates the given object index, or yields an unsaved search entry."""
        model = obj.__class__
        adapter = self.get_adapter(model)
        content_type = ContentType.objects.get_for_model(model)
        object_id = force_text(obj.pk)
        # Create the search entry data.
        search_entry_data = {
            "engine_slug": self._engine_slug,
            "title": adapter.get_title(obj),
            "description": adapter.get_description(obj),
            "content": adapter.get_content(obj),
            "url": adapter.get_url(obj),
            "meta_encoded": json.dumps(adapter.get_meta(obj)),
        }
        # Try to get the existing search entry.
        object_id_int, search_entries = self._get_entries_for_obj(obj)
        # Attempt to update the search entries.
        update_count = search_entries.update(**search_entry_data)
        if update_count == 0:
            # This is the first time the entry was created.
            search_entry_data.update((
                ("content_type", content_type),
                ("object_id", object_id),
                ("object_id_int", object_id_int),
            ))
            yield SearchEntry(**search_entry_data)
        elif update_count > 1:
            # Oh no! Somehow we've got duplicated search entries!
            search_entries.exclude(id=search_entries[0].id).delete()

    def update_obj_index(self, obj):
        """Updates the search index for the given obj."""
        _bulk_save_search_entries(list(self._update_obj_index_iter(obj)))

    # Signalling hooks.

    def _post_save_receiver(self, instance, **kwargs):
        """Signal handler for when a registered model has been saved."""
        if self._search_context_manager.is_active():
            # Defer the index update to the end of the open search context.
            self._search_context_manager.add_to_context(self, instance)
        else:
            self.update_obj_index(instance)

    def _pre_delete_receiver(self, instance, **kwargs):
        """Signal handler for when a registered model has been deleted."""
        _, search_entries = self._get_entries_for_obj(instance)
        search_entries.delete()

    # Searching.

    def _create_model_filter(self, models):
        """Creates a filter for the given model/queryset list."""
        filters = Q()
        for model in models:
            filter = Q()
            # Process querysets.
            if isinstance(model, QuerySet):
                sub_queryset = model
                model = model.model
                queryset = sub_queryset.values_list("pk", flat=True)
                if has_int_pk(model):
                    filter &= Q(
                        object_id_int__in = queryset,
                    )
                else:
                    live_ids = list(queryset)
                    if live_ids:
                        filter &= Q(
                            object_id__in = live_ids,
                        )
                    else:
                        # HACK: There is a bug in Django (https://code.djangoproject.com/ticket/15145) that messes up __in queries when the iterable is empty.
                        # This bit of nonsense ensures that this aspect of the query will be impossible to fulfill.
                        filter &= Q(
                            content_type = ContentType.objects.get_for_model(model).id + 1,
                        )
            # Add the model to the filter.
            content_type = ContentType.objects.get_for_model(model)
            filter &= Q(
                content_type = content_type,
            )
            # Combine with the other filters.
            filters |= filter
        return filters

    def _get_included_models(self, models):
        """Returns an iterable of models and querysets that should be included in the search query."""
        for model in models or self.get_registered_models():
            if isinstance(model, QuerySet):
                yield model
            else:
                adaptor = self.get_adapter(model)
                queryset = adaptor.get_live_queryset()
                if queryset is None:
                    yield model
                else:
                    yield queryset.all()

    def search(self, search_text, models=(), exclude=(), ranking=True, backend_name=None):
        """Performs a search using the given text, returning a queryset of SearchEntry."""
        # Check for blank search text.
        search_text = search_text.strip()
        if not search_text:
            return SearchEntry.objects.none()
        # Get the initial queryset.
        queryset = SearchEntry.objects.filter(
            engine_slug = self._engine_slug,
        )
        # Process the allowed models.
        queryset = queryset.filter(
            self._create_model_filter(self._get_included_models(models))
        ).exclude(
            self._create_model_filter(exclude)
        )
        # Perform the backend-specific full text match.
        backend = get_backend(backend_name=backend_name)
        queryset = backend.do_search(self._engine_slug, queryset, search_text)
        # Perform the backend-specific full-text ranking.
        if ranking:
            queryset = backend.do_search_ranking(self._engine_slug, queryset, search_text)
        # Return the complete queryset.
        return queryset

    def filter(self, queryset, search_text, ranking=True, backend_name=None):
        """
        Filters the given model or queryset using the given text, returning the
        modified queryset.
        """
        # If the queryset is a model, get all of them.
        if isinstance(queryset, type) and issubclass(queryset, models.Model):
            queryset = queryset._default_manager.all()
        # Check for blank search text.
        search_text = search_text.strip()
        if not search_text:
            return queryset
        # Perform the backend-specific full text match.
        backend = get_backend(backend_name=backend_name)
        queryset = backend.do_filter(self._engine_slug, queryset, search_text)
        # Perform the backend-specific full-text ranking.
        if ranking:
            queryset = backend.do_filter_ranking(self._engine_slug, queryset, search_text)
        # Return the complete queryset.
        return queryset
# The default search engine.
default_search_engine = SearchEngine("default")

# The cache for the initialized backend, keyed by dotted backend path.
_backends_cache = {}
def get_backend(backend_name=None):
    """Initializes and returns the search backend.

    Backends are cached per dotted path. When ``backend_name`` is None or
    empty, the path is taken from the ``WATSON_BACKEND`` setting (defaulting
    to the adaptive backend).
    """
    # Try to use the cached backend.
    if backend_name in _backends_cache:
        return _backends_cache[backend_name]
    # Load the backend class.
    if not backend_name:
        backend_name = getattr(settings, "WATSON_BACKEND", "watson.backends.AdaptiveSearchBackend")
        # Bug fix: re-check under the resolved name, otherwise the default
        # backend was re-imported and re-instantiated on every call.
        if backend_name in _backends_cache:
            return _backends_cache[backend_name]
    backend_module_name, backend_cls_name = backend_name.rsplit(".", 1)
    backend_module = import_module(backend_module_name)
    try:
        backend_cls = getattr(backend_module, backend_cls_name)
    except AttributeError:
        # Bug fix: the module and class names were swapped in this message.
        raise ImproperlyConfigured("Could not find a class named {backend_cls_name!r} in {backend_module_name!r}".format(
            backend_module_name = backend_module_name,
            backend_cls_name = backend_cls_name,
        ))
    # Initialize and cache the backend.
    backend = backend_cls()
    _backends_cache[backend_name] = backend
    return backend
| {
"repo_name": "arteria/django-watson",
"path": "src/watson/registration.py",
"copies": "1",
"size": "23286",
"license": "bsd-3-clause",
"hash": 5331670670279649000,
"line_mean": 36.7406807131,
"line_max": 158,
"alpha_frac": 0.6068453148,
"autogenerated": false,
"ratio": 4.516291698991466,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0023120427145976756,
"num_lines": 617
} |
""" Adapters for the :py:class:`.hardware_control.API` instances.
"""
import asyncio
import functools
from typing import TYPE_CHECKING
from .types import HardwareAPILike
if TYPE_CHECKING:
from .dev_types import HasLoop # noqa (F501)
# TODO: BC 2020-02-25 instead of overwriting __get_attribute__ in this class
# use inspect.getmembers to iterate over appropriate members of adapted
# instance and setattr on the outer instance with the proper async resolution
# logic injected. This approach avoids requiring calls to
# object.__get_attribute__(self,...) to opt out of the overwritten
# functionality. It is more readable and protected from
# unintentional recursion.
class SynchronousAdapter(HardwareAPILike):
    """ A wrapper to make every call into :py:class:`.hardware_control.API`
    synchronous.

    This class expects to wrap an asynchronous object running in its own thread
    and event loop (obj._loop). Attempting to instantiate a SynchronousAdapter
    in the main thread within its event loop will hang unless the adapted
    async object is running on its own thread and contained loop.

    In these cases, it is often helpful to instantiate the API via the
    :py:class:`opentrons.hardware_control.ThreadManager` to ensure that
    all API coroutines are resolved in a thread/loop other than the
    main thread/loop.

    Example
    -------
    .. code-block::

        >>> import opentrons.hardware_control as hc
        >>> import opentrons.hardware_control.adapters as adapts
        >>> api = hc.API.build_hardware_simulator()
        >>> sync_api = adapts.SynchronousAdapter(api)
        >>> sync_api.home()
    """

    def __init__(self, asynchronous_instance: 'HasLoop') -> None:
        """ Build the SynchronousAdapter.

        :param asynchronous_instance: The asynchronous class instance to wrap
        """
        self._obj_to_adapt = asynchronous_instance

    def __repr__(self):
        return '<SynchronousAdapter>'

    @staticmethod
    def call_coroutine_sync(loop, to_call, *args, **kwargs):
        # Schedule the coroutine on the adapted object's loop (which runs in
        # another thread) and block until it completes.
        fut = asyncio.run_coroutine_threadsafe(to_call(*args, **kwargs), loop)
        return fut.result()

    def __getattribute__(self, attr_name):
        """ Retrieve attributes from our API and wrap coroutines """
        # Almost every attribute retrieved from us will be for people actually
        # looking for an attribute of the hardware API, so check there first.
        # object.__getattribute__ is used throughout to avoid re-entering
        # this override recursively.
        obj_to_adapt = object.__getattribute__(self, '_obj_to_adapt')
        try:
            inner_attr = getattr(obj_to_adapt, attr_name)
        except AttributeError:
            # Maybe this actually was for us? Let’s find it
            return object.__getattribute__(self, attr_name)
        check = inner_attr
        if isinstance(inner_attr, functools.partial):
            # if partial func check passed in func
            check = inner_attr.func
        try:
            # if decorated func check wrapped func
            check = check.__wrapped__
        except AttributeError:
            pass
        if asyncio.iscoroutinefunction(check):
            # Return a synchronized version of the coroutine
            return functools.partial(
                object.__getattribute__(self, 'call_coroutine_sync'),
                obj_to_adapt._loop, inner_attr)
        elif asyncio.iscoroutine(check):
            # Catch awaitable properties and reify the future before returning
            fut = asyncio.run_coroutine_threadsafe(check, obj_to_adapt._loop)
            return fut.result()
        return inner_attr
| {
"repo_name": "Opentrons/labware",
"path": "api/src/opentrons/hardware_control/adapters.py",
"copies": "3",
"size": "3543",
"license": "apache-2.0",
"hash": 8390573801762520000,
"line_mean": 39.2386363636,
"line_max": 79,
"alpha_frac": 0.6676080203,
"autogenerated": false,
"ratio": 4.366214549938348,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00023191094619666046,
"num_lines": 88
} |
"""Adapter to wrap the rachiopy api for home assistant."""
from __future__ import annotations
import logging
import voluptuous as vol
from homeassistant.const import EVENT_HOMEASSISTANT_STOP, HTTP_OK
from homeassistant.helpers import config_validation as cv
from .const import (
DOMAIN,
KEY_DEVICES,
KEY_ENABLED,
KEY_EXTERNAL_ID,
KEY_FLEX_SCHEDULES,
KEY_ID,
KEY_MAC_ADDRESS,
KEY_MODEL,
KEY_NAME,
KEY_SCHEDULES,
KEY_SERIAL_NUMBER,
KEY_STATUS,
KEY_USERNAME,
KEY_ZONES,
MODEL_GENERATION_1,
SERVICE_PAUSE_WATERING,
SERVICE_RESUME_WATERING,
)
from .webhooks import LISTEN_EVENT_TYPES, WEBHOOK_CONST_ID
# Module-level logger for this integration.
_LOGGER = logging.getLogger(__name__)
# Service-call attribute names.
ATTR_DEVICES = "devices"
ATTR_DURATION = "duration"
# Error code the Rachio API returns for controllers the account does not
# own (checked against the webhook response in RachioPerson.setup).
PERMISSION_ERROR = "7"
# Pause service: optional device list plus pause duration in minutes
# (default 60).
PAUSE_SERVICE_SCHEMA = vol.Schema(
    {
        vol.Optional(ATTR_DEVICES): cv.string,
        vol.Optional(ATTR_DURATION, default=60): cv.positive_int,
    }
)
# Resume service: optional device list only.
RESUME_SERVICE_SCHEMA = vol.Schema({vol.Optional(ATTR_DEVICES): cv.string})
class RachioPerson:
    """Represent a Rachio user account and its controllers."""
    def __init__(self, rachio, config_entry):
        """Create an object from the provided API instance."""
        # Use API token to get user ID
        self.rachio = rachio
        self.config_entry = config_entry
        # Filled in by setup() from the Rachio API.
        self.username = None
        self._id = None
        self._controllers = []
    def setup(self, hass):
        """Rachio device setup.

        Resolves the account, wraps each owned controller in a RachioIro,
        and registers the pause/resume services when any controller
        supports them.
        NOTE(review): the asserts below are stripped under ``python -O``;
        consider raising explicit exceptions for API/user errors.
        """
        all_devices = []
        can_pause = False
        response = self.rachio.person.info()
        # API responses are pairs: index 0 carries the HTTP status, index 1
        # the JSON payload.
        assert int(response[0][KEY_STATUS]) == HTTP_OK, "API key error"
        self._id = response[1][KEY_ID]
        # Use user ID to get user data
        data = self.rachio.person.get(self._id)
        assert int(data[0][KEY_STATUS]) == HTTP_OK, "User ID error"
        self.username = data[1][KEY_USERNAME]
        devices = data[1][KEY_DEVICES]
        for controller in devices:
            webhooks = self.rachio.notification.get_device_webhook(controller[KEY_ID])[
                1
            ]
            # The API does not provide a way to tell if a controller is shared
            # or if they are the owner. To work around this problem we fetch the webhooks
            # before we setup the device so we can skip it instead of failing.
            # webhooks are normally a list, however if there is an error
            # rachio hands us back a dict
            if isinstance(webhooks, dict):
                if webhooks.get("code") == PERMISSION_ERROR:
                    _LOGGER.info(
                        "Not adding controller '%s', only controllers owned by '%s' may be added",
                        controller[KEY_NAME],
                        self.username,
                    )
                else:
                    _LOGGER.error(
                        "Failed to add rachio controller '%s' because of an error: %s",
                        controller[KEY_NAME],
                        webhooks.get("error", "Unknown Error"),
                    )
                continue
            rachio_iro = RachioIro(hass, self.rachio, controller, webhooks)
            rachio_iro.setup()
            self._controllers.append(rachio_iro)
            all_devices.append(rachio_iro.name)
            # Generation 1 controllers don't support pause or resume
            if rachio_iro.model.split("_")[0] != MODEL_GENERATION_1:
                can_pause = True
        _LOGGER.info('Using Rachio API as user "%s"', self.username)
        # Both service handlers close over ``all_devices`` so that calls
        # without an explicit device list act on every controller.
        def pause_water(service):
            """Service to pause watering on all or specific controllers."""
            duration = service.data[ATTR_DURATION]
            devices = service.data.get(ATTR_DEVICES, all_devices)
            for iro in self._controllers:
                if iro.name in devices:
                    iro.pause_watering(duration)
        def resume_water(service):
            """Service to resume watering on all or specific controllers."""
            devices = service.data.get(ATTR_DEVICES, all_devices)
            for iro in self._controllers:
                if iro.name in devices:
                    iro.resume_watering()
        if can_pause:
            hass.services.register(
                DOMAIN,
                SERVICE_PAUSE_WATERING,
                pause_water,
                schema=PAUSE_SERVICE_SCHEMA,
            )
            hass.services.register(
                DOMAIN,
                SERVICE_RESUME_WATERING,
                resume_water,
                schema=RESUME_SERVICE_SCHEMA,
            )
    @property
    def user_id(self) -> str:
        """Get the user ID as defined by the Rachio API."""
        return self._id
    @property
    def controllers(self) -> list:
        """Get a list of controllers managed by this account."""
        return self._controllers
    def start_multiple_zones(self, zones) -> None:
        """Start multiple zones."""
        self.rachio.zone.start_multiple(zones)
class RachioIro:
    """Represent a Rachio Iro sprinkler controller."""
    def __init__(self, hass, rachio, data, webhooks):
        """Initialize a Rachio device.

        ``data`` is the controller payload from the Rachio API;
        ``webhooks`` are the webhooks already registered for it.
        """
        self.hass = hass
        self.rachio = rachio
        self._id = data[KEY_ID]
        self.name = data[KEY_NAME]
        self.serial_number = data[KEY_SERIAL_NUMBER]
        self.mac_address = data[KEY_MAC_ADDRESS]
        self.model = data[KEY_MODEL]
        self._zones = data[KEY_ZONES]
        self._schedules = data[KEY_SCHEDULES]
        self._flex_schedules = data[KEY_FLEX_SCHEDULES]
        self._init_data = data
        self._webhooks = webhooks
        _LOGGER.debug('%s has ID "%s"', self, self.controller_id)
    def setup(self):
        """Rachio Iro setup for webhooks."""
        # Listen for all updates
        self._init_webhooks()
    def _init_webhooks(self) -> None:
        """Start getting updates from the Rachio API."""
        # Captured by _deinit_webhooks below; assigned once the new webhook
        # is registered so shutdown can delete it too.
        current_webhook_id = None
        # First delete any old webhooks that may have stuck around
        def _deinit_webhooks(_) -> None:
            """Stop getting updates from the Rachio API."""
            if not self._webhooks:
                # We fetched webhooks when we created the device, however if we call _init_webhooks
                # again we need to fetch again
                self._webhooks = self.rachio.notification.get_device_webhook(
                    self.controller_id
                )[1]
            for webhook in self._webhooks:
                if (
                    webhook[KEY_EXTERNAL_ID].startswith(WEBHOOK_CONST_ID)
                    or webhook[KEY_ID] == current_webhook_id
                ):
                    self.rachio.notification.delete(webhook[KEY_ID])
            self._webhooks = None
        # Run once now to purge stale webhooks from a previous run before
        # registering a fresh one.
        _deinit_webhooks(None)
        # Choose which events to listen for and get their IDs
        event_types = []
        for event_type in self.rachio.notification.get_webhook_event_type()[1]:
            if event_type[KEY_NAME] in LISTEN_EVENT_TYPES:
                event_types.append({"id": event_type[KEY_ID]})
        # Register to listen to these events from the device
        url = self.rachio.webhook_url
        auth = WEBHOOK_CONST_ID + self.rachio.webhook_auth
        new_webhook = self.rachio.notification.add(
            self.controller_id, auth, url, event_types
        )
        # Save ID for deletion at shutdown
        current_webhook_id = new_webhook[1][KEY_ID]
        self.hass.bus.listen(EVENT_HOMEASSISTANT_STOP, _deinit_webhooks)
    def __str__(self) -> str:
        """Display the controller as a string."""
        return f'Rachio controller "{self.name}"'
    @property
    def controller_id(self) -> str:
        """Return the Rachio API controller ID."""
        return self._id
    @property
    def current_schedule(self) -> str:
        """Return the schedule that the device is running right now."""
        return self.rachio.device.current_schedule(self.controller_id)[1]
    @property
    def init_data(self) -> dict:
        """Return the information used to set up the controller."""
        return self._init_data
    def list_zones(self, include_disabled=False) -> list:
        """Return a list of the zone dicts connected to the device."""
        # All zones
        if include_disabled:
            return self._zones
        # Only enabled zones
        return [z for z in self._zones if z[KEY_ENABLED]]
    def get_zone(self, zone_id) -> dict | None:
        """Return the zone with the given ID, or None if not found."""
        for zone in self.list_zones(include_disabled=True):
            if zone[KEY_ID] == zone_id:
                return zone
        return None
    def list_schedules(self) -> list:
        """Return a list of fixed schedules."""
        return self._schedules
    def list_flex_schedules(self) -> list:
        """Return a list of flex schedules."""
        return self._flex_schedules
    def stop_watering(self) -> None:
        """Stop watering all zones connected to this controller."""
        self.rachio.device.stop_water(self.controller_id)
        _LOGGER.info("Stopped watering of all zones on %s", self)
    def pause_watering(self, duration) -> None:
        """Pause watering on this controller for ``duration`` minutes."""
        # duration is in minutes; *60 converts it (presumably to seconds
        # for the API — confirm against rachiopy).
        self.rachio.device.pause_zone_run(self.controller_id, duration * 60)
        _LOGGER.debug("Paused watering on %s for %s minutes", self, duration)
    def resume_watering(self) -> None:
        """Resume paused watering on this controller."""
        self.rachio.device.resume_zone_run(self.controller_id)
        _LOGGER.debug("Resuming watering on %s", self)
| {
"repo_name": "sander76/home-assistant",
"path": "homeassistant/components/rachio/device.py",
"copies": "3",
"size": "9590",
"license": "apache-2.0",
"hash": 6146850347762021000,
"line_mean": 34.2573529412,
"line_max": 99,
"alpha_frac": 0.5822732013,
"autogenerated": false,
"ratio": 3.9465020576131686,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00019876558045758312,
"num_lines": 272
} |
"""Adapter to wrap the rachiopy api for home assistant."""
import logging
from typing import Optional
import voluptuous as vol
from homeassistant.const import EVENT_HOMEASSISTANT_STOP, HTTP_OK
from homeassistant.helpers import config_validation as cv
from .const import (
DOMAIN,
KEY_DEVICES,
KEY_ENABLED,
KEY_EXTERNAL_ID,
KEY_FLEX_SCHEDULES,
KEY_ID,
KEY_MAC_ADDRESS,
KEY_MODEL,
KEY_NAME,
KEY_SCHEDULES,
KEY_SERIAL_NUMBER,
KEY_STATUS,
KEY_USERNAME,
KEY_ZONES,
MODEL_GENERATION_1,
SERVICE_PAUSE_WATERING,
SERVICE_RESUME_WATERING,
)
from .webhooks import LISTEN_EVENT_TYPES, WEBHOOK_CONST_ID
# Module-level logger for this integration.
_LOGGER = logging.getLogger(__name__)
# Service-call attribute names.
ATTR_DEVICES = "devices"
ATTR_DURATION = "duration"
# Error code the Rachio API returns for controllers the account does not
# own (checked against the webhook response in RachioPerson.setup).
PERMISSION_ERROR = "7"
# Pause service: optional device list plus pause duration in minutes
# (default 60).
PAUSE_SERVICE_SCHEMA = vol.Schema(
    {
        vol.Optional(ATTR_DEVICES): cv.string,
        vol.Optional(ATTR_DURATION, default=60): cv.positive_int,
    }
)
# Resume service: optional device list only.
RESUME_SERVICE_SCHEMA = vol.Schema({vol.Optional(ATTR_DEVICES): cv.string})
class RachioPerson:
    """Represent a Rachio user account and its controllers."""
    def __init__(self, rachio, config_entry):
        """Create an object from the provided API instance."""
        # Use API token to get user ID
        self.rachio = rachio
        self.config_entry = config_entry
        # Filled in by setup() from the Rachio API.
        self.username = None
        self._id = None
        self._controllers = []
    def setup(self, hass):
        """Rachio device setup.

        Resolves the account, wraps each owned controller in a RachioIro,
        and registers the pause/resume services when any controller
        supports them.
        NOTE(review): the asserts below are stripped under ``python -O``;
        consider raising explicit exceptions for API/user errors.
        """
        all_devices = []
        can_pause = False
        response = self.rachio.person.info()
        # API responses are pairs: index 0 carries the HTTP status, index 1
        # the JSON payload.
        assert int(response[0][KEY_STATUS]) == HTTP_OK, "API key error"
        self._id = response[1][KEY_ID]
        # Use user ID to get user data
        data = self.rachio.person.get(self._id)
        assert int(data[0][KEY_STATUS]) == HTTP_OK, "User ID error"
        self.username = data[1][KEY_USERNAME]
        devices = data[1][KEY_DEVICES]
        for controller in devices:
            webhooks = self.rachio.notification.get_device_webhook(controller[KEY_ID])[
                1
            ]
            # The API does not provide a way to tell if a controller is shared
            # or if they are the owner. To work around this problem we fetch the webhooks
            # before we setup the device so we can skip it instead of failing.
            # webhooks are normally a list, however if there is an error
            # rachio hands us back a dict
            if isinstance(webhooks, dict):
                if webhooks.get("code") == PERMISSION_ERROR:
                    _LOGGER.info(
                        "Not adding controller '%s', only controllers owned by '%s' may be added",
                        controller[KEY_NAME],
                        self.username,
                    )
                else:
                    _LOGGER.error(
                        "Failed to add rachio controller '%s' because of an error: %s",
                        controller[KEY_NAME],
                        webhooks.get("error", "Unknown Error"),
                    )
                continue
            rachio_iro = RachioIro(hass, self.rachio, controller, webhooks)
            rachio_iro.setup()
            self._controllers.append(rachio_iro)
            all_devices.append(rachio_iro.name)
            # Generation 1 controllers don't support pause or resume
            if rachio_iro.model.split("_")[0] != MODEL_GENERATION_1:
                can_pause = True
        _LOGGER.info('Using Rachio API as user "%s"', self.username)
        # Both service handlers close over ``all_devices`` so that calls
        # without an explicit device list act on every controller.
        def pause_water(service):
            """Service to pause watering on all or specific controllers."""
            duration = service.data[ATTR_DURATION]
            devices = service.data.get(ATTR_DEVICES, all_devices)
            for iro in self._controllers:
                if iro.name in devices:
                    iro.pause_watering(duration)
        def resume_water(service):
            """Service to resume watering on all or specific controllers."""
            devices = service.data.get(ATTR_DEVICES, all_devices)
            for iro in self._controllers:
                if iro.name in devices:
                    iro.resume_watering()
        if can_pause:
            hass.services.register(
                DOMAIN,
                SERVICE_PAUSE_WATERING,
                pause_water,
                schema=PAUSE_SERVICE_SCHEMA,
            )
            hass.services.register(
                DOMAIN,
                SERVICE_RESUME_WATERING,
                resume_water,
                schema=RESUME_SERVICE_SCHEMA,
            )
    @property
    def user_id(self) -> str:
        """Get the user ID as defined by the Rachio API."""
        return self._id
    @property
    def controllers(self) -> list:
        """Get a list of controllers managed by this account."""
        return self._controllers
    def start_multiple_zones(self, zones) -> None:
        """Start multiple zones."""
        self.rachio.zone.start_multiple(zones)
class RachioIro:
    """Represent a Rachio Iro sprinkler controller."""
    def __init__(self, hass, rachio, data, webhooks):
        """Initialize a Rachio device.

        ``data`` is the controller payload from the Rachio API;
        ``webhooks`` are the webhooks already registered for it.
        """
        self.hass = hass
        self.rachio = rachio
        self._id = data[KEY_ID]
        self.name = data[KEY_NAME]
        self.serial_number = data[KEY_SERIAL_NUMBER]
        self.mac_address = data[KEY_MAC_ADDRESS]
        self.model = data[KEY_MODEL]
        self._zones = data[KEY_ZONES]
        self._schedules = data[KEY_SCHEDULES]
        self._flex_schedules = data[KEY_FLEX_SCHEDULES]
        self._init_data = data
        self._webhooks = webhooks
        _LOGGER.debug('%s has ID "%s"', self, self.controller_id)
    def setup(self):
        """Rachio Iro setup for webhooks."""
        # Listen for all updates
        self._init_webhooks()
    def _init_webhooks(self) -> None:
        """Start getting updates from the Rachio API."""
        # Captured by _deinit_webhooks below; assigned once the new webhook
        # is registered so shutdown can delete it too.
        current_webhook_id = None
        # First delete any old webhooks that may have stuck around
        def _deinit_webhooks(_) -> None:
            """Stop getting updates from the Rachio API."""
            if not self._webhooks:
                # We fetched webhooks when we created the device, however if we call _init_webhooks
                # again we need to fetch again
                self._webhooks = self.rachio.notification.get_device_webhook(
                    self.controller_id
                )[1]
            for webhook in self._webhooks:
                if (
                    webhook[KEY_EXTERNAL_ID].startswith(WEBHOOK_CONST_ID)
                    or webhook[KEY_ID] == current_webhook_id
                ):
                    self.rachio.notification.delete(webhook[KEY_ID])
            self._webhooks = None
        # Run once now to purge stale webhooks from a previous run before
        # registering a fresh one.
        _deinit_webhooks(None)
        # Choose which events to listen for and get their IDs
        event_types = []
        for event_type in self.rachio.notification.get_webhook_event_type()[1]:
            if event_type[KEY_NAME] in LISTEN_EVENT_TYPES:
                event_types.append({"id": event_type[KEY_ID]})
        # Register to listen to these events from the device
        url = self.rachio.webhook_url
        auth = WEBHOOK_CONST_ID + self.rachio.webhook_auth
        new_webhook = self.rachio.notification.add(
            self.controller_id, auth, url, event_types
        )
        # Save ID for deletion at shutdown
        current_webhook_id = new_webhook[1][KEY_ID]
        self.hass.bus.listen(EVENT_HOMEASSISTANT_STOP, _deinit_webhooks)
    def __str__(self) -> str:
        """Display the controller as a string."""
        return f'Rachio controller "{self.name}"'
    @property
    def controller_id(self) -> str:
        """Return the Rachio API controller ID."""
        return self._id
    @property
    def current_schedule(self) -> str:
        """Return the schedule that the device is running right now."""
        return self.rachio.device.current_schedule(self.controller_id)[1]
    @property
    def init_data(self) -> dict:
        """Return the information used to set up the controller."""
        return self._init_data
    def list_zones(self, include_disabled=False) -> list:
        """Return a list of the zone dicts connected to the device."""
        # All zones
        if include_disabled:
            return self._zones
        # Only enabled zones
        return [z for z in self._zones if z[KEY_ENABLED]]
    def get_zone(self, zone_id) -> Optional[dict]:
        """Return the zone with the given ID, or None if not found."""
        for zone in self.list_zones(include_disabled=True):
            if zone[KEY_ID] == zone_id:
                return zone
        return None
    def list_schedules(self) -> list:
        """Return a list of fixed schedules."""
        return self._schedules
    def list_flex_schedules(self) -> list:
        """Return a list of flex schedules."""
        return self._flex_schedules
    def stop_watering(self) -> None:
        """Stop watering all zones connected to this controller."""
        self.rachio.device.stop_water(self.controller_id)
        _LOGGER.info("Stopped watering of all zones on %s", self)
    def pause_watering(self, duration) -> None:
        """Pause watering on this controller for ``duration`` minutes."""
        # duration is in minutes; *60 converts it (presumably to seconds
        # for the API — confirm against rachiopy).
        self.rachio.device.pause_zone_run(self.controller_id, duration * 60)
        _LOGGER.debug("Paused watering on %s for %s minutes", self, duration)
    def resume_watering(self) -> None:
        """Resume paused watering on this controller."""
        self.rachio.device.resume_zone_run(self.controller_id)
        _LOGGER.debug("Resuming watering on %s", self)
| {
"repo_name": "turbokongen/home-assistant",
"path": "homeassistant/components/rachio/device.py",
"copies": "4",
"size": "9585",
"license": "apache-2.0",
"hash": -8905244874139958000,
"line_mean": 34.36900369,
"line_max": 99,
"alpha_frac": 0.5826812728,
"autogenerated": false,
"ratio": 3.949320148331273,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00019949903278399487,
"num_lines": 271
} |
"""Adapter to wrap the rachiopy api for home assistant."""
import logging
from typing import Optional
from homeassistant.const import EVENT_HOMEASSISTANT_STOP, HTTP_OK
from .const import (
KEY_DEVICES,
KEY_ENABLED,
KEY_EXTERNAL_ID,
KEY_FLEX_SCHEDULES,
KEY_ID,
KEY_MAC_ADDRESS,
KEY_MODEL,
KEY_NAME,
KEY_SCHEDULES,
KEY_SERIAL_NUMBER,
KEY_STATUS,
KEY_USERNAME,
KEY_ZONES,
)
from .webhooks import LISTEN_EVENT_TYPES, WEBHOOK_CONST_ID
# Module-level logger for this integration.
_LOGGER = logging.getLogger(__name__)
class RachioPerson:
    """Represent a Rachio user account and its controllers."""
    def __init__(self, rachio, config_entry):
        """Create an object from the provided API instance."""
        # Use API token to get user ID
        self.rachio = rachio
        self.config_entry = config_entry
        # Filled in by setup() from the Rachio API.
        self.username = None
        self._id = None
        self._controllers = []
    def setup(self, hass):
        """Rachio device setup.

        Resolves the account and wraps each owned controller in a RachioIro.
        NOTE(review): the asserts below are stripped under ``python -O``;
        consider raising explicit exceptions for API/user errors.
        """
        response = self.rachio.person.getInfo()
        # API responses are pairs: index 0 carries the HTTP status, index 1
        # the JSON payload.
        assert int(response[0][KEY_STATUS]) == HTTP_OK, "API key error"
        self._id = response[1][KEY_ID]
        # Use user ID to get user data
        data = self.rachio.person.get(self._id)
        assert int(data[0][KEY_STATUS]) == HTTP_OK, "User ID error"
        self.username = data[1][KEY_USERNAME]
        devices = data[1][KEY_DEVICES]
        for controller in devices:
            webhooks = self.rachio.notification.getDeviceWebhook(controller[KEY_ID])[1]
            # The API does not provide a way to tell if a controller is shared
            # or if they are the owner. To work around this problem we fetch the webhooks
            # before we setup the device so we can skip it instead of failing.
            # webhooks are normally a list, however if there is an error
            # rachio hands us back a dict
            if isinstance(webhooks, dict):
                _LOGGER.error(
                    "Failed to add rachio controller '%s' because of an error: %s",
                    controller[KEY_NAME],
                    webhooks.get("error", "Unknown Error"),
                )
                continue
            rachio_iro = RachioIro(hass, self.rachio, controller, webhooks)
            rachio_iro.setup()
            self._controllers.append(rachio_iro)
        _LOGGER.info('Using Rachio API as user "%s"', self.username)
    @property
    def user_id(self) -> str:
        """Get the user ID as defined by the Rachio API."""
        return self._id
    @property
    def controllers(self) -> list:
        """Get a list of controllers managed by this account."""
        return self._controllers
class RachioIro:
    """Represent a Rachio Iro sprinkler controller."""
    def __init__(self, hass, rachio, data, webhooks):
        """Initialize a Rachio device.

        ``data`` is the controller payload from the Rachio API;
        ``webhooks`` are the webhooks already registered for it.
        """
        self.hass = hass
        self.rachio = rachio
        self._id = data[KEY_ID]
        self.name = data[KEY_NAME]
        self.serial_number = data[KEY_SERIAL_NUMBER]
        self.mac_address = data[KEY_MAC_ADDRESS]
        self.model = data[KEY_MODEL]
        self._zones = data[KEY_ZONES]
        self._schedules = data[KEY_SCHEDULES]
        self._flex_schedules = data[KEY_FLEX_SCHEDULES]
        self._init_data = data
        self._webhooks = webhooks
        _LOGGER.debug('%s has ID "%s"', str(self), self.controller_id)
    def setup(self):
        """Rachio Iro setup for webhooks."""
        # Listen for all updates
        self._init_webhooks()
    def _init_webhooks(self) -> None:
        """Start getting updates from the Rachio API."""
        # Captured by _deinit_webhooks below; assigned once the new webhook
        # is registered so shutdown can delete it too.
        current_webhook_id = None
        # First delete any old webhooks that may have stuck around
        def _deinit_webhooks(_) -> None:
            """Stop getting updates from the Rachio API."""
            if not self._webhooks:
                # We fetched webhooks when we created the device, however if we call _init_webhooks
                # again we need to fetch again
                self._webhooks = self.rachio.notification.getDeviceWebhook(
                    self.controller_id
                )[1]
            for webhook in self._webhooks:
                if (
                    webhook[KEY_EXTERNAL_ID].startswith(WEBHOOK_CONST_ID)
                    or webhook[KEY_ID] == current_webhook_id
                ):
                    self.rachio.notification.deleteWebhook(webhook[KEY_ID])
            self._webhooks = None
        # Run once now to purge stale webhooks from a previous run before
        # registering a fresh one.
        _deinit_webhooks(None)
        # Choose which events to listen for and get their IDs
        event_types = []
        for event_type in self.rachio.notification.getWebhookEventType()[1]:
            if event_type[KEY_NAME] in LISTEN_EVENT_TYPES:
                event_types.append({"id": event_type[KEY_ID]})
        # Register to listen to these events from the device
        url = self.rachio.webhook_url
        auth = WEBHOOK_CONST_ID + self.rachio.webhook_auth
        new_webhook = self.rachio.notification.postWebhook(
            self.controller_id, auth, url, event_types
        )
        # Save ID for deletion at shutdown
        current_webhook_id = new_webhook[1][KEY_ID]
        self.hass.bus.listen(EVENT_HOMEASSISTANT_STOP, _deinit_webhooks)
    def __str__(self) -> str:
        """Display the controller as a string."""
        return f'Rachio controller "{self.name}"'
    @property
    def controller_id(self) -> str:
        """Return the Rachio API controller ID."""
        return self._id
    @property
    def current_schedule(self) -> str:
        """Return the schedule that the device is running right now."""
        return self.rachio.device.getCurrentSchedule(self.controller_id)[1]
    @property
    def init_data(self) -> dict:
        """Return the information used to set up the controller."""
        return self._init_data
    def list_zones(self, include_disabled=False) -> list:
        """Return a list of the zone dicts connected to the device."""
        # All zones
        if include_disabled:
            return self._zones
        # Only enabled zones
        return [z for z in self._zones if z[KEY_ENABLED]]
    def get_zone(self, zone_id) -> Optional[dict]:
        """Return the zone with the given ID, or None if not found."""
        for zone in self.list_zones(include_disabled=True):
            if zone[KEY_ID] == zone_id:
                return zone
        return None
    def list_schedules(self) -> list:
        """Return a list of fixed schedules."""
        return self._schedules
    def list_flex_schedules(self) -> list:
        """Return a list of flex schedules."""
        return self._flex_schedules
    def stop_watering(self) -> None:
        """Stop watering all zones connected to this controller."""
        self.rachio.device.stopWater(self.controller_id)
        _LOGGER.info("Stopped watering of all zones on %s", str(self))
| {
"repo_name": "mKeRix/home-assistant",
"path": "homeassistant/components/rachio/device.py",
"copies": "7",
"size": "6803",
"license": "mit",
"hash": 2191683429331065300,
"line_mean": 34.4322916667,
"line_max": 99,
"alpha_frac": 0.5951785977,
"autogenerated": false,
"ratio": 3.900802752293578,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.000231793506898844,
"num_lines": 192
} |
"""Adaptive active learning
"""
import copy
import numpy as np
from joblib import Parallel, delayed
from libact.base.dataset import Dataset
from libact.base.interfaces import QueryStrategy, ContinuousModel
from libact.utils import inherit_docstring_from, seed_random_state, zip
from libact.models.multilabel import BinaryRelevance
def _calc_approx_err(br, dataset, X_pool):
br.train(dataset)
br_real = br.predict_real(X_pool)
pos = np.copy(br_real)
pos[br_real<0] = 1
pos = np.max((1.-pos), axis=1)
neg = np.copy(br_real)
neg[br_real>0] = -1
neg = np.max((1.+neg), axis=1)
err = neg + pos
return np.sum(err)
class AdaptiveActiveLearning(QueryStrategy):
    r"""Adaptive Active Learning
    This approach combines Max Margin Uncertainty Sampling and Label
    Cardinality Inconsistency.
    Parameters
    ----------
    base_clf : ContinuousModel object instance
        The base learner for binary relevance.
    betas : list of float, 0 <= beta <= 1, default: [0., 0.1, ..., 0.9, 1.]
        List of trade-off parameter that balances the relative importance
        degrees of the two measures.
    random_state : {int, np.random.RandomState instance, None}, optional (default=None)
        If int or None, random_state is passed as parameter to generate
        np.random.RandomState instance. if np.random.RandomState instance,
        random_state is the random number generator.
    n_jobs : int, optional, default: 1
        The number of jobs to use for the computation. If -1 all CPUs are
        used. If 1 is given, no parallel computing code is used at all, which is
        useful for debugging. For n_jobs below -1, (n_cpus + 1 + n_jobs) are
        used. Thus for n_jobs = -2, all CPUs but one are used.
    Attributes
    ----------
    Examples
    --------
    Here is an example of declaring a MMC query_strategy object:
    .. code-block:: python
       from libact.query_strategies.multilabel import AdaptiveActiveLearning
       from sklearn.linear_model import LogisticRegression
       qs = AdaptiveActiveLearning(
           dataset, # Dataset object
           base_clf=LogisticRegression()
       )
    References
    ----------
    .. [1] Li, Xin, and Yuhong Guo. "Active Learning with Multi-Label SVM
           Classification." IJCAI. 2013.
    """
    def __init__(self, dataset, base_clf, betas=None, n_jobs=1,
                 random_state=None):
        super(AdaptiveActiveLearning, self).__init__(dataset)
        self.n_labels = len(self.dataset.data[0][1])
        # Deep-copy so later training does not mutate the caller's model.
        self.base_clf = copy.deepcopy(base_clf)
        # TODO check beta value
        self.betas = betas
        if self.betas is None:
            self.betas = [i/10. for i in range(0, 11)]
        self.n_jobs = n_jobs
        self.random_state_ = seed_random_state(random_state)
    @inherit_docstring_from(QueryStrategy)
    def make_query(self):
        dataset = self.dataset
        X, Y = dataset.get_labeled_entries()
        unlabeled_entry_ids, X_pool = dataset.get_unlabeled_entries()
        Y = np.array(Y)
        X, X_pool = np.array(X), np.array(X_pool)
        clf = BinaryRelevance(self.base_clf, n_jobs=self.n_jobs)
        clf.train(dataset)
        real = clf.predict_real(X_pool)
        pred = clf.predict(X_pool)
        # Separation Margin: distance between the smallest positive and the
        # largest negative decision value per pool instance; uncertainty is
        # its inverse (smaller margin -> more uncertain).
        pos = np.copy(real)
        pos[real<=0] = np.inf
        neg = np.copy(real)
        neg[real>=0] = -np.inf
        separation_margin = pos.min(axis=1) - neg.max(axis=1)
        uncertainty = 1. / separation_margin
        # Label Cardinality Inconsistency: |#predicted positive labels -
        # average #positive labels in the labeled set| (sqrt of the square).
        average_pos_lbl = Y.mean(axis=0).sum()
        label_cardinality = np.sqrt((pred.sum(axis=1) - average_pos_lbl)**2)
        # Union of the score-maximizing instances over all trade-off betas.
        candidate_idx_set = set()
        for b in self.betas:
            # score shape = (len(X_pool), )
            score = uncertainty**b * label_cardinality**(1.-b)
            for idx in np.where(score == np.max(score))[0]:
                candidate_idx_set.add(idx)
        candidates = list(candidate_idx_set)
        # For each candidate, retrain binary relevance with the candidate
        # added under its predicted (pseudo) labels and measure the
        # approximate error on the pool (see _calc_approx_err).
        approx_err = Parallel(n_jobs=self.n_jobs, backend='threading')(
            delayed(_calc_approx_err)(
                BinaryRelevance(self.base_clf),
                Dataset(np.vstack((X, X_pool[idx])), np.vstack((Y, pred[idx]))),
                X_pool)
            for idx in candidates)
        # Query the candidate minimizing the approximate error; break ties
        # uniformly at random using the seeded RNG.
        choices = np.where(np.array(approx_err) == np.min(approx_err))[0]
        ask_idx = candidates[self.random_state_.choice(choices)]
        return unlabeled_entry_ids[ask_idx]
| {
"repo_name": "ntucllab/libact",
"path": "libact/query_strategies/multilabel/adaptive_active_learning.py",
"copies": "1",
"size": "5082",
"license": "bsd-2-clause",
"hash": -4465277713873521000,
"line_mean": 31.5769230769,
"line_max": 87,
"alpha_frac": 0.5979929162,
"autogenerated": false,
"ratio": 3.502412129565817,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46004050457658163,
"avg_score": null,
"num_lines": null
} |
""" Adaptive Gradient Clipping
An impl of AGC, as per (https://arxiv.org/abs/2102.06171):
@article{brock2021high,
author={Andrew Brock and Soham De and Samuel L. Smith and Karen Simonyan},
title={High-Performance Large-Scale Image Recognition Without Normalization},
journal={arXiv preprint arXiv:},
year={2021}
}
Code references:
* Official JAX impl (paper authors): https://github.com/deepmind/deepmind-research/tree/master/nfnets
* Phil Wang's PyTorch gist: https://gist.github.com/lucidrains/0d6560077edac419ab5d3aa29e674d5c
Hacked together by / Copyright 2021 Ross Wightman
"""
import torch
def unitwise_norm(x, norm_type=2.0):
    """Compute the norm of ``x`` per output unit.

    Scalars and vectors are reduced to a single norm. Higher-rank tensors
    are reduced over every dimension except the first — the output-channel
    dim for nn.ConvNd / nn.Linear weights — keeping that dim so the result
    broadcasts against ``x``. Other layouts (possibly MHA) may need
    special-casing.
    """
    if x.ndim > 1:
        reduce_dims = tuple(range(1, x.ndim))
        return x.norm(norm_type, dim=reduce_dims, keepdim=True)
    return x.norm(norm_type)
def adaptive_clip_grad(parameters, clip_factor=0.01, eps=1e-3, norm_type=2.0):
    """Clip gradients unit-wise relative to parameter norms (AGC).

    For every parameter, each unit's gradient is rescaled whenever its
    unit-wise norm exceeds ``clip_factor * max(unit_param_norm, eps)``.
    Operates in place on ``p.grad``; parameters without gradients are
    skipped. Accepts a single tensor or an iterable of tensors.
    """
    if isinstance(parameters, torch.Tensor):
        parameters = [parameters]
    for param in parameters:
        if param.grad is None:
            continue
        weights = param.detach()
        grads = param.grad.detach()
        # Per-unit clipping threshold; eps keeps near-zero weights clippable.
        limit = unitwise_norm(weights, norm_type=norm_type).clamp_(min=eps).mul_(clip_factor)
        grad_norm = unitwise_norm(grads, norm_type=norm_type)
        # Rescale to the threshold; 1e-6 floor avoids division by zero.
        rescaled = grads * (limit / grad_norm.clamp(min=1e-6))
        updated = torch.where(grad_norm < limit, grads, rescaled)
        param.grad.detach().copy_(updated)
| {
"repo_name": "rwightman/pytorch-image-models",
"path": "timm/utils/agc.py",
"copies": "1",
"size": "1624",
"license": "apache-2.0",
"hash": -7188842237585664000,
"line_mean": 37.6666666667,
"line_max": 103,
"alpha_frac": 0.6828817734,
"autogenerated": false,
"ratio": 3.0583804143126176,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4241262187712618,
"avg_score": null,
"num_lines": null
} |
# Adaptive Histogram Equalization
#
# Demonstrates adaptive histogram equalization (AHE) for contrast
# enhancement. Unlike a single global equalization, AHE equalizes the
# histogram per image region, improving local contrast. The clip_limit
# argument bounds how far the contrast may be amplified.
import sensor, image, time

# Camera bring-up: grayscale QQVGA, then let the sensor settle.
sensor.reset()
sensor.set_pixformat(sensor.GRAYSCALE)
sensor.set_framesize(sensor.QQVGA)
sensor.skip_frames(time=2000)

fps_clock = time.clock()

while True:
    fps_clock.tick()
    # clip_limit < 0 gives unclipped AHE, which can produce heavy contrast
    # noise. clip_limit == 1 is a no-op. Values slightly above 1 work best;
    # the larger the value, the closer to unclipped AHE with big contrast
    # swings.
    frame = sensor.snapshot().histeq(adaptive=True, clip_limit=3)
    print(fps_clock.fps())
| {
"repo_name": "openmv/openmv",
"path": "scripts/examples/Arduino/Portenta-H7/04-Image-Filters/adaptive_histogram_equalization.py",
"copies": "2",
"size": "1062",
"license": "mit",
"hash": 732665345149227500,
"line_mean": 35.6206896552,
"line_max": 78,
"alpha_frac": 0.7589453861,
"autogenerated": false,
"ratio": 3.962686567164179,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5721631953264179,
"avg_score": null,
"num_lines": null
} |
"""Adaptive Smoothed Aggregation."""
from __future__ import absolute_import
from warnings import warn
import numpy as np
import scipy as sp
from scipy.sparse import csr_matrix, bsr_matrix, isspmatrix_csr,\
isspmatrix_csc, isspmatrix_bsr, eye, SparseEfficiencyWarning
from pyamg.multilevel import multilevel_solver
from pyamg.strength import symmetric_strength_of_connection,\
classical_strength_of_connection, evolution_strength_of_connection
from ..relaxation.relaxation import gauss_seidel, gauss_seidel_nr,\
gauss_seidel_ne, gauss_seidel_indexed, jacobi, polynomial
from pyamg.relaxation.smoothing import change_smoothers, rho_D_inv_A
from pyamg.krylov import gmres
from pyamg.util.linalg import norm, approximate_spectral_radius
from .aggregation import smoothed_aggregation_solver
from .aggregate import standard_aggregation, lloyd_aggregation
from .smooth import jacobi_prolongation_smoother,\
energy_prolongation_smoother, richardson_prolongation_smoother
from .tentative import fit_candidates
from pyamg.util.utils import amalgamate, levelize_strength_or_aggregation, \
levelize_smooth_or_improve_candidates
__all__ = ['adaptive_sa_solver']
def eliminate_local_candidates(x, AggOp, A, T, Ca=1.0, **kwargs):
    """Eliminate candidates locally.

    Helper function that determines where to eliminate candidates locally
    on a per aggregate basis.

    Parameters
    ----------
    x : array
        n x 1 vector of new candidate
    AggOp : CSR or CSC sparse matrix
        Aggregation operator for the level that x was generated for
    A : sparse matrix
        Operator for the level that x was generated for
    T : sparse matrix
        Tentative prolongation operator for the level that x was generated for
    Ca : scalar
        Constant threshold parameter to decide when to drop candidates

    Returns
    -------
    Nothing, x is modified in place

    Raises
    ------
    TypeError
        If AggOp is neither CSR nor CSC
    """
    if not (isspmatrix_csr(AggOp) or isspmatrix_csc(AggOp)):
        raise TypeError('AggOp must be a CSR or CSC matrix')
    else:
        # CSC makes the per-aggregate column slicing below cheap
        AggOp = AggOp.tocsc()
    ndof = max(x.shape)
    # dofs per node; assumes ndof is a whole multiple of the number of
    # nodes in AggOp -- TODO confirm with callers
    nPDEs = int(ndof/AggOp.shape[0])

    def aggregate_wise_inner_product(z, AggOp, nPDEs, ndof):
        """Inner products per aggregate.

        Helper function that calculates <z, z>_i, i.e., the
        inner product of z only over aggregate i

        Returns a vector of length num_aggregates where entry i is <z, z>_i
        """
        z = np.ravel(z)*np.ravel(z)
        innerp = np.zeros((1, AggOp.shape[1]), dtype=z.dtype)
        for j in range(nPDEs):
            # accumulate squared entries of each aggregate, one PDE at a time
            innerp += z[slice(j, ndof, nPDEs)].reshape(1, -1) * AggOp
        return innerp.reshape(-1, 1)

    def get_aggregate_weights(AggOp, A, z, nPDEs, ndof):
        """Weights per aggregate.

        Calculate local aggregate quantities

        Return a vector of length num_aggregates where entry i is
        (card(agg_i)/A.shape[0]) ( <Az, z>/rho(A) )
        """
        rho = approximate_spectral_radius(A)
        zAz = np.dot(z.reshape(1, -1), A*z.reshape(-1, 1))
        # cardinality of each aggregate, in degrees of freedom
        card = nPDEs*(AggOp.indptr[1:]-AggOp.indptr[:-1])
        weights = (np.ravel(card)*zAz)/(A.shape[0]*rho)
        return weights.reshape(-1, 1)

    # Run test 1, which finds where x is small relative to its energy
    weights = Ca*get_aggregate_weights(AggOp, A, x, nPDEs, ndof)
    mask1 = aggregate_wise_inner_product(x, AggOp, nPDEs, ndof) <= weights

    # Run test 2, which finds where x is already approximated
    # accurately by the existing T
    projected_x = x - T*(T.T*x)
    mask2 = aggregate_wise_inner_product(projected_x,
                                         AggOp, nPDEs, ndof) <= weights

    # Combine masks and zero out corresponding aggregates in x
    mask = np.ravel(mask1 + mask2).nonzero()[0]
    if mask.shape[0] > 0:
        # map flagged aggregate indices back to the dofs they contain
        mask = nPDEs*AggOp[:, mask].indices
        for j in range(nPDEs):
            x[mask+j] = 0.0
def unpack_arg(v):
    """Use for local methods.

    Split a (name, kwargs) tuple; bare values get an empty kwargs dict.
    """
    if not isinstance(v, tuple):
        return v, {}
    return v[0], v[1]
def adaptive_sa_solver(A, initial_candidates=None, symmetry='hermitian',
                       pdef=True, num_candidates=1, candidate_iters=5,
                       improvement_iters=0, epsilon=0.1,
                       max_levels=10, max_coarse=10, aggregate='standard',
                       prepostsmoother=('gauss_seidel',
                                        {'sweep': 'symmetric'}),
                       smooth=('jacobi', {}), strength='symmetric',
                       coarse_solver='pinv2',
                       eliminate_local=(False, {'Ca': 1.0}), keep=False,
                       **kwargs):
    """Create a multilevel solver using Adaptive Smoothed Aggregation (aSA).

    Parameters
    ----------
    A : csr_matrix, bsr_matrix
        Square matrix in CSR or BSR format
    initial_candidates : None, n x m dense matrix
        If a matrix, then this forms the basis for the first m candidates.
        Also in this case, the initial setup stage is skipped, because this
        provides the first candidate(s). If None, then a random initial guess
        and relaxation are used to inform the initial candidate.
    symmetry : string
        'symmetric' refers to both real and complex symmetric
        'hermitian' refers to both complex Hermitian and real Hermitian
        Note that for the strictly real case, these two options are the same
        Note that this flag does not denote definiteness of the operator
    pdef : bool
        True or False, whether A is known to be positive definite.
    num_candidates : integer
        Number of near-nullspace candidates to generate
    candidate_iters : integer
        Number of smoothing passes/multigrid cycles used at each level of
        the adaptive setup phase
    improvement_iters : integer
        Number of times each candidate is improved
    epsilon : float
        Target convergence factor
    max_levels : integer
        Maximum number of levels to be used in the multilevel solver.
    max_coarse : integer
        Maximum number of variables permitted on the coarse grid.
    prepostsmoother : string or dict
        Pre- and post-smoother used in the adaptive method
    strength : ['symmetric', 'classical', 'evolution', ('predefined', {'C': csr_matrix}), None]
        Method used to determine the strength of connection between unknowns of
        the linear system. See smoothed_aggregation_solver(...) documentation.
    aggregate : ['standard', 'lloyd', 'naive', ('predefined', {'AggOp': csr_matrix})]
        Method used to aggregate nodes. See smoothed_aggregation_solver(...)
        documentation.
    smooth : ['jacobi', 'richardson', 'energy', None]
        Method used used to smooth the tentative prolongator. See
        smoothed_aggregation_solver(...) documentation
    coarse_solver : ['splu', 'lu', 'cholesky', 'pinv', 'gauss_seidel', ... ]
        Solver used at the coarsest level of the MG hierarchy.
        Optionally, may be a tuple (fn, args), where fn is a string such as
        ['splu', 'lu', ...] or a callable function, and args is a dictionary of
        arguments to be passed to fn.
    eliminate_local : tuple
        Length 2 tuple. If the first entry is True, then eliminate candidates
        where they aren't needed locally, using the second entry of the tuple
        to contain arguments to local elimination routine. Given the rigid
        sparse data structures, this doesn't help much, if at all, with
        complexity. Its more of a diagnostic utility.
    keep: bool
        Flag to indicate keeping extra operators in the hierarchy for
        diagnostics. For example, if True, then strength of connection (C),
        tentative prolongation (T), and aggregation (AggOp) are kept.

    Returns
    -------
    [multilevel_solver, work] : list
        Smoothed aggregation solver with adaptively generated candidates,
        plus a floating point value representing the "work" required to
        generate the solver. This value is the total cost of just
        relaxation, relative to the fine grid. The relaxation method used
        is assumed to be symmetric Gauss-Seidel.

    Notes
    -----
    - Unlike the standard Smoothed Aggregation (SA) method, adaptive SA does
      not require knowledge of near-nullspace candidate vectors.  Instead, an
      adaptive procedure computes one or more candidates 'from scratch'.  This
      approach is useful when no candidates are known or the candidates have
      been invalidated due to changes to matrix A.

    Examples
    --------
    >>> from pyamg.gallery import stencil_grid
    >>> from pyamg.aggregation import adaptive_sa_solver
    >>> import numpy as np
    >>> A=stencil_grid([[-1,-1,-1],[-1,8.0,-1],[-1,-1,-1]], (31,31),format='csr')
    >>> [asa,work] = adaptive_sa_solver(A,num_candidates=1)
    >>> residuals=[]
    >>> x=asa.solve(b=np.ones((A.shape[0],)), x0=np.ones((A.shape[0],)), residuals=residuals)

    References
    ----------
    .. [1] Brezina, Falgout, MacLachlan, Manteuffel, McCormick, and Ruge
       "Adaptive Smoothed Aggregation (alpha SA) Multigrid"
       SIAM Review Volume 47, Issue 2 (2005)
    """
    if not (isspmatrix_csr(A) or isspmatrix_bsr(A)):
        try:
            A = csr_matrix(A)
            warn("Implicit conversion of A to CSR", SparseEfficiencyWarning)
        except BaseException:
            # NOTE: the original message was a backslash-continued string
            # literal that embedded a run of indentation spaces; fixed here.
            raise TypeError('Argument A must have type csr_matrix or '
                            'bsr_matrix, or be convertible to csr_matrix')

    A = A.asfptype()
    if A.shape[0] != A.shape[1]:
        raise ValueError('expected square matrix')

    # Track work in terms of relaxation
    work = np.zeros((1,))

    # Levelize the user parameters, so that they become lists describing the
    # desired user option on each level.
    max_levels, max_coarse, strength =\
        levelize_strength_or_aggregation(strength, max_levels, max_coarse)
    max_levels, max_coarse, aggregate =\
        levelize_strength_or_aggregation(aggregate, max_levels, max_coarse)
    smooth = levelize_smooth_or_improve_candidates(smooth, max_levels)

    # Develop initial candidate(s). Note that any predefined aggregation is
    # preserved.
    if initial_candidates is None:
        B, aggregate, strength =\
            initial_setup_stage(A, symmetry, pdef, candidate_iters, epsilon,
                                max_levels, max_coarse, aggregate,
                                prepostsmoother, smooth, strength, work)
        # Normalize B
        B = (1.0/norm(B, 'inf')) * B
        num_candidates -= 1
    else:
        # Otherwise, use predefined candidates
        B = initial_candidates
        num_candidates -= B.shape[1]
        # Generate Aggregation and Strength Operators (the brute force way)
        sa = smoothed_aggregation_solver(A, B=B, symmetry=symmetry,
                                         presmoother=prepostsmoother,
                                         postsmoother=prepostsmoother,
                                         smooth=smooth, strength=strength,
                                         max_levels=max_levels,
                                         max_coarse=max_coarse,
                                         aggregate=aggregate,
                                         coarse_solver=coarse_solver,
                                         improve_candidates=None, keep=True,
                                         **kwargs)
        if len(sa.levels) > 1:
            # Set strength-of-connection and aggregation
            aggregate = [('predefined', {'AggOp': sa.levels[i].AggOp.tocsr()})
                         for i in range(len(sa.levels) - 1)]
            strength = [('predefined', {'C': sa.levels[i].C.tocsr()})
                        for i in range(len(sa.levels) - 1)]

    # Develop additional candidates
    for i in range(num_candidates):
        x = general_setup_stage(
            smoothed_aggregation_solver(A, B=B, symmetry=symmetry,
                                        presmoother=prepostsmoother,
                                        postsmoother=prepostsmoother,
                                        smooth=smooth,
                                        coarse_solver=coarse_solver,
                                        aggregate=aggregate,
                                        strength=strength,
                                        improve_candidates=None,
                                        keep=True, **kwargs),
            symmetry, candidate_iters, prepostsmoother, smooth,
            eliminate_local, coarse_solver, work)

        # Normalize x and add to candidate list
        x = x/norm(x, 'inf')
        if np.isinf(x[0]) or np.isnan(x[0]):
            raise ValueError('Adaptive candidate is all 0.')
        B = np.hstack((B, x.reshape(-1, 1)))

    # Improve candidates
    if B.shape[1] > 1 and improvement_iters > 0:
        b = np.zeros((A.shape[0], 1), dtype=A.dtype)
        for i in range(improvement_iters):
            for j in range(B.shape[1]):
                # Run a V-cycle built on everything except candidate j, while
                # using candidate j as the initial guess
                x0 = B[:, 0]
                B = B[:, 1:]
                sa_temp =\
                    smoothed_aggregation_solver(A, B=B, symmetry=symmetry,
                                                presmoother=prepostsmoother,
                                                postsmoother=prepostsmoother,
                                                smooth=smooth,
                                                coarse_solver=coarse_solver,
                                                aggregate=aggregate,
                                                strength=strength,
                                                improve_candidates=None,
                                                keep=True, **kwargs)
                # np.float was removed in NumPy 1.24; builtin float is the
                # documented equivalent (float64)
                x = sa_temp.solve(b, x0=x0,
                                  tol=float(np.finfo(float).tiny),
                                  maxiter=candidate_iters, cycle='V')
                work[:] += 2 * sa_temp.operator_complexity() *\
                    sa_temp.levels[0].A.nnz * candidate_iters

                # Apply local elimination
                elim, elim_kwargs = unpack_arg(eliminate_local)
                if elim is True:
                    x = x/norm(x, 'inf')
                    eliminate_local_candidates(x, sa_temp.levels[0].AggOp, A,
                                               sa_temp.levels[0].T,
                                               **elim_kwargs)

                # Normalize x and add to candidate list
                x = x/norm(x, 'inf')
                if np.isinf(x[0]) or np.isnan(x[0]):
                    raise ValueError('Adaptive candidate is all 0.')
                B = np.hstack((B, x.reshape(-1, 1)))

    elif improvement_iters > 0:
        # Special case for improving a single candidate
        max_levels = len(aggregate) + 1
        max_coarse = 0
        for i in range(improvement_iters):
            B, aggregate, strength =\
                initial_setup_stage(A, symmetry, pdef, candidate_iters,
                                    epsilon, max_levels, max_coarse,
                                    aggregate, prepostsmoother, smooth,
                                    strength, work, initial_candidate=B)
            # Normalize B
            B = (1.0/norm(B, 'inf'))*B

    return [smoothed_aggregation_solver(A, B=B, symmetry=symmetry,
                                        presmoother=prepostsmoother,
                                        postsmoother=prepostsmoother,
                                        smooth=smooth,
                                        coarse_solver=coarse_solver,
                                        aggregate=aggregate, strength=strength,
                                        improve_candidates=None, keep=keep,
                                        **kwargs),
            work[0]/A.nnz]
def initial_setup_stage(A, symmetry, pdef, candidate_iters, epsilon,
                        max_levels, max_coarse, aggregate, prepostsmoother,
                        smooth, strength, work, initial_candidate=None):
    """Compute aggregation and the first near-nullspace candidate following Algorithm 3 in Brezina et al.

    Parameters
    ----------
    candidate_iters
        number of test relaxation iterations
    epsilon
        minimum acceptable relaxation convergence factor

    Returns
    -------
    x : array
        First candidate vector, extended to the finest level
    aggregate, strength : lists
        If more than one coarse level was built, these are replaced by
        ('predefined', ...) descriptors of the operators constructed here
        so later stages can reuse the same hierarchy

    References
    ----------
    .. [1] Brezina, Falgout, MacLachlan, Manteuffel, McCormick, and Ruge
       "Adaptive Smoothed Aggregation (aSA) Multigrid"
       SIAM Review Volume 47, Issue 2 (2005)
       http://www.cs.umn.edu/~maclach/research/aSA2.pdf
    """
    # Define relaxation routine: relax x toward solving A z = 0, dispatching
    # on the configured pre/post smoother. Modifies x in place.
    def relax(A, x):
        fn, kwargs = unpack_arg(prepostsmoother)
        if fn == 'gauss_seidel':
            gauss_seidel(A, x, np.zeros_like(x),
                         iterations=candidate_iters, sweep='symmetric')
        elif fn == 'gauss_seidel_nr':
            gauss_seidel_nr(A, x, np.zeros_like(x),
                            iterations=candidate_iters, sweep='symmetric')
        elif fn == 'gauss_seidel_ne':
            gauss_seidel_ne(A, x, np.zeros_like(x),
                            iterations=candidate_iters, sweep='symmetric')
        elif fn == 'jacobi':
            jacobi(A, x, np.zeros_like(x), iterations=1,
                   omega=1.0 / rho_D_inv_A(A))
        elif fn == 'richardson':
            polynomial(A, x, np.zeros_like(x), iterations=1,
                       coefficients=[1.0/approximate_spectral_radius(A)])
        elif fn == 'gmres':
            x[:] = (gmres(A, np.zeros_like(x), x0=x,
                          maxiter=candidate_iters)[0]).reshape(x.shape)
        else:
            raise TypeError('Unrecognized smoother')

    # flag for skipping steps f-i in step 4
    skip_f_to_i = True

    # step 1
    A_l = A
    if initial_candidate is None:
        x = np.random.rand(A_l.shape[0], 1).astype(A_l.dtype)
        # The following type check matches the usual 'complex' type,
        # but also numpy data types such as 'complex64', 'complex128'
        # and 'complex256'.
        if A_l.dtype.name.startswith('complex'):
            x = x + 1.0j*np.random.rand(A_l.shape[0], 1)
    else:
        x = np.array(initial_candidate, dtype=A_l.dtype)

    # step 2
    relax(A_l, x)
    work[:] += A_l.nnz * candidate_iters*2

    # step 3
    # not advised to stop the iteration here: often the first relaxation pass
    # _is_ good, but the remaining passes are poor
    # if x_A_x/x_A_x_old < epsilon:
    #     # relaxation alone is sufficient
    #     print 'relaxation alone works: %g'%(x_A_x/x_A_x_old)
    #     return x, []

    # step 4: build the hierarchy one level at a time, coarsening x along
    # the way
    As = [A]
    xs = [x]
    Ps = []
    AggOps = []
    StrengthOps = []
    while A.shape[0] > max_coarse and max_levels > 1:
        # The real check to break from the while loop is below

        # Begin constructing next level
        fn, kwargs = unpack_arg(strength[len(As)-1])  # step 4b
        if fn == 'symmetric':
            C_l = symmetric_strength_of_connection(A_l, **kwargs)
            # Diagonal must be nonzero
            C_l = C_l + eye(C_l.shape[0], C_l.shape[1], format='csr')
        elif fn == 'classical':
            C_l = classical_strength_of_connection(A_l, **kwargs)
            # Diagonal must be nonzero
            C_l = C_l + eye(C_l.shape[0], C_l.shape[1], format='csr')
            if isspmatrix_bsr(A_l):
                C_l = amalgamate(C_l, A_l.blocksize[0])
        elif (fn == 'ode') or (fn == 'evolution'):
            C_l = evolution_strength_of_connection(A_l,
                                                   np.ones(
                                                       (A_l.shape[0], 1),
                                                       dtype=A.dtype),
                                                   **kwargs)
        elif fn == 'predefined':
            C_l = kwargs['C'].tocsr()
        elif fn is None:
            C_l = A_l.tocsr()
        else:
            raise ValueError('unrecognized strength of connection method: %s' %
                             str(fn))

        # In SA, strength represents "distance", so we take magnitude of
        # complex values
        if C_l.dtype.name.startswith('complex'):
            C_l.data = np.abs(C_l.data)

        # Create a unified strength framework so that large values represent
        # strong connections and small values represent weak connections
        if (fn == 'ode') or (fn == 'evolution') or (fn == 'energy_based'):
            C_l.data = 1.0 / C_l.data

        # aggregation
        fn, kwargs = unpack_arg(aggregate[len(As) - 1])
        if fn == 'standard':
            AggOp = standard_aggregation(C_l, **kwargs)[0]
        elif fn == 'lloyd':
            AggOp = lloyd_aggregation(C_l, **kwargs)[0]
        elif fn == 'predefined':
            AggOp = kwargs['AggOp'].tocsr()
        else:
            raise ValueError('unrecognized aggregation method %s' % str(fn))

        T_l, x = fit_candidates(AggOp, x)  # step 4c

        fn, kwargs = unpack_arg(smooth[len(As)-1])  # step 4d
        if fn == 'jacobi':
            P_l = jacobi_prolongation_smoother(A_l, T_l, C_l, x, **kwargs)
        elif fn == 'richardson':
            P_l = richardson_prolongation_smoother(A_l, T_l, **kwargs)
        elif fn == 'energy':
            P_l = energy_prolongation_smoother(A_l, T_l, C_l, x, None,
                                               (False, {}), **kwargs)
        elif fn is None:
            P_l = T_l
        else:
            raise ValueError('unrecognized prolongation smoother method %s' %
                             str(fn))

        # R should reflect A's structure  # step 4e
        if symmetry == 'symmetric':
            A_l = P_l.T.asformat(P_l.format) * A_l * P_l
        elif symmetry == 'hermitian':
            A_l = P_l.H.asformat(P_l.format) * A_l * P_l

        StrengthOps.append(C_l)
        AggOps.append(AggOp)
        Ps.append(P_l)
        As.append(A_l)

        # skip to step 5 as in step 4e
        if (A_l.shape[0] <= max_coarse) or (len(AggOps) + 1 >= max_levels):
            break

        if not skip_f_to_i:
            x_hat = x.copy()  # step 4g
            relax(A_l, x)  # step 4h
            work[:] += A_l.nnz*candidate_iters*2
            if pdef is True:
                # energy inner-product convergence estimate
                x_A_x = np.dot(np.conjugate(x).T, A_l*x)
                xhat_A_xhat = np.dot(np.conjugate(x_hat).T, A_l*x_hat)
                err_ratio = (x_A_x/xhat_A_xhat)**(1.0/candidate_iters)
            else:
                # use A.H A inner-product
                Ax = A_l * x
                # Axhat = A_l * x_hat
                x_A_x = np.dot(np.conjugate(Ax).T, Ax)
                xhat_A_xhat = np.dot(np.conjugate(x_hat).T, A_l*x_hat)
                err_ratio = (x_A_x/xhat_A_xhat)**(1.0/candidate_iters)

            if err_ratio < epsilon:  # step 4i
                # sufficient convergence; skip steps f-i from now on
                skip_f_to_i = True
                if x_A_x == 0:
                    x = x_hat  # need to restore x
        else:
            # just carry out relaxation, don't check for convergence
            relax(A_l, x)  # step 4h
            work[:] += 2 * A_l.nnz * candidate_iters

        # store xs for diagnostic use and for use in step 5
        xs.append(x)

    # step 5
    # Extend coarse-level candidate to the finest level
    # --> note that we start with the x from the second coarsest level
    x = xs[-1]
    # make sure that xs[-1] has been relaxed by step 4h, i.e. relax(As[-2], x)
    for lev in range(len(Ps)-2, -1, -1):  # lev = coarsest ... finest-1
        P = Ps[lev]  # I: lev --> lev+1
        A = As[lev]  # A on lev+1
        x = P * x
        relax(A, x)
        work[:] += A.nnz*candidate_iters*2

    # Set predefined strength of connection and aggregation
    if len(AggOps) > 1:
        aggregate = [('predefined', {'AggOp': AggOps[i]})
                     for i in range(len(AggOps))]
        strength = [('predefined', {'C': StrengthOps[i]})
                    for i in range(len(StrengthOps))]

    return x, aggregate, strength  # first candidate
def general_setup_stage(ml, symmetry, candidate_iters, prepostsmoother,
                        smooth, eliminate_local, coarse_solver, work):
    """Compute additional candidates and improvements following Algorithm 4 in Brezina et al.

    Parameters
    ----------
    ml : multilevel_solver
        Existing hierarchy used to relax the new candidate
    symmetry : string
        'symmetric' or 'hermitian'; controls how R is formed from P
    candidate_iters : integer
        number of test relaxation iterations
    prepostsmoother : string or tuple
        Pre/post smoother applied while relaxing the candidate
    smooth : list
        Levelized prolongation smoother descriptors
    eliminate_local : tuple
        (bool, kwargs) switch for dropping the candidate locally
    coarse_solver : string or tuple
        Coarsest-level solver
    work : ndarray
        One-entry array accumulating relaxation work; updated in place

    Returns
    -------
    x : ndarray
        New n x 1 candidate vector on the finest level

    References
    ----------
    .. [1] Brezina, Falgout, MacLachlan, Manteuffel, McCormick, and Ruge
       "Adaptive Smoothed Aggregation (alphaSA) Multigrid"
       SIAM Review Volume 47, Issue 2 (2005)
       http://www.cs.umn.edu/~maclach/research/aSA2.pdf
    """
    def make_bridge(T):
        # Extend tentative prolongator T with one extra dof per block row
        # for the incoming candidate; that dof is mapped to zero.
        M, N = T.shape
        K = T.blocksize[0]
        bnnz = T.indptr[-1]
        # the K+1 represents the new dof introduced by the new candidate. the
        # bridge 'T' ignores this new dof and just maps zeros there
        data = np.zeros((bnnz, K+1, K), dtype=T.dtype)
        data[:, :-1, :] = T.data
        return bsr_matrix((data, T.indices, T.indptr),
                          shape=((K + 1) * int(M / K), N))

    def expand_candidates(B_old, nodesize):
        # NOTE(review): not called anywhere in this routine; retained for
        # backward compatibility with the original implementation.
        # insert a new dof that is always zero, to create NullDim+1 dofs per
        # node in B
        NullDim = B_old.shape[1]
        nnodes = int(B_old.shape[0] / nodesize)
        Bnew = np.zeros((nnodes, nodesize+1, NullDim), dtype=B_old.dtype)
        Bnew[:, :-1, :] = B_old.reshape(nnodes, nodesize, NullDim)
        return Bnew.reshape(-1, NullDim)

    levels = ml.levels

    x = np.random.rand(levels[0].A.shape[0], 1)
    if levels[0].A.dtype.name.startswith('complex'):
        x = x + 1.0j*np.random.rand(levels[0].A.shape[0], 1)
    b = np.zeros_like(x)

    # np.float was removed in NumPy 1.24; the builtin float is equivalent
    x = ml.solve(b, x0=x, tol=float(np.finfo(float).tiny),
                 maxiter=candidate_iters)
    work[:] += ml.operator_complexity()*ml.levels[0].A.nnz*candidate_iters*2

    T0 = levels[0].T.copy()

    # TEST FOR CONVERGENCE HERE

    for i in range(len(ml.levels) - 2):
        # alpha-SA paper does local elimination here, but after talking
        # to Marian, its not clear that this helps things
        # fn, kwargs = unpack_arg(eliminate_local)
        # if fn == True:
        #     eliminate_local_candidates(x,levels[i].AggOp,levels[i].A,
        #                                levels[i].T, **kwargs)

        # add candidate to B
        B = np.hstack((levels[i].B, x.reshape(-1, 1)))

        # construct Ptent
        T, R = fit_candidates(levels[i].AggOp, B)

        levels[i].T = T
        x = R[:, -1].reshape(-1, 1)

        # smooth P
        fn, kwargs = unpack_arg(smooth[i])
        if fn == 'jacobi':
            levels[i].P = jacobi_prolongation_smoother(levels[i].A, T,
                                                       levels[i].C, R,
                                                       **kwargs)
        elif fn == 'richardson':
            levels[i].P = richardson_prolongation_smoother(levels[i].A, T,
                                                           **kwargs)
        elif fn == 'energy':
            levels[i].P = energy_prolongation_smoother(levels[i].A, T,
                                                       levels[i].C, R, None,
                                                       (False, {}), **kwargs)
            x = R[:, -1].reshape(-1, 1)
        elif fn is None:
            levels[i].P = T
        else:
            raise ValueError('unrecognized prolongation smoother method %s' %
                             str(fn))

        # construct R
        if symmetry == 'symmetric':  # R should reflect A's structure
            levels[i].R = levels[i].P.T.asformat(levels[i].P.format)
        elif symmetry == 'hermitian':
            levels[i].R = levels[i].P.H.asformat(levels[i].P.format)

        # construct coarse A
        levels[i+1].A = levels[i].R * levels[i].A * levels[i].P

        # construct bridging P
        T_bridge = make_bridge(levels[i+1].T)
        R_bridge = levels[i+2].B

        # smooth bridging P
        fn, kwargs = unpack_arg(smooth[i+1])
        if fn == 'jacobi':
            levels[i+1].P = jacobi_prolongation_smoother(levels[i+1].A,
                                                         T_bridge,
                                                         levels[i+1].C,
                                                         R_bridge, **kwargs)
        elif fn == 'richardson':
            levels[i+1].P = richardson_prolongation_smoother(levels[i+1].A,
                                                             T_bridge,
                                                             **kwargs)
        elif fn == 'energy':
            levels[i+1].P = energy_prolongation_smoother(levels[i+1].A,
                                                         T_bridge,
                                                         levels[i+1].C,
                                                         R_bridge, None,
                                                         (False, {}),
                                                         **kwargs)
        elif fn is None:
            levels[i+1].P = T_bridge
        else:
            raise ValueError('unrecognized prolongation smoother method %s' %
                             str(fn))

        # construct the "bridging" R
        if symmetry == 'symmetric':  # R should reflect A's structure
            levels[i+1].R = levels[i+1].P.T.asformat(levels[i+1].P.format)
        elif symmetry == 'hermitian':
            levels[i+1].R = levels[i+1].P.H.asformat(levels[i+1].P.format)

        # run solver on candidate
        solver = multilevel_solver(levels[i+1:], coarse_solver=coarse_solver)
        change_smoothers(solver, presmoother=prepostsmoother,
                         postsmoother=prepostsmoother)
        # np.float removed in NumPy 1.24; use builtin float
        x = solver.solve(np.zeros_like(x), x0=x,
                         tol=float(np.finfo(float).tiny),
                         maxiter=candidate_iters)
        work[:] += 2 * solver.operator_complexity() * solver.levels[0].A.nnz *\
            candidate_iters*2

        # update values on next level
        levels[i+1].B = R[:, :-1].copy()
        levels[i+1].T = T_bridge

    # note that we only use the x from the second coarsest level
    fn, kwargs = unpack_arg(prepostsmoother)
    for lvl in reversed(levels[:-2]):
        x = lvl.P * x
        work[:] += lvl.A.nnz*candidate_iters*2

        if fn == 'gauss_seidel':
            # only relax at nonzeros, so as not to mess up any locally dropped
            # candidates
            indices = np.ravel(x).nonzero()[0]
            gauss_seidel_indexed(lvl.A, x, np.zeros_like(x), indices,
                                 iterations=candidate_iters,
                                 sweep='symmetric')
        elif fn == 'gauss_seidel_ne':
            gauss_seidel_ne(lvl.A, x, np.zeros_like(x),
                            iterations=candidate_iters, sweep='symmetric')
        elif fn == 'gauss_seidel_nr':
            gauss_seidel_nr(lvl.A, x, np.zeros_like(x),
                            iterations=candidate_iters, sweep='symmetric')
        elif fn == 'jacobi':
            jacobi(lvl.A, x, np.zeros_like(x), iterations=1,
                   omega=1.0 / rho_D_inv_A(lvl.A))
        elif fn == 'richardson':
            polynomial(lvl.A, x, np.zeros_like(x), iterations=1,
                       coefficients=[1.0/approximate_spectral_radius(lvl.A)])
        elif fn == 'gmres':
            x[:] = (gmres(lvl.A, np.zeros_like(x), x0=x,
                          maxiter=candidate_iters)[0]).reshape(x.shape)
        else:
            raise TypeError('Unrecognized smoother')

    # x will be dense again, so we have to drop locally again
    elim, elim_kwargs = unpack_arg(eliminate_local)
    if elim is True:
        x = x/norm(x, 'inf')
        eliminate_local_candidates(x, levels[0].AggOp, levels[0].A, T0,
                                   **elim_kwargs)

    return x.reshape(-1, 1)
| {
"repo_name": "pyamg/pyamg",
"path": "pyamg/aggregation/adaptive.py",
"copies": "1",
"size": "32239",
"license": "mit",
"hash": 231700961214871070,
"line_mean": 41.5878467635,
"line_max": 105,
"alpha_frac": 0.5394398089,
"autogenerated": false,
"ratio": 3.8398046688899474,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48792444777899474,
"avg_score": null,
"num_lines": null
} |
"""Adaptive Smoothed Aggregation"""
__docformat__ = "restructuredtext en"
from warnings import warn
import numpy as np
import scipy as sp
from scipy.sparse import csr_matrix, bsr_matrix, isspmatrix_csr,\
isspmatrix_csc, isspmatrix_bsr, eye, SparseEfficiencyWarning
from pyamg.multilevel import multilevel_solver
from pyamg.strength import symmetric_strength_of_connection,\
classical_strength_of_connection, evolution_strength_of_connection
from pyamg.relaxation import gauss_seidel, gauss_seidel_nr, gauss_seidel_ne,\
gauss_seidel_indexed, jacobi, polynomial
from pyamg.relaxation.smoothing import change_smoothers, rho_D_inv_A
from pyamg.krylov import gmres
from pyamg.util.linalg import norm, approximate_spectral_radius
from aggregation import smoothed_aggregation_solver
from aggregate import standard_aggregation, lloyd_aggregation
from smooth import jacobi_prolongation_smoother, energy_prolongation_smoother,\
richardson_prolongation_smoother
from tentative import fit_candidates
from pyamg.util.utils import amalgamate, levelize_strength_or_aggregation, \
levelize_smooth_or_improve_candidates
__all__ = ['adaptive_sa_solver']
def eliminate_local_candidates(x, AggOp, A, T, Ca=1.0, **kwargs):
    """Eliminate candidates locally.

    Helper function that determines where to eliminate candidates locally
    on a per aggregate basis.

    Parameters
    ----------
    x : {array}
        n x 1 vector of new candidate
    AggOp : {CSR or CSC sparse matrix}
        Aggregation operator for the level that x was generated for
    A : {sparse matrix}
        Operator for the level that x was generated for
    T : {sparse matrix}
        Tentative prolongation operator for the level that x was generated for
    Ca : {scalar}
        Constant threshold parameter to decide when to drop candidates

    Returns
    -------
    Nothing, x is modified in place
    """
    if not (isspmatrix_csr(AggOp) or isspmatrix_csc(AggOp)):
        raise TypeError('AggOp must be a CSR or CSC matrix')
    else:
        # CSC makes the per-aggregate column slicing below cheap
        AggOp = AggOp.tocsc()
    ndof = max(x.shape)
    # dofs per node; assumes ndof is a whole multiple of the number of
    # nodes in AggOp -- TODO confirm with callers
    nPDEs = int(ndof/AggOp.shape[0])

    def aggregate_wise_inner_product(z, AggOp, nPDEs, ndof):
        """
        Helper function that calculates <z, z>_i, i.e., the
        inner product of z only over aggregate i

        Returns a vector of length num_aggregates where entry i is <z, z>_i
        """
        z = np.ravel(z)*np.ravel(z)
        innerp = np.zeros((1, AggOp.shape[1]), dtype=z.dtype)
        for j in range(nPDEs):
            # accumulate squared entries per aggregate, one PDE at a time
            innerp += z[slice(j, ndof, nPDEs)].reshape(1, -1) * AggOp
        return innerp.reshape(-1, 1)

    def get_aggregate_weights(AggOp, A, z, nPDEs, ndof):
        """
        Calculate local aggregate quantities

        Return a vector of length num_aggregates where entry i is
        (card(agg_i)/A.shape[0]) ( <Az, z>/rho(A) )
        """
        rho = approximate_spectral_radius(A)
        zAz = np.dot(z.reshape(1, -1), A*z.reshape(-1, 1))
        # cardinality of each aggregate, in degrees of freedom
        card = nPDEs*(AggOp.indptr[1:]-AggOp.indptr[:-1])
        weights = (np.ravel(card)*zAz)/(A.shape[0]*rho)
        return weights.reshape(-1, 1)

    # Run test 1, which finds where x is small relative to its energy
    weights = Ca*get_aggregate_weights(AggOp, A, x, nPDEs, ndof)
    mask1 = aggregate_wise_inner_product(x, AggOp, nPDEs, ndof) <= weights

    # Run test 2, which finds where x is already approximated
    # accurately by the existing T
    projected_x = x - T*(T.T*x)
    mask2 = aggregate_wise_inner_product(projected_x,
                                         AggOp, nPDEs, ndof) <= weights

    # Combine masks and zero out corresponding aggregates in x
    mask = np.ravel(mask1 + mask2).nonzero()[0]
    if mask.shape[0] > 0:
        # map flagged aggregate indices back to the dofs they contain
        mask = nPDEs*AggOp[:, mask].indices
        for j in range(nPDEs):
            x[mask+j] = 0.0
def unpack_arg(v):
    """Helper function for local methods"""
    # A tuple is (name, kwargs); anything else gets an empty kwargs dict.
    return (v[0], v[1]) if isinstance(v, tuple) else (v, {})
def adaptive_sa_solver(A, initial_candidates=None, symmetry='hermitian',
                       pdef=True, num_candidates=1, candidate_iters=5,
                       improvement_iters=0, epsilon=0.1,
                       max_levels=10, max_coarse=100, aggregate='standard',
                       prepostsmoother=('gauss_seidel',
                                        {'sweep': 'symmetric'}),
                       smooth=('jacobi', {}), strength='symmetric',
                       coarse_solver='pinv2',
                       eliminate_local=(False, {'Ca': 1.0}), keep=False,
                       **kwargs):
    r"""
    Create a multilevel solver using Adaptive Smoothed Aggregation (aSA)

    Parameters
    ----------
    A : {csr_matrix, bsr_matrix}
        Square matrix in CSR or BSR format
    initial_candidates : {None, n x m dense matrix}
        If a matrix, then this forms the basis for the first m candidates.
        Also in this case, the initial setup stage is skipped, because this
        provides the first candidate(s). If None, then a random initial guess
        and relaxation are used to inform the initial candidate.
    symmetry : {string}
        'symmetric' refers to both real and complex symmetric
        'hermitian' refers to both complex Hermitian and real Hermitian
        Note that for the strictly real case, these two options are the same
        Note that this flag does not denote definiteness of the operator
    pdef : {bool}
        True or False, whether A is known to be positive definite.
    num_candidates : {integer} : default 1
        Number of near-nullspace candidates to generate
    candidate_iters : {integer} : default 5
        Number of smoothing passes/multigrid cycles used at each level of
        the adaptive setup phase
    improvement_iters : {integer} : default 0
        Number of times each candidate is improved
    epsilon : {float} : default 0.1
        Target convergence factor
    max_levels : {integer} : default 10
        Maximum number of levels to be used in the multilevel solver.
    max_coarse : {integer} : default 100
        Maximum number of variables permitted on the coarse grid.
    prepostsmoother : {string or dict}
        Pre- and post-smoother used in the adaptive method
    strength : ['symmetric', 'classical', 'evolution',
                ('predefined', {'C': csr_matrix}), None]
        Method used to determine the strength of connection between unknowns of
        the linear system. See smoothed_aggregation_solver(...) documentation.
    aggregate : ['standard', 'lloyd', 'naive',
                 ('predefined', {'AggOp': csr_matrix})]
        Method used to aggregate nodes. See smoothed_aggregation_solver(...)
        documentation.
    smooth : ['jacobi', 'richardson', 'energy', None]
        Method used to smooth the tentative prolongator. See
        smoothed_aggregation_solver(...) documentation
    coarse_solver : ['splu', 'lu', 'cholesky, 'pinv', 'gauss_seidel', ... ]
        Solver used at the coarsest level of the MG hierarchy.
        Optionally, may be a tuple (fn, args), where fn is a string such as
        ['splu', 'lu', ...] or a callable function, and args is a dictionary of
        arguments to be passed to fn.
    eliminate_local : {tuple}
        Length 2 tuple. If the first entry is True, then eliminate candidates
        where they aren't needed locally, using the second entry of the tuple
        to contain arguments to local elimination routine. Given the rigid
        sparse data structures, this doesn't help much, if at all, with
        complexity. It's more of a diagnostic utility.
    keep: {bool} : default False
        Flag to indicate keeping extra operators in the hierarchy for
        diagnostics. For example, if True, then strength of connection (C),
        tentative prolongation (T), and aggregation (AggOp) are kept.

    Returns
    -------
    multilevel_solver : multilevel_solver
        Smoothed aggregation solver with adaptively generated candidates

    Notes
    -----
    - Floating point value representing the "work" required to generate
      the solver. This value is the total cost of just relaxation, relative
      to the fine grid. The relaxation method used is assumed to be symmetric
      Gauss-Seidel.

    - Unlike the standard Smoothed Aggregation (SA) method, adaptive SA does
      not require knowledge of near-nullspace candidate vectors. Instead, an
      adaptive procedure computes one or more candidates 'from scratch'. This
      approach is useful when no candidates are known or the candidates have
      been invalidated due to changes to matrix A.

    Examples
    --------
    >>> from pyamg.gallery import stencil_grid
    >>> from pyamg.aggregation import adaptive_sa_solver
    >>> import numpy as np
    >>> A=stencil_grid([[-1,-1,-1],[-1,8.0,-1],[-1,-1,-1]],\
                       (31,31),format='csr')
    >>> [asa,work] = adaptive_sa_solver(A,num_candidates=1)
    >>> residuals=[]
    >>> x=asa.solve(b=np.ones((A.shape[0],)), x0=np.ones((A.shape[0],)),\
                    residuals=residuals)

    References
    ----------
    .. [1] Brezina, Falgout, MacLachlan, Manteuffel, McCormick, and Ruge
       "Adaptive Smoothed Aggregation ($\alpha$SA) Multigrid"
       SIAM Review Volume 47, Issue 2 (2005)
       http://www.cs.umn.edu/~maclach/research/aSA2.pdf
    """
    if not (isspmatrix_csr(A) or isspmatrix_bsr(A)):
        try:
            A = csr_matrix(A)
            warn("Implicit conversion of A to CSR",
                 SparseEfficiencyWarning)
        except Exception:
            # csr_matrix() failed -- A is not convertible to a sparse format
            raise TypeError('Argument A must have type csr_matrix or '
                            'bsr_matrix, or be convertible to csr_matrix')

    A = A.asfptype()
    if A.shape[0] != A.shape[1]:
        raise ValueError('expected square matrix')

    # Track work in terms of relaxation
    work = np.zeros((1,))

    # Levelize the user parameters, so that they become lists describing the
    # desired user option on each level.
    max_levels, max_coarse, strength =\
        levelize_strength_or_aggregation(strength, max_levels, max_coarse)
    max_levels, max_coarse, aggregate =\
        levelize_strength_or_aggregation(aggregate, max_levels, max_coarse)
    smooth = levelize_smooth_or_improve_candidates(smooth, max_levels)

    # Develop initial candidate(s).  Note that any predefined aggregation is
    # preserved.
    if initial_candidates is None:
        B, aggregate, strength =\
            initial_setup_stage(A, symmetry, pdef, candidate_iters, epsilon,
                                max_levels, max_coarse, aggregate,
                                prepostsmoother, smooth, strength, work)
        # Normalize B
        B = (1.0/norm(B, 'inf')) * B
        num_candidates -= 1
    else:
        # Otherwise, use predefined candidates
        B = initial_candidates
        num_candidates -= B.shape[1]
        # Generate Aggregation and Strength Operators (the brute force way)
        sa = smoothed_aggregation_solver(A, B=B, symmetry=symmetry,
                                         presmoother=prepostsmoother,
                                         postsmoother=prepostsmoother,
                                         smooth=smooth, strength=strength,
                                         max_levels=max_levels,
                                         max_coarse=max_coarse,
                                         aggregate=aggregate,
                                         coarse_solver=coarse_solver,
                                         improve_candidates=None, keep=True,
                                         **kwargs)
        if len(sa.levels) > 1:
            # Set strength-of-connection and aggregation
            aggregate = [('predefined', {'AggOp': sa.levels[i].AggOp.tocsr()})
                         for i in range(len(sa.levels) - 1)]
            strength = [('predefined', {'C': sa.levels[i].C.tocsr()})
                        for i in range(len(sa.levels) - 1)]

    # Develop additional candidates
    for i in range(num_candidates):
        x = general_setup_stage(
            smoothed_aggregation_solver(A, B=B, symmetry=symmetry,
                                        presmoother=prepostsmoother,
                                        postsmoother=prepostsmoother,
                                        smooth=smooth,
                                        coarse_solver=coarse_solver,
                                        aggregate=aggregate,
                                        strength=strength,
                                        improve_candidates=None,
                                        keep=True, **kwargs),
            symmetry, candidate_iters, prepostsmoother, smooth,
            eliminate_local, coarse_solver, work)

        # Normalize x and add to candidate list
        x = x/norm(x, 'inf')
        if np.isinf(x[0]) or np.isnan(x[0]):
            raise ValueError('Adaptive candidate is all 0.')
        B = np.hstack((B, x.reshape(-1, 1)))

    # Improve candidates
    if B.shape[1] > 1 and improvement_iters > 0:
        b = np.zeros((A.shape[0], 1), dtype=A.dtype)
        for i in range(improvement_iters):
            for j in range(B.shape[1]):
                # Run a V-cycle built on everything except candidate j, while
                # using candidate j as the initial guess
                x0 = B[:, 0]
                B = B[:, 1:]
                sa_temp =\
                    smoothed_aggregation_solver(A, B=B, symmetry=symmetry,
                                                presmoother=prepostsmoother,
                                                postsmoother=prepostsmoother,
                                                smooth=smooth,
                                                coarse_solver=coarse_solver,
                                                aggregate=aggregate,
                                                strength=strength,
                                                improve_candidates=None,
                                                keep=True, **kwargs)
                # np.float was removed from NumPy; the builtin float is the
                # equivalent (both mean float64 here)
                x = sa_temp.solve(b, x0=x0,
                                  tol=float(np.finfo(float).tiny),
                                  maxiter=candidate_iters, cycle='V')
                work[:] += 2 * sa_temp.operator_complexity() *\
                    sa_temp.levels[0].A.nnz * candidate_iters

                # Apply local elimination
                elim, elim_kwargs = unpack_arg(eliminate_local)
                if elim is True:
                    x = x/norm(x, 'inf')
                    eliminate_local_candidates(x, sa_temp.levels[0].AggOp, A,
                                               sa_temp.levels[0].T,
                                               **elim_kwargs)

                # Normalize x and add to candidate list
                x = x/norm(x, 'inf')
                if np.isinf(x[0]) or np.isnan(x[0]):
                    raise ValueError('Adaptive candidate is all 0.')
                B = np.hstack((B, x.reshape(-1, 1)))

    elif improvement_iters > 0:
        # Special case for improving a single candidate
        max_levels = len(aggregate) + 1
        max_coarse = 0
        for i in range(improvement_iters):
            B, aggregate, strength =\
                initial_setup_stage(A, symmetry, pdef, candidate_iters,
                                    epsilon, max_levels, max_coarse,
                                    aggregate, prepostsmoother, smooth,
                                    strength, work, initial_candidate=B)
            # Normalize B
            B = (1.0/norm(B, 'inf'))*B

    return [smoothed_aggregation_solver(A, B=B, symmetry=symmetry,
                                        presmoother=prepostsmoother,
                                        postsmoother=prepostsmoother,
                                        smooth=smooth,
                                        coarse_solver=coarse_solver,
                                        aggregate=aggregate, strength=strength,
                                        improve_candidates=None, keep=keep,
                                        **kwargs),
            work[0]/A.nnz]
def initial_setup_stage(A, symmetry, pdef, candidate_iters, epsilon,
                        max_levels, max_coarse, aggregate, prepostsmoother,
                        smooth, strength, work, initial_candidate=None):
    r"""
    Computes a complete aggregation and the first near-nullspace candidate
    following Algorithm 3 in Brezina et al.

    Parameters
    ----------
    candidate_iters
        number of test relaxation iterations
    epsilon
        minimum acceptable relaxation convergence factor

    References
    ----------
    .. [1] Brezina, Falgout, MacLachlan, Manteuffel, McCormick, and Ruge
       "Adaptive Smoothed Aggregation ($\alpha$SA) Multigrid"
       SIAM Review Volume 47, Issue 2 (2005)
       http://www.cs.umn.edu/~maclach/research/aSA2.pdf
    """
    # Define relaxation routine: candidate_iters sweeps of the configured
    # smoother on A x = 0, modifying x in place
    def relax(A, x):
        fn, kwargs = unpack_arg(prepostsmoother)
        if fn == 'gauss_seidel':
            gauss_seidel(A, x, np.zeros_like(x),
                         iterations=candidate_iters, sweep='symmetric')
        elif fn == 'gauss_seidel_nr':
            gauss_seidel_nr(A, x, np.zeros_like(x),
                            iterations=candidate_iters, sweep='symmetric')
        elif fn == 'gauss_seidel_ne':
            gauss_seidel_ne(A, x, np.zeros_like(x),
                            iterations=candidate_iters, sweep='symmetric')
        elif fn == 'jacobi':
            jacobi(A, x, np.zeros_like(x), iterations=1,
                   omega=1.0 / rho_D_inv_A(A))
        elif fn == 'richardson':
            # keyword is 'coefficients' in pyamg.relaxation.polynomial
            # (was misspelled 'coeffients', which raises TypeError)
            polynomial(A, x, np.zeros_like(x), iterations=1,
                       coefficients=[1.0/approximate_spectral_radius(A)])
        elif fn == 'gmres':
            x[:] = (gmres(A, np.zeros_like(x), x0=x,
                          maxiter=candidate_iters)[0]).reshape(x.shape)
        else:
            raise TypeError('Unrecognized smoother')

    # flag for skipping steps f-i in step 4
    skip_f_to_i = True

    # step 1
    A_l = A
    if initial_candidate is None:
        x = sp.rand(A_l.shape[0], 1)
        if A_l.dtype == complex:
            x = x + 1.0j*sp.rand(A_l.shape[0], 1)
    else:
        x = np.array(initial_candidate, dtype=A_l.dtype)

    # step 2
    relax(A_l, x)
    work[:] += A_l.nnz * candidate_iters*2

    # step 3
    # not advised to stop the iteration here: often the first relaxation pass
    # _is_ good, but the remaining passes are poor
    # if x_A_x/x_A_x_old < epsilon:
    #    # relaxation alone is sufficient
    #    print 'relaxation alone works: %g'%(x_A_x/x_A_x_old)
    #    return x, []

    # step 4
    As = [A]
    xs = [x]
    Ps = []
    AggOps = []
    StrengthOps = []

    while A.shape[0] > max_coarse and max_levels > 1:
        # The real check to break from the while loop is below

        # Begin constructing next level
        fn, kwargs = unpack_arg(strength[len(As)-1])  # step 4b
        if fn == 'symmetric':
            C_l = symmetric_strength_of_connection(A_l, **kwargs)
            # Diagonal must be nonzero
            C_l = C_l + eye(C_l.shape[0], C_l.shape[1], format='csr')
        elif fn == 'classical':
            C_l = classical_strength_of_connection(A_l, **kwargs)
            # Diagonal must be nonzero
            C_l = C_l + eye(C_l.shape[0], C_l.shape[1], format='csr')
            if isspmatrix_bsr(A_l):
                C_l = amalgamate(C_l, A_l.blocksize[0])
        elif (fn == 'ode') or (fn == 'evolution'):
            C_l = evolution_strength_of_connection(A_l,
                                                   np.ones(
                                                       (A_l.shape[0], 1),
                                                       dtype=A.dtype),
                                                   **kwargs)
        elif fn == 'predefined':
            C_l = kwargs['C'].tocsr()
        elif fn is None:
            C_l = A_l.tocsr()
        else:
            raise ValueError('unrecognized strength of connection method: %s' %
                             str(fn))

        # In SA, strength represents "distance", so we take magnitude of
        # complex values
        if C_l.dtype == complex:
            C_l.data = np.abs(C_l.data)

        # Create a unified strength framework so that large values represent
        # strong connections and small values represent weak connections
        if (fn == 'ode') or (fn == 'evolution') or (fn == 'energy_based'):
            C_l.data = 1.0 / C_l.data

        # aggregation
        fn, kwargs = unpack_arg(aggregate[len(As) - 1])
        if fn == 'standard':
            AggOp = standard_aggregation(C_l, **kwargs)[0]
        elif fn == 'lloyd':
            AggOp = lloyd_aggregation(C_l, **kwargs)[0]
        elif fn == 'predefined':
            AggOp = kwargs['AggOp'].tocsr()
        else:
            raise ValueError('unrecognized aggregation method %s' % str(fn))

        T_l, x = fit_candidates(AggOp, x)  # step 4c

        fn, kwargs = unpack_arg(smooth[len(As)-1])  # step 4d
        if fn == 'jacobi':
            P_l = jacobi_prolongation_smoother(A_l, T_l, C_l, x, **kwargs)
        elif fn == 'richardson':
            P_l = richardson_prolongation_smoother(A_l, T_l, **kwargs)
        elif fn == 'energy':
            P_l = energy_prolongation_smoother(A_l, T_l, C_l, x, None,
                                               (False, {}), **kwargs)
        elif fn is None:
            P_l = T_l
        else:
            raise ValueError('unrecognized prolongation smoother method %s' %
                             str(fn))

        # R should reflect A's structure  # step 4e
        if symmetry == 'symmetric':
            A_l = P_l.T.asformat(P_l.format) * A_l * P_l
        elif symmetry == 'hermitian':
            A_l = P_l.H.asformat(P_l.format) * A_l * P_l

        StrengthOps.append(C_l)
        AggOps.append(AggOp)
        Ps.append(P_l)
        As.append(A_l)

        # skip to step 5 as in step 4e
        if (A_l.shape[0] <= max_coarse) or (len(AggOps) + 1 >= max_levels):
            break

        if not skip_f_to_i:
            x_hat = x.copy()  # step 4g
            relax(A_l, x)  # step 4h
            work[:] += A_l.nnz*candidate_iters*2
            if pdef is True:
                x_A_x = np.dot(np.conjugate(x).T, A_l*x)
                xhat_A_xhat = np.dot(np.conjugate(x_hat).T, A_l*x_hat)
                err_ratio = (x_A_x/xhat_A_xhat)**(1.0/candidate_iters)
            else:
                # use A.H A inner-product
                Ax = A_l * x
                # Axhat = A_l * x_hat
                x_A_x = np.dot(np.conjugate(Ax).T, Ax)
                xhat_A_xhat = np.dot(np.conjugate(x_hat).T, A_l*x_hat)
                err_ratio = (x_A_x/xhat_A_xhat)**(1.0/candidate_iters)

            if err_ratio < epsilon:  # step 4i
                # print "sufficient convergence, skipping"
                skip_f_to_i = True
                if x_A_x == 0:
                    x = x_hat  # need to restore x
        else:
            # just carry out relaxation, don't check for convergence
            relax(A_l, x)  # step 4h
            work[:] += 2 * A_l.nnz * candidate_iters

        # store xs for diagnostic use and for use in step 5
        xs.append(x)

    # step 5
    # Extend coarse-level candidate to the finest level
    # --> note that we start with the x from the second coarsest level
    x = xs[-1]
    # make sure that xs[-1] has been relaxed by step 4h, i.e. relax(As[-2], x)
    for lev in range(len(Ps)-2, -1, -1):  # lev = coarsest ... finest-1
        P = Ps[lev]  # I: lev --> lev+1
        A = As[lev]  # A on lev+1
        x = P * x
        relax(A, x)
        work[:] += A.nnz*candidate_iters*2

    # Set predefined strength of connection and aggregation
    if len(AggOps) > 1:
        aggregate = [('predefined', {'AggOp': AggOps[i]})
                     for i in range(len(AggOps))]
        strength = [('predefined', {'C': StrengthOps[i]})
                    for i in range(len(StrengthOps))]

    return x, aggregate, strength  # first candidate
def general_setup_stage(ml, symmetry, candidate_iters, prepostsmoother,
                        smooth, eliminate_local, coarse_solver, work):
    """
    Computes additional candidates and improvements
    following Algorithm 4 in Brezina et al.

    Parameters
    ----------
    candidate_iters
        number of test relaxation iterations
    epsilon
        minimum acceptable relaxation convergence factor

    References
    ----------
    .. [1] Brezina, Falgout, MacLachlan, Manteuffel, McCormick, and Ruge
       "Adaptive Smoothed Aggregation (alphaSA) Multigrid"
       SIAM Review Volume 47, Issue 2 (2005)
       http://www.cs.umn.edu/~maclach/research/aSA2.pdf
    """
    def make_bridge(T):
        M, N = T.shape
        K = T.blocksize[0]
        bnnz = T.indptr[-1]
        # the K+1 represents the new dof introduced by the new candidate.  the
        # bridge 'T' ignores this new dof and just maps zeros there
        data = np.zeros((bnnz, K+1, K), dtype=T.dtype)
        data[:, :-1, :] = T.data
        # integer division: matrix shapes must be ints (M is a multiple of K);
        # '/' would produce a float under Python 3
        return bsr_matrix((data, T.indices, T.indptr),
                          shape=((K + 1) * (M // K), N))

    def expand_candidates(B_old, nodesize):
        # insert a new dof that is always zero, to create NullDim+1 dofs per
        # node in B
        NullDim = B_old.shape[1]
        # integer division: nnodes is used as a reshape dimension
        nnodes = B_old.shape[0] // nodesize
        Bnew = np.zeros((nnodes, nodesize+1, NullDim), dtype=B_old.dtype)
        Bnew[:, :-1, :] = B_old.reshape(nnodes, nodesize, NullDim)
        return Bnew.reshape(-1, NullDim)

    levels = ml.levels

    x = sp.rand(levels[0].A.shape[0], 1)
    if levels[0].A.dtype == complex:
        x = x + 1.0j*sp.rand(levels[0].A.shape[0], 1)
    b = np.zeros_like(x)

    # np.float was removed from NumPy; builtin float (float64) is equivalent
    x = ml.solve(b, x0=x, tol=float(np.finfo(float).tiny),
                 maxiter=candidate_iters)
    work[:] += ml.operator_complexity()*ml.levels[0].A.nnz*candidate_iters*2

    T0 = levels[0].T.copy()

    # TEST FOR CONVERGENCE HERE

    for i in range(len(ml.levels) - 2):
        # alpha-SA paper does local elimination here, but after talking
        # to Marian, its not clear that this helps things
        # fn, kwargs = unpack_arg(eliminate_local)
        # if fn == True:
        #     eliminate_local_candidates(x,levels[i].AggOp,levels[i].A,
        #                                levels[i].T, **kwargs)

        # add candidate to B
        B = np.hstack((levels[i].B, x.reshape(-1, 1)))

        # construct Ptent
        T, R = fit_candidates(levels[i].AggOp, B)

        levels[i].T = T
        x = R[:, -1].reshape(-1, 1)

        # smooth P
        fn, kwargs = unpack_arg(smooth[i])
        if fn == 'jacobi':
            levels[i].P = jacobi_prolongation_smoother(levels[i].A, T,
                                                       levels[i].C, R,
                                                       **kwargs)
        elif fn == 'richardson':
            levels[i].P = richardson_prolongation_smoother(levels[i].A, T,
                                                           **kwargs)
        elif fn == 'energy':
            levels[i].P = energy_prolongation_smoother(levels[i].A, T,
                                                       levels[i].C, R, None,
                                                       (False, {}), **kwargs)
            x = R[:, -1].reshape(-1, 1)
        elif fn is None:
            levels[i].P = T
        else:
            raise ValueError('unrecognized prolongation smoother method %s' %
                             str(fn))

        # construct R
        if symmetry == 'symmetric':  # R should reflect A's structure
            levels[i].R = levels[i].P.T.asformat(levels[i].P.format)
        elif symmetry == 'hermitian':
            levels[i].R = levels[i].P.H.asformat(levels[i].P.format)

        # construct coarse A
        levels[i+1].A = levels[i].R * levels[i].A * levels[i].P

        # construct bridging P
        T_bridge = make_bridge(levels[i+1].T)
        R_bridge = levels[i+2].B

        # smooth bridging P
        fn, kwargs = unpack_arg(smooth[i+1])
        if fn == 'jacobi':
            levels[i+1].P = jacobi_prolongation_smoother(levels[i+1].A,
                                                         T_bridge,
                                                         levels[i+1].C,
                                                         R_bridge, **kwargs)
        elif fn == 'richardson':
            levels[i+1].P = richardson_prolongation_smoother(levels[i+1].A,
                                                             T_bridge,
                                                             **kwargs)
        elif fn == 'energy':
            levels[i+1].P = energy_prolongation_smoother(levels[i+1].A,
                                                         T_bridge,
                                                         levels[i+1].C,
                                                         R_bridge, None,
                                                         (False, {}),
                                                         **kwargs)
        elif fn is None:
            levels[i+1].P = T_bridge
        else:
            raise ValueError('unrecognized prolongation smoother method %s' %
                             str(fn))

        # construct the "bridging" R
        if symmetry == 'symmetric':  # R should reflect A's structure
            levels[i+1].R = levels[i+1].P.T.asformat(levels[i+1].P.format)
        elif symmetry == 'hermitian':
            levels[i+1].R = levels[i+1].P.H.asformat(levels[i+1].P.format)

        # run solver on candidate
        solver = multilevel_solver(levels[i+1:], coarse_solver=coarse_solver)
        change_smoothers(solver, presmoother=prepostsmoother,
                         postsmoother=prepostsmoother)
        x = solver.solve(np.zeros_like(x), x0=x,
                         tol=float(np.finfo(float).tiny),
                         maxiter=candidate_iters)
        work[:] += 2 * solver.operator_complexity() * solver.levels[0].A.nnz *\
            candidate_iters*2

        # update values on next level
        levels[i+1].B = R[:, :-1].copy()
        levels[i+1].T = T_bridge

    # note that we only use the x from the second coarsest level
    fn, kwargs = unpack_arg(prepostsmoother)
    for lvl in reversed(levels[:-2]):
        x = lvl.P * x
        work[:] += lvl.A.nnz*candidate_iters*2

        if fn == 'gauss_seidel':
            # only relax at nonzeros, so as not to mess up any locally dropped
            # candidates
            indices = np.ravel(x).nonzero()[0]
            gauss_seidel_indexed(lvl.A, x, np.zeros_like(x), indices,
                                 iterations=candidate_iters,
                                 sweep='symmetric')
        elif fn == 'gauss_seidel_ne':
            gauss_seidel_ne(lvl.A, x, np.zeros_like(x),
                            iterations=candidate_iters, sweep='symmetric')
        elif fn == 'gauss_seidel_nr':
            gauss_seidel_nr(lvl.A, x, np.zeros_like(x),
                            iterations=candidate_iters, sweep='symmetric')
        elif fn == 'jacobi':
            jacobi(lvl.A, x, np.zeros_like(x), iterations=1,
                   omega=1.0 / rho_D_inv_A(lvl.A))
        elif fn == 'richardson':
            # keyword is 'coefficients' in pyamg.relaxation.polynomial
            # (was misspelled 'coeffients', which raises TypeError)
            polynomial(lvl.A, x, np.zeros_like(x), iterations=1,
                       coefficients=[1.0/approximate_spectral_radius(lvl.A)])
        elif fn == 'gmres':
            x[:] = (gmres(lvl.A, np.zeros_like(x), x0=x,
                          maxiter=candidate_iters)[0]).reshape(x.shape)
        else:
            raise TypeError('Unrecognized smoother')

    # x will be dense again, so we have to drop locally again
    elim, elim_kwargs = unpack_arg(eliminate_local)
    if elim is True:
        x = x/norm(x, 'inf')
        eliminate_local_candidates(x, levels[0].AggOp, levels[0].A, T0,
                                   **elim_kwargs)

    return x.reshape(-1, 1)
| {
"repo_name": "kidaa/pyamg",
"path": "pyamg/aggregation/adaptive.py",
"copies": "1",
"size": "32192",
"license": "bsd-3-clause",
"hash": 1693258017600904700,
"line_mean": 41.1913499345,
"line_max": 79,
"alpha_frac": 0.5353503976,
"autogenerated": false,
"ratio": 3.82782401902497,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.486317441662497,
"avg_score": null,
"num_lines": null
} |
"""Adaptive Smoothed Aggregation"""
__docformat__ = "restructuredtext en"
from warnings import warn
import numpy
import scipy
from scipy.sparse import csr_matrix, bsr_matrix, isspmatrix_csr,\
isspmatrix_csc, isspmatrix_bsr, eye
from pyamg.multilevel import multilevel_solver
from pyamg.strength import symmetric_strength_of_connection,\
classical_strength_of_connection, evolution_strength_of_connection
from pyamg.relaxation import gauss_seidel, gauss_seidel_nr, gauss_seidel_ne,\
gauss_seidel_indexed, jacobi, polynomial
from pyamg.relaxation.smoothing import change_smoothers, rho_D_inv_A
from pyamg.krylov import gmres
from pyamg.util.linalg import norm, approximate_spectral_radius
from pyamg.aggregation.aggregation import preprocess_str_or_agg,\
preprocess_smooth
from aggregation import smoothed_aggregation_solver
from aggregate import standard_aggregation, lloyd_aggregation
from smooth import jacobi_prolongation_smoother, energy_prolongation_smoother,\
richardson_prolongation_smoother
from tentative import fit_candidates
from pyamg.util.utils import amalgamate
__all__ = ['adaptive_sa_solver']
def eliminate_local_candidates(x, AggOp, A, T, Ca=1.0, **kwargs):
    """
    Helper function that determines where to eliminate candidates locally
    on a per aggregate basis.

    Parameters
    ----------
    x : {array}
        n x 1 vector of new candidate
    AggOp : {CSR or CSC sparse matrix}
        Aggregation operator for the level that x was generated for
    A : {sparse matrix}
        Operator for the level that x was generated for
    T : {sparse matrix}
        Tentative prolongation operator for the level that x was generated for
    Ca : {scalar}
        Constant threshold parameter to decide when to drop candidates

    Returns
    -------
    Nothing, x is modified in place
    """
    if not (isspmatrix_csr(AggOp) or isspmatrix_csc(AggOp)):
        raise TypeError('AggOp must be a CSR or CSC matrix')
    else:
        # CSC gives fast per-aggregate (column) slicing below
        AggOp = AggOp.tocsc()

    ndof = max(x.shape)
    # number of degrees of freedom per node (assumes constant block size)
    nPDEs = int(ndof/AggOp.shape[0])

    def aggregate_wise_inner_product(z, AggOp, nPDEs, ndof):
        """
        Helper function that calculates <z, z>_i, i.e., the
        inner product of z only over aggregate i

        Returns a vector of length num_aggregates where entry i is <z, z>_i
        """
        # square entrywise, then sum each dof-stride's entries into its
        # aggregate via the AggOp indicator columns
        z = numpy.ravel(z)*numpy.ravel(z)
        innerp = numpy.zeros((1, AggOp.shape[1]), dtype=z.dtype)
        for j in range(nPDEs):
            innerp += z[slice(j, ndof, nPDEs)].reshape(1, -1) * AggOp
        return innerp.reshape(-1, 1)

    def get_aggregate_weights(AggOp, A, z, nPDEs, ndof):
        """
        Calculate local aggregate quantities

        Return a vector of length num_aggregates where entry i is
        (card(agg_i)/A.shape[0]) ( <Az, z>/rho(A) )
        """
        rho = approximate_spectral_radius(A)
        zAz = numpy.dot(z.reshape(1, -1), A*z.reshape(-1, 1))
        # cardinality of each aggregate, in dofs
        card = nPDEs*(AggOp.indptr[1:]-AggOp.indptr[:-1])
        weights = (numpy.ravel(card)*zAz)/(A.shape[0]*rho)
        return weights.reshape(-1, 1)

    # Run test 1, which finds where x is small relative to its energy
    weights = Ca*get_aggregate_weights(AggOp, A, x, nPDEs, ndof)
    mask1 = aggregate_wise_inner_product(x, AggOp, nPDEs, ndof) <= weights

    # Run test 2, which finds where x is already approximated
    # accurately by the existing T
    projected_x = x - T*(T.T*x)
    mask2 = aggregate_wise_inner_product(projected_x,
                                         AggOp, nPDEs, ndof) <= weights

    # Combine masks and zero out corresponding aggregates in x
    # (boolean '+' acts as logical OR here)
    mask = numpy.ravel(mask1 + mask2).nonzero()[0]
    if mask.shape[0] > 0:
        # expand aggregate indices to the dof indices they own
        mask = nPDEs*AggOp[:, mask].indices
        for j in range(nPDEs):
            x[mask+j] = 0.0
def unpack_arg(v):
    """Helper function for local methods.

    Normalizes a parameter spec to a (name, kwargs) pair: a tuple is
    split into its first two entries, anything else pairs with ``{}``.
    """
    if not isinstance(v, tuple):
        return v, {}
    return v[0], v[1]
def adaptive_sa_solver(A, initial_candidates=None, symmetry='hermitian',
                       pdef=True, num_candidates=1, candidate_iters=5,
                       improvement_iters=0, epsilon=0.1,
                       max_levels=10, max_coarse=100, aggregate='standard',
                       prepostsmoother=('gauss_seidel',
                                        {'sweep': 'symmetric'}),
                       smooth=('jacobi', {}), strength='symmetric',
                       coarse_solver='pinv2',
                       eliminate_local=(False, {'Ca': 1.0}), keep=False,
                       **kwargs):
    r"""
    Create a multilevel solver using Adaptive Smoothed Aggregation (aSA)

    Parameters
    ----------
    A : {csr_matrix, bsr_matrix}
        Square matrix in CSR or BSR format
    initial_candidates : {None, n x m dense matrix}
        If a matrix, then this forms the basis for the first m candidates.
        Also in this case, the initial setup stage is skipped, because this
        provides the first candidate(s). If None, then a random initial guess
        and relaxation are used to inform the initial candidate.
    symmetry : {string}
        'symmetric' refers to both real and complex symmetric
        'hermitian' refers to both complex Hermitian and real Hermitian
        Note that for the strictly real case, these two options are the same
        Note that this flag does not denote definiteness of the operator
    pdef : {bool}
        True or False, whether A is known to be positive definite.
    num_candidates : {integer} : default 1
        Number of near-nullspace candidates to generate
    candidate_iters : {integer} : default 5
        Number of smoothing passes/multigrid cycles used at each level of
        the adaptive setup phase
    improvement_iters : {integer} : default 0
        Number of times each candidate is improved
    epsilon : {float} : default 0.1
        Target convergence factor
    max_levels : {integer} : default 10
        Maximum number of levels to be used in the multilevel solver.
    max_coarse : {integer} : default 100
        Maximum number of variables permitted on the coarse grid.
    prepostsmoother : {string or dict}
        Pre- and post-smoother used in the adaptive method
    strength : ['symmetric', 'classical', 'evolution',
                ('predefined', {'C': csr_matrix}), None]
        Method used to determine the strength of connection between unknowns of
        the linear system. See smoothed_aggregation_solver(...) documentation.
    aggregate : ['standard', 'lloyd', 'naive',
                 ('predefined', {'AggOp': csr_matrix})]
        Method used to aggregate nodes. See smoothed_aggregation_solver(...)
        documentation.
    smooth : ['jacobi', 'richardson', 'energy', None]
        Method used to smooth the tentative prolongator. See
        smoothed_aggregation_solver(...) documentation
    coarse_solver : ['splu', 'lu', 'cholesky, 'pinv', 'gauss_seidel', ... ]
        Solver used at the coarsest level of the MG hierarchy.
        Optionally, may be a tuple (fn, args), where fn is a string such as
        ['splu', 'lu', ...] or a callable function, and args is a dictionary of
        arguments to be passed to fn.
    eliminate_local : {tuple}
        Length 2 tuple. If the first entry is True, then eliminate candidates
        where they aren't needed locally, using the second entry of the tuple
        to contain arguments to local elimination routine. Given the rigid
        sparse data structures, this doesn't help much, if at all, with
        complexity. It's more of a diagnostic utility.
    keep: {bool} : default False
        Flag to indicate keeping extra operators in the hierarchy for
        diagnostics. For example, if True, then strength of connection (C),
        tentative prolongation (T), and aggregation (AggOp) are kept.

    Returns
    -------
    multilevel_solver : multilevel_solver
        Smoothed aggregation solver with adaptively generated candidates

    Notes
    -----
    - Floating point value representing the "work" required to generate
      the solver. This value is the total cost of just relaxation, relative
      to the fine grid. The relaxation method used is assumed to be symmetric
      Gauss-Seidel.

    - Unlike the standard Smoothed Aggregation (SA) method, adaptive SA does
      not require knowledge of near-nullspace candidate vectors. Instead, an
      adaptive procedure computes one or more candidates 'from scratch'. This
      approach is useful when no candidates are known or the candidates have
      been invalidated due to changes to matrix A.

    Examples
    --------
    >>> from pyamg.gallery import stencil_grid
    >>> from pyamg.aggregation import adaptive_sa_solver
    >>> import numpy
    >>> A=stencil_grid([[-1,-1,-1],[-1,8.0,-1],[-1,-1,-1]],
    ...                (31,31),format='csr')
    >>> [asa,work] = adaptive_sa_solver(A,num_candidates=1)
    >>> residuals=[]
    >>> x=asa.solve(b=numpy.ones((A.shape[0],)), x0=numpy.ones((A.shape[0],)),
    ...             residuals=residuals)

    References
    ----------
    .. [1] Brezina, Falgout, MacLachlan, Manteuffel, McCormick, and Ruge
       "Adaptive Smoothed Aggregation ($\alpha$SA) Multigrid"
       SIAM Review Volume 47, Issue 2 (2005)
       http://www.cs.umn.edu/~maclach/research/aSA2.pdf
    """
    if not (isspmatrix_csr(A) or isspmatrix_bsr(A)):
        try:
            A = csr_matrix(A)
            warn("Implicit conversion of A to CSR",
                 scipy.sparse.SparseEfficiencyWarning)
        except Exception:
            # csr_matrix() failed -- A is not convertible to a sparse format
            raise TypeError('Argument A must have type csr_matrix or '
                            'bsr_matrix, or be convertible to csr_matrix')

    A = A.asfptype()
    if A.shape[0] != A.shape[1]:
        raise ValueError('expected square matrix')

    # Track work in terms of relaxation
    work = numpy.zeros((1,))

    # Preprocess parameters
    max_levels, max_coarse, strength =\
        preprocess_str_or_agg(strength, max_levels, max_coarse)
    smooth = preprocess_smooth(smooth, max_levels)
    max_levels, max_coarse, aggregate =\
        preprocess_str_or_agg(aggregate, max_levels, max_coarse)

    # Develop initial candidate(s).  Note that any predefined aggregation is
    # preserved.
    if initial_candidates is None:
        B, aggregate, strength =\
            initial_setup_stage(A, symmetry, pdef, candidate_iters, epsilon,
                                max_levels, max_coarse, aggregate,
                                prepostsmoother, smooth, strength, work)
        # Normalize B
        B = (1.0/norm(B, 'inf')) * B
        num_candidates -= 1
    else:
        # Otherwise, use predefined candidates
        B = initial_candidates
        num_candidates -= B.shape[1]
        # Generate Aggregation and Strength Operators (the brute force way)
        sa = smoothed_aggregation_solver(A, B=B, symmetry=symmetry,
                                         presmoother=prepostsmoother,
                                         postsmoother=prepostsmoother,
                                         smooth=smooth, strength=strength,
                                         max_levels=max_levels,
                                         max_coarse=max_coarse,
                                         aggregate=aggregate,
                                         coarse_solver=coarse_solver,
                                         Bimprove=None, keep=True, **kwargs)
        if len(sa.levels) > 1:
            # Set strength-of-connection and aggregation
            aggregate = [('predefined', {'AggOp': sa.levels[i].AggOp.tocsr()})
                         for i in range(len(sa.levels) - 1)]
            strength = [('predefined', {'C': sa.levels[i].C.tocsr()})
                        for i in range(len(sa.levels) - 1)]

    # Develop additional candidates
    for i in range(num_candidates):
        x = general_setup_stage(
            smoothed_aggregation_solver(A, B=B, symmetry=symmetry,
                                        presmoother=prepostsmoother,
                                        postsmoother=prepostsmoother,
                                        smooth=smooth,
                                        coarse_solver=coarse_solver,
                                        aggregate=aggregate,
                                        strength=strength, Bimprove=None,
                                        keep=True, **kwargs),
            symmetry, candidate_iters, prepostsmoother, smooth,
            eliminate_local, coarse_solver, work)

        # Normalize x and add to candidate list
        x = x/norm(x, 'inf')
        if numpy.isinf(x[0]) or numpy.isnan(x[0]):
            raise ValueError('Adaptive candidate is all 0.')
        B = numpy.hstack((B, x.reshape(-1, 1)))

    # Improve candidates
    if B.shape[1] > 1 and improvement_iters > 0:
        b = numpy.zeros((A.shape[0], 1), dtype=A.dtype)
        for i in range(improvement_iters):
            for j in range(B.shape[1]):
                # Run a V-cycle built on everything except candidate j, while
                # using candidate j as the initial guess
                x0 = B[:, 0]
                B = B[:, 1:]
                sa_temp =\
                    smoothed_aggregation_solver(A, B=B, symmetry=symmetry,
                                                presmoother=prepostsmoother,
                                                postsmoother=prepostsmoother,
                                                smooth=smooth,
                                                coarse_solver=coarse_solver,
                                                aggregate=aggregate,
                                                strength=strength,
                                                Bimprove=None, keep=True,
                                                **kwargs)
                # numpy.float was removed from NumPy; the builtin float is
                # the equivalent (both mean float64 here)
                x = sa_temp.solve(b, x0=x0,
                                  tol=float(numpy.finfo(float).tiny),
                                  maxiter=candidate_iters, cycle='V')
                work[:] += 2 * sa_temp.operator_complexity() *\
                    sa_temp.levels[0].A.nnz * candidate_iters

                # Apply local elimination
                elim, elim_kwargs = unpack_arg(eliminate_local)
                if elim is True:
                    x = x/norm(x, 'inf')
                    eliminate_local_candidates(x, sa_temp.levels[0].AggOp, A,
                                               sa_temp.levels[0].T,
                                               **elim_kwargs)

                # Normalize x and add to candidate list
                x = x/norm(x, 'inf')
                if numpy.isinf(x[0]) or numpy.isnan(x[0]):
                    raise ValueError('Adaptive candidate is all 0.')
                B = numpy.hstack((B, x.reshape(-1, 1)))

    elif improvement_iters > 0:
        # Special case for improving a single candidate
        max_levels = len(aggregate) + 1
        max_coarse = 0
        for i in range(improvement_iters):
            B, aggregate, strength =\
                initial_setup_stage(A, symmetry, pdef, candidate_iters,
                                    epsilon, max_levels, max_coarse,
                                    aggregate, prepostsmoother, smooth,
                                    strength, work, initial_candidate=B)
            # Normalize B
            B = (1.0/norm(B, 'inf'))*B

    return [smoothed_aggregation_solver(A, B=B, symmetry=symmetry,
                                        presmoother=prepostsmoother,
                                        postsmoother=prepostsmoother,
                                        smooth=smooth,
                                        coarse_solver=coarse_solver,
                                        aggregate=aggregate, strength=strength,
                                        Bimprove=None, keep=keep, **kwargs),
            work[0]/A.nnz]
def initial_setup_stage(A, symmetry, pdef, candidate_iters, epsilon,
                        max_levels, max_coarse, aggregate, prepostsmoother,
                        smooth, strength, work, initial_candidate=None):
    r"""
    Computes a complete aggregation and the first near-nullspace candidate
    following Algorithm 3 in Brezina et al.

    Parameters
    ----------
    A : sparse matrix
        Fine-level operator.
    symmetry : str
        'symmetric' or 'hermitian'; determines whether R is built from P.T
        or P.H at each level.
    pdef : bool
        If True, assume A is positive definite and use the A inner-product
        for the convergence test; otherwise use the A^H A inner-product.
    candidate_iters : int
        Number of test relaxation iterations.
    epsilon : float
        Minimum acceptable relaxation convergence factor.
    max_levels : int
        Maximum number of levels in the temporary hierarchy built here.
    max_coarse : int
        Maximum size of the coarsest level.
    aggregate, prepostsmoother, smooth, strength
        Per-level method descriptors (string or (string, kwargs) tuple, or
        lists thereof), as in smoothed_aggregation_solver.
    work : ndarray
        One-element accumulator of work performed (units of fine-level
        nonzeros); updated in place.
    initial_candidate : array or None
        If given, used as the initial candidate instead of a random vector.

    Returns
    -------
    x : array
        First near-nullspace candidate on the finest level.
    aggregate, strength : lists
        'predefined' descriptors recording the aggregation and strength
        operators constructed here, for reuse by later setup stages.

    References
    ----------
    .. [1] Brezina, Falgout, MacLachlan, Manteuffel, McCormick, and Ruge
       "Adaptive Smoothed Aggregation ($\alpha$SA) Multigrid"
       SIAM Review Volume 47, Issue 2 (2005)
       http://www.cs.umn.edu/~maclach/research/aSA2.pdf
    """
    ##
    # Define relaxation routine
    def relax(A, x):
        # Relax x in place toward the near-nullspace of A (i.e. on A x = 0)
        # using the configured pre/post smoother.
        fn, kwargs = unpack_arg(prepostsmoother)
        if fn == 'gauss_seidel':
            gauss_seidel(A, x, numpy.zeros_like(x),
                         iterations=candidate_iters, sweep='symmetric')
        elif fn == 'gauss_seidel_nr':
            gauss_seidel_nr(A, x, numpy.zeros_like(x),
                            iterations=candidate_iters, sweep='symmetric')
        elif fn == 'gauss_seidel_ne':
            gauss_seidel_ne(A, x, numpy.zeros_like(x),
                            iterations=candidate_iters, sweep='symmetric')
        elif fn == 'jacobi':
            jacobi(A, x, numpy.zeros_like(x), iterations=1,
                   omega=1.0 / rho_D_inv_A(A))
        elif fn == 'richardson':
            # BUG FIX: keyword was misspelled 'coeffients', which raised a
            # TypeError whenever this branch was taken
            polynomial(A, x, numpy.zeros_like(x), iterations=1,
                       coefficients=[1.0/approximate_spectral_radius(A)])
        elif fn == 'gmres':
            x[:] = (gmres(A, numpy.zeros_like(x), x0=x,
                          maxiter=candidate_iters)[0]).reshape(x.shape)
        else:
            raise TypeError('Unrecognized smoother')

    # flag for skipping steps f-i in step 4
    # NOTE(review): this flag is never set to False below, so the f-i branch
    # is currently dead code; kept as-is to match the reference algorithm.
    skip_f_to_i = True

    # step 1: initial candidate (random unless one was supplied)
    A_l = A
    if initial_candidate is None:
        x = scipy.rand(A_l.shape[0], 1)
        if A_l.dtype == complex:
            x = x + 1.0j*scipy.rand(A_l.shape[0], 1)
    else:
        x = numpy.array(initial_candidate, dtype=A_l.dtype)

    # step 2: relax on the fine level
    relax(A_l, x)
    work[:] += A_l.nnz * candidate_iters*2

    # step 3
    # not advised to stop the iteration here: often the first relaxation pass
    # _is_ good, but the remaining passes are poor
    # if x_A_x/x_A_x_old < epsilon:
    #    # relaxation alone is sufficient
    #    return x, []

    # step 4: coarsen while carrying the candidate down the hierarchy
    As = [A]
    xs = [x]
    Ps = []
    AggOps = []
    StrengthOps = []
    while A.shape[0] > max_coarse and max_levels > 1:
        # The real check to break from the while loop is below

        # Begin constructing next level
        fn, kwargs = unpack_arg(strength[len(As)-1])  # step 4b
        if fn == 'symmetric':
            C_l = symmetric_strength_of_connection(A_l, **kwargs)
            # Diagonal must be nonzero
            C_l = C_l + eye(C_l.shape[0], C_l.shape[1], format='csr')
        elif fn == 'classical':
            C_l = classical_strength_of_connection(A_l, **kwargs)
            # Diagonal must be nonzero
            C_l = C_l + eye(C_l.shape[0], C_l.shape[1], format='csr')
            if isspmatrix_bsr(A_l):
                # BUG FIX: was amalgamate(C, ...) with undefined name 'C'
                C_l = amalgamate(C_l, A_l.blocksize[0])
        elif (fn == 'ode') or (fn == 'evolution'):
            C_l = evolution_strength_of_connection(A_l,
                                                   numpy.ones(
                                                       (A_l.shape[0], 1),
                                                       dtype=A.dtype),
                                                   **kwargs)
        elif fn == 'predefined':
            C_l = kwargs['C'].tocsr()
        elif fn is None:
            C_l = A_l.tocsr()
        else:
            raise ValueError('unrecognized strength of connection method: %s' %
                             str(fn))

        # In SA, strength represents "distance", so we take magnitude of
        # complex values
        if C_l.dtype == complex:
            C_l.data = numpy.abs(C_l.data)

        # Create a unified strength framework so that large values represent
        # strong connections and small values represent weak connections
        # BUG FIX: 'evolution' was misspelled 'evolutin', so evolution-based
        # strength data was never inverted
        if (fn == 'ode') or (fn == 'evolution') or (fn == 'energy_based'):
            C_l.data = 1.0 / C_l.data

        # aggregation
        fn, kwargs = unpack_arg(aggregate[len(As) - 1])
        if fn == 'standard':
            AggOp = standard_aggregation(C_l, **kwargs)[0]
        elif fn == 'lloyd':
            AggOp = lloyd_aggregation(C_l, **kwargs)[0]
        elif fn == 'predefined':
            AggOp = kwargs['AggOp'].tocsr()
        else:
            raise ValueError('unrecognized aggregation method %s' % str(fn))

        T_l, x = fit_candidates(AggOp, x)  # step 4c

        fn, kwargs = unpack_arg(smooth[len(As)-1])  # step 4d
        if fn == 'jacobi':
            P_l = jacobi_prolongation_smoother(A_l, T_l, C_l, x, **kwargs)
        elif fn == 'richardson':
            P_l = richardson_prolongation_smoother(A_l, T_l, **kwargs)
        elif fn == 'energy':
            P_l = energy_prolongation_smoother(A_l, T_l, C_l, x, None,
                                               (False, {}), **kwargs)
        elif fn is None:
            P_l = T_l
        else:
            raise ValueError('unrecognized prolongation smoother method %s' %
                             str(fn))

        # R should reflect A's structure   # step 4e
        if symmetry == 'symmetric':
            A_l = P_l.T.asformat(P_l.format) * A_l * P_l
        elif symmetry == 'hermitian':
            A_l = P_l.H.asformat(P_l.format) * A_l * P_l

        StrengthOps.append(C_l)
        AggOps.append(AggOp)
        Ps.append(P_l)
        As.append(A_l)

        # skip to step 5 as in step 4e
        if (A_l.shape[0] <= max_coarse) or (len(AggOps) + 1 >= max_levels):
            break

        if not skip_f_to_i:
            x_hat = x.copy()  # step 4g
            relax(A_l, x)  # step 4h
            work[:] += A_l.nnz*candidate_iters*2
            if pdef is True:
                x_A_x = numpy.dot(numpy.conjugate(x).T, A_l*x)
                xhat_A_xhat = numpy.dot(numpy.conjugate(x_hat).T, A_l*x_hat)
                err_ratio = (x_A_x/xhat_A_xhat)**(1.0/candidate_iters)
            else:
                # use A.H A inner-product
                Ax = A_l * x
                # Axhat = A_l * x_hat
                x_A_x = numpy.dot(numpy.conjugate(Ax).T, Ax)
                xhat_A_xhat = numpy.dot(numpy.conjugate(x_hat).T, A_l*x_hat)
                err_ratio = (x_A_x/xhat_A_xhat)**(1.0/candidate_iters)

            if err_ratio < epsilon:  # step 4i
                # sufficient convergence; skip the f-i test from now on
                skip_f_to_i = True
                if x_A_x == 0:
                    x = x_hat  # need to restore x
        else:
            # just carry out relaxation, don't check for convergence
            relax(A_l, x)  # step 4h
            work[:] += 2 * A_l.nnz * candidate_iters

        # store xs for diagnostic use and for use in step 5
        xs.append(x)

    # step 5
    # Extend coarse-level candidate to the finest level
    # --> note that we start with the x from the second coarsest level
    x = xs[-1]
    # make sure that xs[-1] has been relaxed by step 4h, i.e. relax(As[-2], x)
    for lev in range(len(Ps)-2, -1, -1):  # lev = coarsest ... finest-1
        P = Ps[lev]  # I: lev --> lev+1
        A = As[lev]  # A on lev+1
        x = P * x
        relax(A, x)
        work[:] += A.nnz*candidate_iters*2

    # Set predefined strength of connection and aggregation so later setup
    # stages reuse the hierarchy constructed here
    if len(AggOps) > 1:
        aggregate = [('predefined', {'AggOp': AggOps[i]})
                     for i in range(len(AggOps))]
        strength = [('predefined', {'C': StrengthOps[i]})
                    for i in range(len(StrengthOps))]

    return x, aggregate, strength  # first candidate
def general_setup_stage(ml, symmetry, candidate_iters, prepostsmoother,
                        smooth, eliminate_local, coarse_solver, work):
    """
    Computes additional candidates and improvements
    following Algorithm 4 in Brezina et al.

    Parameters
    ----------
    ml : multilevel_solver
        Current hierarchy; its levels (T, P, R, A, B) are updated in place.
    symmetry : str
        'symmetric' or 'hermitian'; determines whether R is P.T or P.H.
    candidate_iters : int
        Number of test relaxation iterations.
    prepostsmoother : str or tuple
        Smoother descriptor used both for the temporary solvers and for the
        final candidate relaxation sweep.
    smooth : list
        Per-level prolongation smoother descriptors.
    eliminate_local : tuple
        (bool, kwargs); when True, locally eliminate the candidate at the
        end via eliminate_local_candidates.
    coarse_solver : str or tuple
        Coarse solver for the temporary solvers built here.
    work : ndarray
        One-element accumulator of work performed; updated in place.

    Returns
    -------
    x : array
        The new near-nullspace candidate as a fine-level column vector.

    References
    ----------
    .. [1] Brezina, Falgout, MacLachlan, Manteuffel, McCormick, and Ruge
       "Adaptive Smoothed Aggregation (alphaSA) Multigrid"
       SIAM Review Volume 47, Issue 2 (2005)
       http://www.cs.umn.edu/~maclach/research/aSA2.pdf
    """
    def make_bridge(T):
        # Expand T's row blocks from K to K+1 rows so T maps into a space
        # with one extra dof per node; the added rows are left zero.
        M, N = T.shape
        K = T.blocksize[0]
        bnnz = T.indptr[-1]
        # the K+1 represents the new dof introduced by the new candidate. the
        # bridge 'T' ignores this new dof and just maps zeros there
        data = numpy.zeros((bnnz, K+1, K), dtype=T.dtype)
        data[:, :-1, :] = T.data
        # floor division keeps the shape integral on both Python 2 and 3
        # (K always divides M for a BSR matrix)
        return bsr_matrix((data, T.indices, T.indptr),
                          shape=((K + 1) * (M // K), N))

    def expand_candidates(B_old, nodesize):
        # insert a new dof that is always zero, to create NullDim+1 dofs per
        # node in B
        NullDim = B_old.shape[1]
        nnodes = B_old.shape[0] // nodesize
        Bnew = numpy.zeros((nnodes, nodesize+1, NullDim), dtype=B_old.dtype)
        Bnew[:, :-1, :] = B_old.reshape(nnodes, nodesize, NullDim)
        return Bnew.reshape(-1, NullDim)

    levels = ml.levels

    # Start from a random fine-level vector and relax it through the
    # existing hierarchy
    x = scipy.rand(levels[0].A.shape[0], 1)
    if levels[0].A.dtype == complex:
        x = x + 1.0j*scipy.rand(levels[0].A.shape[0], 1)
    b = numpy.zeros_like(x)

    # 'float' rather than the deprecated numpy.float alias (same dtype)
    x = ml.solve(b, x0=x, tol=float(numpy.finfo(float).tiny),
                 maxiter=candidate_iters)
    work[:] += ml.operator_complexity()*ml.levels[0].A.nnz*candidate_iters*2

    T0 = levels[0].T.copy()

    # TEST FOR CONVERGENCE HERE

    for i in range(len(ml.levels) - 2):
        # alpha-SA paper does local elimination here, but after talking
        # to Marian, its not clear that this helps things
        # fn, kwargs = unpack_arg(eliminate_local)
        # if fn == True:
        #    eliminate_local_candidates(x,levels[i].AggOp,levels[i].A,
        #    levels[i].T, **kwargs)

        # add candidate to B
        B = numpy.hstack((levels[i].B, x.reshape(-1, 1)))

        # construct Ptent
        T, R = fit_candidates(levels[i].AggOp, B)

        levels[i].T = T
        x = R[:, -1].reshape(-1, 1)

        # smooth P
        fn, kwargs = unpack_arg(smooth[i])
        if fn == 'jacobi':
            levels[i].P = jacobi_prolongation_smoother(levels[i].A, T,
                                                       levels[i].C, R,
                                                       **kwargs)
        elif fn == 'richardson':
            levels[i].P = richardson_prolongation_smoother(levels[i].A, T,
                                                           **kwargs)
        elif fn == 'energy':
            levels[i].P = energy_prolongation_smoother(levels[i].A, T,
                                                       levels[i].C, R, None,
                                                       (False, {}), **kwargs)
            x = R[:, -1].reshape(-1, 1)
        elif fn is None:
            levels[i].P = T
        else:
            raise ValueError('unrecognized prolongation smoother method %s' %
                             str(fn))

        # construct R
        if symmetry == 'symmetric':  # R should reflect A's structure
            levels[i].R = levels[i].P.T.asformat(levels[i].P.format)
        elif symmetry == 'hermitian':
            levels[i].R = levels[i].P.H.asformat(levels[i].P.format)

        # construct coarse A
        levels[i+1].A = levels[i].R * levels[i].A * levels[i].P

        # construct bridging P
        T_bridge = make_bridge(levels[i+1].T)
        R_bridge = levels[i+2].B

        # smooth bridging P
        fn, kwargs = unpack_arg(smooth[i+1])
        if fn == 'jacobi':
            levels[i+1].P = jacobi_prolongation_smoother(levels[i+1].A,
                                                         T_bridge,
                                                         levels[i+1].C,
                                                         R_bridge, **kwargs)
        elif fn == 'richardson':
            levels[i+1].P = richardson_prolongation_smoother(levels[i+1].A,
                                                             T_bridge,
                                                             **kwargs)
        elif fn == 'energy':
            levels[i+1].P = energy_prolongation_smoother(levels[i+1].A,
                                                         T_bridge,
                                                         levels[i+1].C,
                                                         R_bridge, None,
                                                         (False, {}),
                                                         **kwargs)
        elif fn is None:
            levels[i+1].P = T_bridge
        else:
            raise ValueError('unrecognized prolongation smoother method %s' %
                             str(fn))

        # construct the "bridging" R
        if symmetry == 'symmetric':  # R should reflect A's structure
            levels[i+1].R = levels[i+1].P.T.asformat(levels[i+1].P.format)
        elif symmetry == 'hermitian':
            levels[i+1].R = levels[i+1].P.H.asformat(levels[i+1].P.format)

        # run solver on candidate
        solver = multilevel_solver(levels[i+1:], coarse_solver=coarse_solver)
        change_smoothers(solver, presmoother=prepostsmoother,
                         postsmoother=prepostsmoother)
        x = solver.solve(numpy.zeros_like(x), x0=x,
                         tol=float(numpy.finfo(float).tiny),
                         maxiter=candidate_iters)
        work[:] += 2 * solver.operator_complexity() * solver.levels[0].A.nnz *\
            candidate_iters*2

        # update values on next level
        levels[i+1].B = R[:, :-1].copy()
        levels[i+1].T = T_bridge

    # note that we only use the x from the second coarsest level
    fn, kwargs = unpack_arg(prepostsmoother)
    for lvl in reversed(levels[:-2]):
        x = lvl.P * x
        work[:] += lvl.A.nnz*candidate_iters*2

        if fn == 'gauss_seidel':
            # only relax at nonzeros, so as not to mess up any locally dropped
            # candidates
            indices = numpy.ravel(x).nonzero()[0]
            gauss_seidel_indexed(lvl.A, x, numpy.zeros_like(x), indices,
                                 iterations=candidate_iters, sweep='symmetric')
        elif fn == 'gauss_seidel_ne':
            gauss_seidel_ne(lvl.A, x, numpy.zeros_like(x),
                            iterations=candidate_iters, sweep='symmetric')
        elif fn == 'gauss_seidel_nr':
            gauss_seidel_nr(lvl.A, x, numpy.zeros_like(x),
                            iterations=candidate_iters, sweep='symmetric')
        elif fn == 'jacobi':
            jacobi(lvl.A, x, numpy.zeros_like(x), iterations=1,
                   omega=1.0 / rho_D_inv_A(lvl.A))
        elif fn == 'richardson':
            # BUG FIX: keyword was misspelled 'coeffients', which raised a
            # TypeError whenever this branch was taken
            polynomial(lvl.A, x, numpy.zeros_like(x), iterations=1,
                       coefficients=[1.0/approximate_spectral_radius(lvl.A)])
        elif fn == 'gmres':
            x[:] = (gmres(lvl.A, numpy.zeros_like(x), x0=x,
                          maxiter=candidate_iters)[0]).reshape(x.shape)
        else:
            raise TypeError('Unrecognized smoother')

    # x will be dense again, so we have to drop locally again
    elim, elim_kwargs = unpack_arg(eliminate_local)
    if elim is True:
        x = x/norm(x, 'inf')
        eliminate_local_candidates(x, levels[0].AggOp, levels[0].A, T0,
                                   **elim_kwargs)

    return x.reshape(-1, 1)
| {
"repo_name": "pombreda/pyamg",
"path": "pyamg/aggregation/adaptive.py",
"copies": "1",
"size": "32061",
"license": "bsd-3-clause",
"hash": 6261546143239529000,
"line_mean": 40.9646596859,
"line_max": 79,
"alpha_frac": 0.5377249618,
"autogenerated": false,
"ratio": 3.8309236467917316,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9866537075279804,
"avg_score": 0.0004223066623855241,
"num_lines": 764
} |
"""Adaptive tracks for psychophysics (individual, or multiple randomly dealt)
"""
# Author: Ross Maddox <ross.maddox@rochester.edu>
#
# License: BSD (3-clause)
import numpy as np
import time
from scipy.stats import binom
import json
import warnings
from .. import ExperimentController
# =============================================================================
# Set up the logging callback (use write_data_line or do nothing)
# =============================================================================
def _callback_dummy(event_type, value=None, timestamp=None):
"""Take the arguments of write_data_line, but do nothing.
"""
pass
def _check_callback(callback):
    """Normalize ``callback`` to a callable, raising on disallowed types."""
    # None -> no-op; ExperimentController -> its write_data_line method;
    # anything else must already be callable.
    if callback is None:
        resolved = _callback_dummy
    elif isinstance(callback, ExperimentController):
        resolved = callback.write_data_line
    else:
        resolved = callback
    if callable(resolved):
        return resolved
    raise TypeError('callback must be a callable, None, or an instance of '
                    'ExperimentController.')
# =============================================================================
# Define the TrackerUD Class
# =============================================================================
class TrackerUD(object):
    r"""Up-down adaptive tracker

    This class implements a standard up-down adaptive tracker object. Based on
    how it is configured, it can be used to run a fixed-step m-down n-up
    tracker (staircase), or it can implement a weighted up-down procedure.

    Parameters
    ----------
    callback : callable | ExperimentController | None
        The function that will be used to print the data, usually to the
        experiment .tab file. It should follow the prototype of
        ``ExperimentController.write_data_line``. If an instance of
        ``ExperimentController`` is given, then it will take that object's
        ``write_data_line`` function. If None is given, then it will not write
        the data anywhere.
    up : int
        The number of wrong answers necessary to move the tracker level up.
    down : int
        The number of correct answers necessary to move the tracker level down.
    step_size_up : float | list of float
        The size of the step when the tracker moves up. If float it will stay
        the same. If list of float then it will change when ``change_indices``
        are encountered. See note below for more specific information on
        dynamic tracker parameters specified with a list.
    step_size_down : float | list of float
        The size of the step when the tracker moves down. If float it will stay
        the same. If list of float then it will change when ``change_indices``
        are encountered. See note below for more specific information on
        dynamic tracker parameters specified with a list.
    stop_reversals : int | np.inf
        The minimum number of reversals before the tracker stops. If
        ``stop_trials`` is also specified, the tracker will stop when either
        condition is satisfied.
    stop_trials : int | np.inf
        The minimum number of trials before the tracker stops. If
        ``stop_reversals`` is also specified, the tracker will stop when either
        condition is satisfied. Only use np.inf if you are sure that the
        tracker will reach the reversal stopping criteria without getting stuck
        on either x_min or x_max.
    start_value : float
        The starting level of the tracker.
    change_indices : list of int | None
        The points along the tracker to change its step sizes. Has an effect
        where ``step_size_up`` and ``step_size_down`` are
        lists. The length of ``change_indices`` must be the same as the
        length of ``step_size_up`` and ``step_size_down`` minus 1. See note
        below for more specific usage information. Should be None if static
        step sizes are used.
    change_rule : str
        Whether to change parameters based on 'trials' or 'reversals'.
    x_min : float
        The minimum value that the tracker level (``x``) is allowed to take.
    x_max : float
        The maximum value that the tracker level (``x``) is allowed to take.
    repeat_limit : str
        How to treat trials that try to exceed either x_min or x_max.
        ``reversals`` will consider these trials as reversals while staying at
        the same level. ``ignore`` does not consider these trials as reversals.

    Returns
    -------
    tracker : instance of TrackerUD
        The up-down tracker object.

    Notes
    -----
    It is common to use dynamic parameters in an adaptive tracker. For example:
    the step size is often large for the first couple reversals to get in the
    right general area, and then it is reduced for the remainder of the track.
    This class allows that functionality by defining that parameter with a list
    of values rather than a scalar. The parameter will change to the next value
    in the list whenever a change criterion (number of trials or reversals) is
    encountered. This means that the length of the list defining a dynamic
    parameter must always be the 1 longer than that of ``change_indices``.
    For the example given above::

        ..., step_size_up=[1., 0.2], step_size_down=[1., 0.2], \
            change_indices=[2], change_rule='reversals', ...

    would change the step sizes from 1 to 0.2 after two reversals.

    If static step sizes are used, both ``step_size_up``
    and ``step_size_down`` must be scalars and ``change_indices`` must be
    None.
    """
    def __init__(self, callback, up, down, step_size_up, step_size_down,
                 stop_reversals, stop_trials, start_value, change_indices=None,
                 change_rule='reversals', x_min=None, x_max=None,
                 repeat_limit='reversals'):
        self._callback = _check_callback(callback)
        if not isinstance(up, int):
            raise ValueError('up must be an integer')
        self._up = up
        if not isinstance(down, int):
            raise ValueError('down must be an integer')
        self._down = down
        if stop_reversals != np.inf and type(stop_reversals) != int:
            raise ValueError('stop_reversals must be an integer or np.inf')
        self._stop_reversals = stop_reversals
        if stop_trials != np.inf and type(stop_trials) != int:
            raise ValueError('stop_trials must be an integer or np.inf')
        self._stop_trials = stop_trials
        self._start_value = start_value
        # unbounded track when limits are not given
        self._x_min = -np.inf if x_min is None else float(x_min)
        self._x_max = np.inf if x_max is None else float(x_max)
        if change_indices is None:
            change_indices = [0]
            if not np.isscalar(step_size_up):
                raise ValueError('If step_size_up is longer than 1, you must '
                                 'specify change indices.')
            if not np.isscalar(step_size_down):
                raise ValueError('If step_size_down is longer than 1, you must'
                                 ' specify change indices.')
        self._change_indices = np.asarray(change_indices)
        if change_rule not in ['trials', 'reversals']:
            raise ValueError("change_rule must be either 'trials' or "
                             "'reversals'")
        self._change_rule = change_rule
        step_size_up = np.atleast_1d(step_size_up)
        if change_indices != [0]:
            if len(step_size_up) != len(change_indices) + 1:
                raise ValueError('If step_size_up is not scalar it must be one'
                                 ' element longer than change_indices.')
        self._step_size_up = np.asarray(step_size_up, dtype=float)
        step_size_down = np.atleast_1d(step_size_down)
        if change_indices != [0]:
            if len(step_size_down) != len(change_indices) + 1:
                raise ValueError('If step_size_down is not scalar it must be '
                                 'one element longer than change_indices.')
        self._step_size_down = np.asarray(step_size_down, dtype=float)

        self._x = np.asarray([start_value], dtype=float)
        if not np.isscalar(start_value):
            raise TypeError('start_value must be a scalar')
        self._x_current = float(start_value)
        self._responses = np.asarray([], dtype=bool)
        self._reversals = np.asarray([], dtype=int)
        self._n_up = 0
        self._n_down = 0

        # _direction: -1 going down, +1 going up, 0 before the first step
        self._direction = 0
        self._n_trials = 0
        self._n_reversals = 0
        self._stopped = False
        self._repeat_limit = repeat_limit
        # per-trial flags for reversals caused by hitting x_min/x_max
        self._bad_reversals = np.asarray([], dtype=bool)
        self._limit_count = 0

        # Now write the initialization data out
        self._tracker_id = '%s-%s' % (id(self), int(round(time.time() * 1e6)))
        self._callback('tracker_identify', json.dumps(dict(
            tracker_id=self._tracker_id,
            tracker_type='TrackerUD')))

        self._callback('tracker_%s_init' % self._tracker_id, json.dumps(dict(
            callback=None,
            up=self._up,
            down=self._down,
            step_size_up=[float(s) for s in self._step_size_up],
            step_size_down=[float(s) for s in self._step_size_down],
            stop_reversals=self._stop_reversals,
            stop_trials=self._stop_trials,
            start_value=self._start_value,
            change_indices=[int(s) for s in self._change_indices],
            change_rule=self._change_rule,
            x_min=self._x_min,
            x_max=self._x_max,
            repeat_limit=self._repeat_limit)))

    def respond(self, correct):
        """Update the tracker based on the last response.

        Parameters
        ----------
        correct : boolean
            Was the most recent subject response correct?
        """
        if self._stopped:
            raise RuntimeError('Tracker is stopped.')

        bound = False
        bad = False
        reversal = False
        self._responses = np.append(self._responses, correct)
        self._n_trials += 1
        step_dir = 0  # 0 no step, 1 up, -1 down

        # Determine if it's a reversal and which way we're going
        if correct:
            self._n_up = 0
            self._n_down += 1
            if self._n_down == self._down:
                step_dir = -1
                self._n_down = 0
                if self._direction > 0:
                    reversal = True
                    self._n_reversals += 1
                if self._direction >= 0:
                    self._direction = -1
        else:
            self._n_down = 0
            self._n_up += 1
            if self._n_up == self._up:
                step_dir = 1
                self._n_up = 0
                if self._direction < 0:
                    reversal = True
                    self._n_reversals += 1
                if self._direction <= 0:
                    self._direction = 1
        # remember whether we are already sitting at a limit, so a clipped
        # step below can be flagged as a "bad" (limit) reversal
        if self._x[-1] in [self._x_min, self._x_max]:
            bound = True

        # Update the staircase
        if step_dir == 0:
            self._x = np.append(self._x, self._x[-1])
        elif step_dir < 0:
            self._x = np.append(self._x, self._x[-1] -
                                self._current_step_size_down)
        elif step_dir > 0:
            self._x = np.append(self._x, self._x[-1] +
                                self._current_step_size_up)

        # Clip to the limits; BUG FIX: these guards used 'is not' identity
        # comparison on floats (-np.inf is a fresh object each evaluation),
        # which is unreliable -- use value inequality instead
        if self._x_min != -np.inf:
            if self._x[-1] < self._x_min:
                self._x[-1] = self._x_min
                self._limit_count += 1
                if bound:
                    bad = True
                    if self._repeat_limit == 'reversals':
                        reversal = True
                        self._n_reversals += 1
        if self._x_max != np.inf:
            if self._x[-1] > self._x_max:
                self._x[-1] = self._x_max
                self._limit_count += 1
                if bound:
                    bad = True
                    if self._repeat_limit == 'reversals':
                        reversal = True
                        self._n_reversals += 1

        if reversal:
            self._reversals = np.append(self._reversals, self._n_reversals)
        else:
            self._reversals = np.append(self._reversals, 0)
        self._bad_reversals = np.append(self._bad_reversals, bad)

        # Should we stop here?
        self._stopped = self._stop_here()

        if not self._stopped:
            self._x_current = self._x[-1]
            self._callback('tracker_%s_respond' % self._tracker_id,
                           correct)
        else:
            # drop the level that would have been tested next
            self._x = self._x[:-1]
            self._callback(
                'tracker_%s_stop' % self._tracker_id, json.dumps(dict(
                    responses=[int(s) for s in self._responses],
                    reversals=[int(s) for s in self._reversals],
                    x=[float(s) for s in self._x])))

    def check_valid(self, n_reversals):
        """Check whether the last reversals hit x_min or x_max.

        Parameters
        ----------
        n_reversals : int
            Number of reversals (starting from the end to check).

        Returns
        -------
        valid : bool
            True if none of the reversals are at x_min or x_max and False
            otherwise.
        """
        self._valid = (not self._bad_reversals[self._reversals != 0]
                       [-n_reversals:].any())
        return self._valid

    def _stop_here(self):
        # Stop when either criterion (reversals or trials) is reached;
        # warn if the track ever bumped into x_min/x_max along the way.
        if self._n_reversals == self._stop_reversals:
            self._n_stop = True
        elif self._n_trials == self._stop_trials:
            self._n_stop = True
        else:
            self._n_stop = False
        if self._n_stop and self._limit_count > 0:
            warnings.warn('Tracker {} exceeded x_min or x_max bounds {} times.'
                          ''.format(self._tracker_id, self._limit_count))
        return self._n_stop

    def _step_index(self):
        # Index into the dynamic step-size arrays based on how far the
        # track has progressed (in trials or reversals).
        if self._change_rule.lower() == 'reversals':
            self._n_change = self._n_reversals
        elif self._change_rule.lower() == 'trials':
            self._n_change = self._n_trials
        step_index = np.where(self._n_change >= self._change_indices)[0]
        if len(step_index) == 0 or np.array_equal(self._change_indices, [0]):
            step_index = 0
        else:
            step_index = step_index[-1] + 1
        return step_index

    @property
    def _current_step_size_up(self):
        return self._step_size_up[self._step_index()]

    @property
    def _current_step_size_down(self):
        return self._step_size_down[self._step_index()]

    # =========================================================================
    # Define all the public properties
    # =========================================================================
    @property
    def up(self):
        return self._up

    @property
    def down(self):
        return self._down

    @property
    def step_size_up(self):
        return self._step_size_up

    @property
    def step_size_down(self):
        return self._step_size_down

    @property
    def stop_reversals(self):
        return self._stop_reversals

    @property
    def stop_trials(self):
        return self._stop_trials

    @property
    def start_value(self):
        return self._start_value

    @property
    def x_min(self):
        return self._x_min

    @property
    def x_max(self):
        return self._x_max

    @property
    def repeat_limit(self):
        return self._repeat_limit

    @property
    def stopped(self):
        """Has the tracker stopped
        """
        return self._stopped

    @property
    def x(self):
        """The staircase
        """
        return self._x

    @property
    def x_current(self):
        """The current level
        """
        return self._x_current

    @property
    def responses(self):
        """The response history
        """
        return self._responses

    @property
    def n_trials(self):
        """The number of trials so far
        """
        return self._n_trials

    @property
    def n_reversals(self):
        """The number of reversals so far
        """
        return self._n_reversals

    @property
    def reversals(self):
        """The reversal history (0 where there was no reversal)
        """
        return self._reversals

    @property
    def reversal_inds(self):
        """The trial indices which had reversals"""
        return np.where(self._reversals)[0]

    # =========================================================================
    # Display functions
    # =========================================================================
    def plot(self, ax=None, threshold=True, n_skip=2):
        """Plot the adaptive track.

        Parameters
        ----------
        ax : AxesSubplot | None
            The axes to make the plot on. If ``None`` defaults to current axes.
        threshold : bool
            Whether to plot the estimated threshold on the axes. Default is
            True.
        n_skip : int
            See documentation for ``TrackerUD.threshold``.

        Returns
        -------
        fig : Figure
            The figure handle.
        ax : AxesSubplot
            The axes handle.
        lines : list of Line2D
            The handles to the staircase line and the reversal dots.
        """
        import matplotlib.pyplot as plt
        if ax is None:
            fig, ax = plt.subplots(1)
        else:
            fig = ax.figure
        line = ax.plot(1 + np.arange(self._n_trials), self._x, 'k.-')
        line[0].set_label('Trials')
        dots = ax.plot(1 + np.where(self._reversals > 0)[0],
                       self._x[self._reversals > 0], 'ro')
        dots[0].set_label('Reversals')
        ax.set(xlabel='Trial number', ylabel='Level')
        if threshold:
            thresh = self.plot_thresh(n_skip, ax)
            thresh[0].set_label('Estimated Threshold')
        ax.legend()
        return fig, ax, line + dots

    def plot_thresh(self, n_skip=2, ax=None):
        """Plot a line showing the threshold.

        Parameters
        ----------
        n_skip : int
            See documentation for ``TrackerUD.threshold``.
        ax : Axes
            The handle to the axes object. If None, the current axes will
            be used.

        Returns
        -------
        line : list Line2D
            The handle to the threshold line, as returned from ``plt.plot``.
        """
        import matplotlib.pyplot as plt
        if ax is None:
            ax = plt.gca()
        h = ax.plot([1, self._n_trials], [self.threshold(n_skip)] * 2,
                    '--', color='gray')
        return h

    def threshold(self, n_skip=2):
        """Compute the track's threshold.

        Parameters
        ----------
        n_skip : int
            The number of reversals to skip at the beginning when computing
            the threshold.

        Returns
        -------
        threshold : float
            The estimated threshold (NaN if fewer than one usable reversal).

        Notes
        -----
        The threshold is computed as the average of the up reversals and the
        average of the down reversals. In this way if there's an unequal number
        of them the asymmetry won't bias the threshold estimate.

        This can also be used before the track is stopped if the experimenter
        wishes.
        """
        rev_inds = self.reversal_inds[n_skip:]
        if len(rev_inds) < 1:
            return np.nan
        else:
            if self._bad_reversals[rev_inds].any():
                raise ValueError('Cannot calculate thresholds with reversals '
                                 'attempting to exceed x_min or x_max. Try '
                                 'increasing n_skip.')
            # average alternating (up/down) reversals separately so an
            # unequal count of ups vs downs does not bias the estimate
            return (np.mean(self._x[rev_inds[0::2]]) +
                    np.mean(self._x[rev_inds[1::2]])) / 2
# =============================================================================
# Define the TrackerBinom Class
# =============================================================================
class TrackerBinom(object):
    """Binomial hypothesis testing tracker

    This class implements a tracker that runs a test at each trial with the
    null hypothesis that the stimulus presented has no effect on the subject's
    response. This would happen in the case where the subject couldn't hear a
    stimulus, they were not able to do a task, or they did not understand the
    task. This function's main use is training: a subject may move on to the
    experiment when the null hypothesis that they aren't doing the task has
    been rejected.

    Parameters
    ----------
    callback : callable | ExperimentController | None
        The function that will be used to print the data, usually to the
        experiment .tab file. It should follow the prototype of
        ``ExperimentController.write_data_line``. If an instance of
        ``ExperimentController`` is given, then it will take that object's
        ``write_data_line`` function. If ``None`` is given, then it will not
        write the data anywhere.
    alpha : float
        The p-value which is considered significant. Must be between 0 and 1.
        Note that if ``stop_early`` is ``True`` there is the potential for
        multiple comparisons issues and a more stringent ``alpha`` should be
        considered.
    chance : float
        The chance level of the task being performed. Must be between 0 and 1.
    max_trials : int
        The maximum number of trials to run before stopping the track without
        reaching ``alpha``.
    min_trials : int
        The minimum number of trials to run before allowing the track to stop
        on reaching ``alpha``. Has no effect if ``stop_early`` is ``False``.
    stop_early : boolean
        Whether to stop the adaptive track as soon as the track is not able to
        reach ``alpha`` even if the remaining responses are all correct and
        at least ``min_trials`` have been presented.
    x_current : float
        The level that you want to run the test at. This has no bearing on how
        the track runs, and it will never change, but it is both useful to
        document the level and required to exist if using ``TrackerDealer``.

    Returns
    -------
    tracker : instance of TrackerBinom
        The binomial tracker object.

    Notes
    -----
    The task, unlike with an adaptive tracker, should be done all at one
    stimulus level, usually an easy one. The point of this tracker is to
    confirm that the subject understands the task instructions and is capable
    of following them.
    """
    def __init__(self, callback, alpha, chance, max_trials, min_trials=0,
                 stop_early=True, x_current=np.nan):
        self._callback = _check_callback(callback)
        self._alpha = alpha
        self._chance = chance
        self._max_trials = max_trials
        self._min_trials = min_trials  # overrules stop_early
        self._stop_early = stop_early
        # BUG FIX: this was stored as self._pval, but the p_val and success
        # properties read self._p_val, so accessing either before the first
        # respond() raised AttributeError
        self._p_val = 1.0
        self._min_p_val = 0.0  # The best they could do given responses so far
        self._max_p_val = 1.0  # The worst they could do given responses so far
        self._n_trials = 0
        self._n_wrong = 0
        self._n_correct = 0
        self._pc = np.nan
        self._responses = np.asarray([], dtype=bool)
        self._stopped = False
        self._x_current = x_current

        # Now write the initialization data out
        self._tracker_id = id(self)
        self._callback('tracker_identify', json.dumps(dict(
            tracker_id=self._tracker_id,
            tracker_type='TrackerBinom')))

        self._callback('tracker_%s_init' % self._tracker_id, json.dumps(dict(
            callback=None,
            alpha=self._alpha,
            chance=self._chance,
            max_trials=self._max_trials,
            min_trials=self._min_trials,
            stop_early=self._stop_early,
            x_current=self._x_current)))

    def respond(self, correct):
        """Update the tracker based on the last response.

        Parameters
        ----------
        correct : boolean
            Was the most recent subject response correct?
        """
        self._responses = np.append(self._responses, correct)
        self._n_trials += 1
        if not correct:
            self._n_wrong += 1
        else:
            self._n_correct += 1
        self._pc = float(self._n_correct) / self._n_trials
        # p-value of observing this many (or fewer) wrong answers if the
        # subject were guessing at chance
        self._p_val = binom.cdf(self._n_wrong, self._n_trials,
                                1 - self._chance)
        # best possible outcome: every remaining trial answered correctly
        self._min_p_val = binom.cdf(self._n_wrong, self._max_trials,
                                    1 - self._chance)
        # worst possible outcome: every remaining trial answered wrong
        self._max_p_val = binom.cdf(self._n_wrong + (self._max_trials -
                                                     self._n_trials),
                                    self._max_trials, 1 - self._chance)
        if ((self._p_val <= self._alpha) or
                (self._min_p_val >= self._alpha and self._stop_early)):
            if self._n_trials >= self._min_trials:
                self._stopped = True
        if self._n_trials == self._max_trials:
            self._stopped = True

        if self._stopped:
            self._callback(
                'tracker_%s_stop' % self._tracker_id, json.dumps(dict(
                    responses=[int(s) for s in self._responses],
                    p_val=self._p_val,
                    success=int(self.success))))
        else:
            self._callback('tracker_%s_respond' % self._tracker_id, correct)

    # =========================================================================
    # Define all the public properties
    # =========================================================================
    @property
    def alpha(self):
        return self._alpha

    @property
    def chance(self):
        return self._chance

    @property
    def max_trials(self):
        return self._max_trials

    @property
    def stop_early(self):
        return self._stop_early

    @property
    def p_val(self):
        return self._p_val

    @property
    def min_p_val(self):
        return self._min_p_val

    @property
    def max_p_val(self):
        return self._max_p_val

    @property
    def n_trials(self):
        return self._n_trials

    @property
    def n_wrong(self):
        """The number of incorrect trials so far
        """
        return self._n_wrong

    @property
    def n_correct(self):
        """The number of correct trials so far
        """
        return self._n_correct

    @property
    def pc(self):
        """Proportion correct (0-1, NaN before any responses made)
        """
        return self._pc

    @property
    def responses(self):
        """The response history
        """
        return self._responses

    @property
    def stopped(self):
        """Is the tracker stopped
        """
        return self._stopped

    @property
    def success(self):
        """Has the p-value reached significance
        """
        return self._p_val <= self._alpha

    @property
    def x_current(self):
        """Included only for compatibility with TrackerDealer
        """
        return self._x_current

    @property
    def x(self):
        """Included only for compatibility with TrackerDealer
        """
        return np.array([self._x_current for _ in range(self._n_trials)])

    @property
    def stop_rule(self):
        return 'trials'
# =============================================================================
# Define a container for interleaving several tracks simultaneously
# =============================================================================
# TODO: Make it so you can add a list of values for each dimension (such as the
# phase in a BMLD task) and have it return that
# TODO: eventually, make a BaseTracker class so that TrackerDealer can make
# sure it has the methods / properties it needs
class TrackerDealer(object):
"""Class for selecting and pacing independent simultaneous trackers
Parameters
----------
callback : callable | ExperimentController | None
The function that will be used to print the data, usually to the
experiment .tab file. It should follow the prototype of
``ExperimentController.write_data_line``. If an instance of
``ExperimentController`` is given, then it will take that object's
``write_data_line`` function. If None is given, then it will not write
the data anywhere.
trackers : array-like
The trackers to use. Must be instances of
:class:`expyfun.stimuli.TrackerUD` or
:class:`expyfun.stimuli.TrackerBinom`.
max_lag : int
The number of reversals or trials by which the leading tracker may lead
the lagging one. The ``pace_rule`` dictates whether reversals or trials
are used.
pace_rule : str
Must be ``reversals`` or ``trials``.
rand : numpy.random.RandomState | None
The random process used to deal the trackers. If None, the process is
seeded based on the current time.
Returns
-------
dealer : instance of TrackerDealer
The tracker dealer object.
Notes
-----
The trackers can be accessed like a numpy array through the trackers
property, e.g. ``dealer.trackers[0, 1, :]``.
If dealing from TrackerBinom objects (which is probably not a good idea),
``stop_early`` must be ``False`` or else they cannot be ensured to keep
pace.
"""
def __init__(self, callback, trackers, max_lag=1, pace_rule='reversals',
rand=None):
# dim will only be used for user output. Will be stored as 0-d
self._callback = _check_callback(callback)
self._trackers = np.asarray(trackers)
for ti, t in enumerate(self._trackers.flat):
if not isinstance(t, (TrackerUD, TrackerBinom)):
raise TypeError('trackers.ravel()[%d] is type %s, must be '
'TrackerUD or TrackerBinom' % (ti, type(t)))
if isinstance(t, TrackerBinom) and t.stop_early:
raise ValueError('stop_early for trackers.flat[%d] must be '
'False to deal trials from a TrackerBinom '
'object' % (ti,))
self._shape = self._trackers.shape
self._n = np.prod(self._shape)
self._max_lag = max_lag
self._pace_rule = pace_rule
if any([isinstance(t, TrackerBinom) for t in
self._trackers]) and pace_rule == 'reversals':
raise ValueError('pace_rule must be ''trials'' to deal trials from'
' a TrackerBinom object')
if rand is None:
self._seed = int(time.time())
rand = np.random.RandomState(self._seed)
else:
self._seed = None
if not isinstance(rand, np.random.RandomState):
raise TypeError('rand must be of type '
'numpy.random.RandomState')
self._rand = rand
self._trial_complete = True
self._tracker_history = np.array([], dtype=int)
self._response_history = np.array([], dtype=int)
self._x_history = np.array([], dtype=float)
self._dealer_id = id(self)
self._callback('dealer_identify', json.dumps(dict(
dealer_id=self._dealer_id)))
self._callback('dealer_%s_init' % self._dealer_id, json.dumps(dict(
trackers=[s._tracker_id for s in self._trackers.ravel()],
shape=self._shape,
max_lag=self._max_lag,
pace_rule=self._pace_rule)))
    def __iter__(self):
        # The dealer is its own iterator; next/__next__ deal the trials.
        return self
    def next(self):
        """Select the tracker from which the next trial should be run.

        Returns
        -------
        subscripts : list-like
            The position of the selected tracker.
        x_current : float
            The level of the selected tracker.
        """
        if self.stopped:
            raise(StopIteration)
        if not self._trial_complete:
            # A new tracker was requested before responding to the last
            # trial, so record a non-response (NaN) for it
            self._response_history = np.append(self._response_history,
                                               np.nan)
        self._trial_complete = False
        self._current_tracker = self._pick()
        self._tracker_history = np.append(self._tracker_history,
                                          self._current_tracker)
        # Convert the raveled tracker index to array subscripts for the user
        ss = np.unravel_index(self._current_tracker, self.shape)
        level = self._trackers.flat[self._current_tracker].x_current
        self._x_history = np.append(self._x_history, level)
        return ss, level
    def __next__(self):  # for py3k compatibility
        # Python 3 iterator protocol; defers to the py2-style next()
        return self.next()
    def _pick(self):
        """Decide which tracker to draw the next trial from.

        Keeps the trackers in pace: trackers that lag behind the leader
        (in reversals or trials, per ``pace_rule``) are preferred, and the
        final choice among candidates is made at random.
        """
        if self.stopped:
            raise RuntimeError('All trackers have stopped.')
        # Raveled indices of trackers that have not yet stopped
        active = np.where([not t.stopped for t in self._trackers.flat])[0]
        if self._pace_rule == 'reversals':
            pace = np.asarray([t.n_reversals for t in self._trackers.flat])
        elif self._pace_rule == 'trials':
            pace = np.asarray([t.n_trials for t in self._trackers.flat])
        pace = pace[active]
        # How far each active tracker is behind the current leader
        lag = pace.max() - pace
        lag_max = lag.max()
        if lag_max > self._max_lag:
            # This should never happen, but handle it if it does
            inds = active[lag == lag_max]
        elif lag_max > 0:
            # Prefer any tracker that is behind
            inds = active[lag > 0]
        else:
            inds = active
        return inds[self._rand.randint(len(inds))]
    def respond(self, correct):
        """Update the current tracker based on the last response.

        Parameters
        ----------
        correct : boolean
            Was the most recent subject response correct?
        """
        if self._trial_complete:
            raise RuntimeError('You must get a trial before you can respond.')
        # Forward the response to the tracker that dealt the last trial
        self._trackers.flat[self._current_tracker].respond(correct)
        self._trial_complete = True
        self._response_history = np.append(self._response_history, correct)
        # Once every tracker has stopped, write the full dealing history out
        if self.stopped:
            self._callback(
                'dealer_%s_stop' % self._dealer_id, json.dumps(dict(
                    tracker_history=[int(s) for s in self._tracker_history],
                    response_history=[float(s) for s in
                                      self._response_history],
                    x_history=[float(s) for s in self._x_history])))
def history(self, include_skips=False):
"""The history of the dealt trials and the responses
Parameters
----------
include_skips : bool
Whether or not to include trials where a tracker was dealt but
no response was made.
Returns
-------
tracker_history : list of int
The index of which tracker was dealt on each trial. Note that
the ints in this list correspond the the raveled index.
x_history : list of float
The level of the dealt tracker on each trial.
response_history : list of bool
The response history (i.e., correct or incorrect)
"""
if include_skips:
return (self._tracker_history, self._x_history,
self._response_history)
else:
inds = np.invert(np.isnan(self._response_history))
return (self._tracker_history[inds], self._x_history[inds],
self._response_history[inds].astype(bool))
    @property
    def shape(self):
        """Shape of the tracker array"""
        return self._shape
    @property
    def stopped(self):
        """Are all the trackers stopped
        """
        return all(t.stopped for t in self._trackers.flat)
    @property
    def trackers(self):
        """All of the tracker objects in the container
        """
        return self._trackers
# =============================================================================
# Define the TrackerMHW Class
# =============================================================================
class TrackerMHW(object):
"""Up-down adaptive tracker for the modified Hughson-Westlake Procedure
This class implements a standard up-down adaptive tracker object. It is
configured to run a fixed-step m*base-down n*base-up tracker (staircase),
with a different z*base-up if there is "no response" to the start_value.
Parameters
----------
callback : callable | ExperimentController | None
The function that will be used to print the data, usually to the
experiment .tab file. It should follow the prototype of
``ExperimentController.write_data_line``. If an instance of
``ExperimentController`` is given, then it will take that object's
``write_data_line`` function. If None is given, then it will not write
the data anywhere.
x_min : float
The minimum value that the tracker level (``x``) is allowed to take.
A value must be provided and cannot be "None".
x_max : float
The maximum value that the tracker level (``x``) is allowed to take.
A value must be provided and cannot be "None".
base_step : float
The size of the base step when the tracker moves up or down.
factor_down : float
The factor multiplier of the base step when the tracker moves down.
The default is 2.
factor_up_nr : float
The factor multiplier of the base step when there is "no response"
(i.e., "incorrect" response) to the start_value. The default is 4.
start_value : float
The starting level of the tracker.
n_up_stop : int
The number of times the level has a correct response in order to stop.
repeat_limit : str
How to treat trials that try to exceed either x_min or x_max.
``reversals`` will consider these trials as reversals while staying at
the same level. ``ignore`` does not consider these trials as reversals.
Returns
-------
tracker : instance of TrackerMHW
The up-down modified Hughson-Westlake tracker object.
Notes
-----
In the modified Hughson-Westlake procedure, the up-step is the base-step,
(e.g., 5) and the down-step is 2 * base-step (e.g., 10), except when there
is "no response" to the starting value. Then the up-step is 4 * base-step
(e.g., 20) in order to quickly get within a suitable range for audibility
and finding threshold.
"""
def __init__(self, callback, x_min, x_max, base_step=5, factor_down=2,
factor_up_nr=4, start_value=40, n_up_stop=2,
repeat_limit='reversals'):
self._callback = _check_callback(callback)
self._x_min = x_min
self._x_max = x_max
self._base_step = base_step
self._factor_down = factor_down
self._factor_up_nr = factor_up_nr
self._start_value = start_value
self._n_up_stop = n_up_stop
self._repeat_limit = repeat_limit
if type(x_min) != int and type(x_min) != float:
raise TypeError('x_min must be a float or integer')
if type(x_max) != int and type(x_max) != float:
raise TypeError('x_max must be a float or integer')
self._x = np.asarray([start_value], dtype=float)
if not np.isscalar(start_value):
raise TypeError('start_value must be a scalar')
else:
if start_value % base_step != 0:
raise ValueError('start_value must be a multiple of base_step')
else:
if (x_min - start_value) % base_step != 0:
raise ValueError('x_min must be a multiple of base_step')
if (x_max - start_value) % base_step != 0:
raise ValueError('x_max must be a multiple of base_step')
if type(n_up_stop) != int:
raise TypeError('n_up_stop must be an integer')
self._x_current = float(start_value)
self._responses = np.asarray([], dtype=bool)
self._reversals = np.asarray([], dtype=int)
self._bad_reversals = np.asarray([], dtype=bool)
self._direction = 0
self._n_trials = 0
self._n_reversals = 0
self._n_correct = 0
self._stopped = False
self._limit_count = 0
self._n_xmin_correct = 0
self._levels = np.arange(x_min, x_max + base_step, base_step)
self._n_correct_levels = {level: 0 for level in self._levels}
self._threshold = np.nan
# Now write the initialization data out
self._tracker_id = '%s-%s' % (id(self), int(round(time.time() * 1e6)))
self._callback('tracker_identify', json.dumps(dict(
tracker_id=self._tracker_id,
tracker_type='TrackerMHW')))
self._callback('tracker_%s_init' % self._tracker_id, json.dumps(dict(
callback=None,
base_step=self._base_step,
factor_down=self._factor_down,
factor_up_nr=self._factor_up_nr,
start_value=self._start_value,
x_min=self._x_min,
x_max=self._x_max,
n_up_stop=self._n_up_stop,
repeat_limit=self._repeat_limit)))
    def respond(self, correct):
        """Update the tracker based on the last response.

        Parameters
        ----------
        correct : boolean
            Was the most recent subject response correct?
        """
        if self._stopped:
            raise RuntimeError('Tracker is stopped.')
        bound = False  # was the previous trial already at x_min or x_max?
        bad = False  # did this trial try to step beyond a limit?
        reversal = False
        self._responses = np.append(self._responses, correct)
        self._n_trials += 1
        step_dir = 0  # 0 no step, 1 up, -1 down
        # Determine if it's a reversal and which way we're going
        if correct:
            self._n_correct += 1
            step_dir = -1
            if self._direction > 0:
                # Correct after moving up: direction flips -> reversal
                reversal = True
                self._n_reversals += 1
            self._n_correct_levels[self._x[-1]] += 1
            if self._x[-1] <= self._x_min:
                self._n_xmin_correct += 1
                if self._n_xmin_correct > 1:
                    # Second and later correct responses at the floor count
                    # double, so x_min can reach n_up_stop without leaving it
                    self._n_correct_levels[self._x[-1]] += 1
            if self._direction >= 0:
                self._direction = -1
        else:
            step_dir = 1
            if self._direction < 0:
                # Incorrect after moving down -> reversal
                reversal = True
                self._n_reversals += 1
            if self._direction <= 0:
                self._direction = 1
        if self._x[-1] in [self._x_min, self._x_max]:
            bound = True
        # Update the staircase
        if step_dir == 0:
            self._x = np.append(self._x, self._x[-1])
        elif step_dir < 0:
            self._x = np.append(self._x, self._x[-1] -
                                self._factor_down * self._base_step)
        elif step_dir > 0:
            if self._n_correct == 0:
                # No correct response yet: take the large "no response" step
                self._x = np.append(self._x, self._x[-1] +
                                    self._factor_up_nr * self._base_step)
            else:
                self._x = np.append(self._x, self._x[-1] + self._base_step)
        # Clamp to the limits; repeat_limit decides whether a clamped trial
        # at a bound counts as a reversal ('reversals') or not ('ignore')
        if self._x[-1] < self._x_min:
            self._x[-1] = self._x_min
            self._limit_count += 1
            if bound:
                bad = True
                if self._repeat_limit == 'reversals':
                    reversal = True
                    self._n_reversals += 1
                if self._repeat_limit == 'ignore':
                    reversal = False
                self._direction = 0
        # NOTE(review): this branch uses >= (vs < above), so landing exactly
        # on x_max also increments _limit_count -- confirm the asymmetry is
        # intended
        if self._x[-1] >= self._x_max:
            self._x[-1] = self._x_max
            self._limit_count += 1
            if bound:
                bad = True
                if self._repeat_limit == 'reversals':
                    reversal = True
                    self._n_reversals += 1
                if self._repeat_limit == 'ignore':
                    reversal = False
                self._direction = 0
        if reversal:
            self._reversals = np.append(self._reversals, self._n_reversals)
        else:
            self._reversals = np.append(self._reversals, 0)
        self._bad_reversals = np.append(self._bad_reversals, bad)
        # Should we stop here?
        self._stopped = self._stop_here()
        if not self._stopped:
            self._x_current = self._x[-1]
            self._callback('tracker_%s_respond' % self._tracker_id,
                           correct)
        else:
            # Drop the level that was queued but never run, then log
            self._x = self._x[:-1]
            self._callback(
                'tracker_%s_stop' % self._tracker_id, json.dumps(dict(
                    responses=[int(s) for s in self._responses],
                    reversals=[int(s) for s in self._reversals],
                    x=[float(s) for s in self._x],
                    threshold=self._threshold,
                    n_correct_levels={int(k): v for k, v in
                                      self._n_correct_levels.items()})))
def check_valid(self, n_reversals):
"""If last reversals contain reversals exceeding x_min or x_max.
Parameters
----------
n_reversals : int
Number of reversals (starting from the end to check).
Returns
-------
valid : bool
True if none of the reversals are at x_min or x_max and False
otherwise.
"""
self._valid = (not self._bad_reversals[self._reversals != 0]
[-n_reversals:].any())
return self._valid
    def _stop_here(self):
        """Decide whether the staircase should stop, setting the threshold.

        Side effects: sets ``_threshold_reached``, ``_n_stop`` and (when
        stopping) ``_threshold``; warns if any bound was ever exceeded.
        """
        # A level's threshold is reached once it collects n_up_stop corrects
        self._threshold_reached = [self._n_correct_levels[level] ==
                                   self._n_up_stop for level in self._levels]
        if self._n_correct == 0 and self._x[
                -2] == self._x_max and self._x[-1] == self._x_max:
            # Never correct and stuck at the ceiling twice: give up, no
            # threshold can be estimated
            self._n_stop = True
            self._threshold = np.nan
        elif len(self._x) > 3 and (self._x == self._x_max).sum() >= 4:
            # Hit the ceiling four times overall; threshold stays as-is (NaN
            # unless previously set)
            self._n_stop = True
        elif len(self._x) > 3 and (self._x[-4:] == self._x_min).sum() >= 4:
            # Four consecutive trials at the floor: threshold is x_min
            self._n_stop = True
            self._threshold = self._x_min
        elif self._threshold_reached.count(True) == 1:
            # Exactly one level reached criterion -> that level is threshold
            self._n_stop = True
            self._threshold = int(self._levels[
                [i for i, tr in enumerate(self._threshold_reached) if tr]])
        else:
            self._n_stop = False
        if self._n_stop and self._limit_count > 0:
            warnings.warn('Tracker {} exceeded x_min or x_max bounds {} times.'
                          ''.format(self._tracker_id, self._limit_count))
        return self._n_stop
    # =========================================================================
    # Define all the public properties
    # =========================================================================
    @property
    def base_step(self):
        """The base step size of the staircase"""
        return self._base_step
    @property
    def factor_down(self):
        """Multiplier of the base step for downward steps"""
        return self._factor_down
    @property
    def factor_up_nr(self):
        """Multiplier of the base step after no response at start_value"""
        return self._factor_up_nr
    @property
    def start_value(self):
        """The level at which the tracker started"""
        return self._start_value
    @property
    def x_min(self):
        """The lowest level the tracker may present"""
        return self._x_min
    @property
    def x_max(self):
        """The highest level the tracker may present"""
        return self._x_max
    @property
    def n_up_stop(self):
        """Number of correct responses at one level required to stop"""
        return self._n_up_stop
    @property
    def repeat_limit(self):
        """How trials that would exceed x_min or x_max are treated"""
        return self._repeat_limit
    @property
    def n_correct_levels(self):
        """Mapping of level -> number of correct responses at that level"""
        return self._n_correct_levels
    @property
    def threshold(self):
        """The estimated threshold (NaN until determined)"""
        return self._threshold
    @property
    def stopped(self):
        """Has the tracker stopped
        """
        return self._stopped
    @property
    def x(self):
        """The staircase
        """
        return self._x
    @property
    def x_current(self):
        """The current level
        """
        return self._x_current
    @property
    def responses(self):
        """The response history
        """
        return self._responses
    @property
    def n_trials(self):
        """The number of trials so far
        """
        return self._n_trials
    @property
    def n_reversals(self):
        """The number of reversals so far
        """
        return self._n_reversals
    @property
    def reversals(self):
        """The reversal history (0 where there was no reversal)
        """
        return self._reversals
    @property
    def reversal_inds(self):
        """The trial indices which had reversals"""
        return np.where(self._reversals)[0]
    @property
    def threshold_reached(self):
        """Which levels have threshold reached"""
        return self._threshold_reached
    # =========================================================================
    # Display functions
    # =========================================================================
    def plot(self, ax=None, threshold=True):
        """Plot the adaptive track.

        Parameters
        ----------
        ax : AxesSubplot | None
            The axes to make the plot on. If ``None`` defaults to current axes.
        threshold : bool
            Whether to plot the estimated threshold on the axes. Default is
            True.

        Returns
        -------
        fig : Figure
            The figure handle.
        ax : AxesSubplot
            The axes handle.
        lines : list of Line2D
            The handles to the staircase line and the reversal dots.
        """
        import matplotlib.pyplot as plt
        if ax is None:
            fig, ax = plt.subplots(1)
        else:
            fig = ax.figure
        # Trials are plotted 1-indexed
        line = ax.plot(1 + np.arange(self._n_trials), self._x, 'k.-')
        line[0].set_label('Trials')
        # Mark reversal trials with red dots
        dots = ax.plot(1 + np.where(self._reversals > 0)[0],
                       self._x[self._reversals > 0], 'ro')
        dots[0].set_label('Reversals')
        ax.set(xlabel='Trial number', ylabel='Level (dB)')
        if threshold:
            thresh = self.plot_thresh(ax)
            thresh[0].set_label('Threshold')
        ax.legend()
        return fig, ax, line + dots
def plot_thresh(self, ax=None):
"""Plot a line showing the threshold.
Parameters
----------
ax : Axes
The handle to the axes object. If None, the current axes will
be used.
Returns
-------
line : list Line2D
The handle to the threshold line, as returned from ``plt.plot``.
"""
import matplotlib.pyplot as plt
if ax is None:
ax = plt.gca()
h = ax.plot([1, self._n_trials], [self._threshold] * 2, '--',
color='gray')
return h
| {
"repo_name": "LABSN/expyfun",
"path": "expyfun/stimuli/_tracker.py",
"copies": "2",
"size": "50985",
"license": "bsd-3-clause",
"hash": -6085855555935634000,
"line_mean": 35.1852377573,
"line_max": 79,
"alpha_frac": 0.5419240953,
"autogenerated": false,
"ratio": 4.207030283026652,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 1409
} |
"Adaptive Weights Clustering"
__author__ = 'Kirill Efimov, Larisa Adamyan, Vladimir Spokoiny'
import scipy.spatial.distance as sci
import numpy as np
import scipy.special as spec
import matplotlib.pyplot as plt
def distance_matrix(X):
    """Return the symmetric Euclidean distance matrix of the rows of X."""
    pairwise = sci.pdist(X, 'euclidean')
    return sci.squareform(pairwise)
def get_lambda_hash(h, d, L_T):
    """Look up precomputed ball-overlap ratios for distances ``d`` at radius ``h``.

    Each distance is mapped onto the grid used by ``get_lambda_table`` and
    the resulting index is clipped to the table bounds.
    """
    h_ratio = 1.95
    last = len(L_T) - 1
    idx = (d / h / h_ratio * last).astype(int)
    return np.take(L_T, np.minimum(idx, last))
def get_lambda_table(n, m):
    """Tabulate the intersection/union volume ratio of two n-balls.

    The ratio depends only on d / h, where d is the distance between the
    ball centers and h their common radius. It is evaluated on an
    (m + 1)-point grid via the regularized incomplete beta function
    (spherical-cap volume formula).
    """
    h_ratio = 1.95
    step = 1. / m
    grid = np.linspace(step, h_ratio + step, m + 1)
    cap = spec.betainc((n + 1.) / 2, 0.5, 1. - (grid / 2.) ** 2)
    return cap / (2. - cap)
def init_weights(dist_matrix, H, n_0, discrete=False):
    """Build the initial 0/1 weight matrix.

    Each point is connected to every point within the smallest radius in
    ``H`` that covers its ``n_0``-th nearest neighbor, and the result is
    symmetrized by taking the elementwise maximum with its transpose.

    Parameters
    ----------
    dist_matrix : ndarray, shape (n, n)
        Pairwise distance matrix.
    H : sequence of float
        Increasing candidate radii (from ``get_h_intervals``).
    n_0 : int
        Neighbor index used to pick each point's starting radius.
    discrete : bool
        If True, skip zero distances (duplicate points) when locating the
        ``n_0``-th neighbor distance.

    Returns
    -------
    weights : ndarray, shape (n, n)
        Symmetric 0/1 initial weight matrix.
    """
    # Fix: use range instead of the Python-2-only xrange so the function
    # also runs under Python 3 (identical iteration semantics).
    sorted_dists = np.sort(dist_matrix, axis=1)
    v = sorted_dists[:, n_0]
    n = np.size(dist_matrix, 0)
    if discrete:
        for i in range(n):
            for j in range(n):
                if sorted_dists[i, j] != 0:
                    v[i] = sorted_dists[i, j + n_0 - 1]
                    break
    weights = np.zeros((n, n))
    for i in range(n):
        # Smallest radius that reaches the n_0-th neighbor (largest if none)
        h_closest = next((h for h in H if h >= v[i]), H[-1])
        weights[i, :] = 1 * (dist_matrix[i, :] <= h_closest)
    return np.maximum(weights, weights.T)
def KL_init():
    """Precompute the KL-type test statistic on a fine grid over [0,1]^2.

    The statistic is evaluated once on a 2001 x 2001 grid so the main loop
    can look values up instead of recomputing them; NaNs/infs from the log
    at the grid boundary are replaced by zeros.
    """
    m = 2000
    axis = np.linspace(0, 1, m + 1)
    e = np.repeat([axis], m + 1, axis=0)
    q = e.T
    with np.errstate(divide='ignore', invalid='ignore'):
        stat = (-1) ** (e > q) * (e - q) * np.log((e * (1. - q) / q / (1. - e)))
    return np.nan_to_num(stat)
def get_h_intervals(dist_matrix, n_0, discrete=False, speed=1.5):
    """Compute the increasing sequence of radii h_k used by the AWC steps.

    Radii grow until the largest pairwise distance is covered, bounded by
    a factor of ``h_ratio`` per step; larger ``speed`` advances the
    neighbor index more aggressively (fewer steps). Returns the radii list
    and the row-sorted distance matrix.
    """
    h_ratio = 1.95
    dist_matrix = np.sort(dist_matrix)
    n = np.size(dist_matrix, 0)
    ### Idea
    aa = 2 ** 0.5
    # Start from the smallest n_0-th neighbor distance
    h_final = [np.min(dist_matrix[:, n_0-1])]
    # For discrete data, skip past zero distances (duplicate points)
    if discrete == True:
        h_start = np.min(dist_matrix[:, int(n_0 * aa)])
        if h_start == 0:
            h_final = [np.min(dist_matrix[dist_matrix > 0])]
        else:
            h_final = [h_start]
    h_max = np.max(dist_matrix)
    #print h_max
    while(h_final[-1] != h_max):
        h = h_final[-1]
        # Index of the first distance in each row strictly above h
        h_candidates_ind = np.argmax(dist_matrix > h, axis=1)
        argmax_to_check = dist_matrix[range(n), h_candidates_ind] > h
        h_candidates_ind -= 1
        h_candidates_ind = h_candidates_ind.clip(min=n_0)
        # Advance the neighbor index geometrically, controlled by speed
        h_candidates_ind = np.maximum(((h_candidates_ind * aa ** speed).clip(max=n-1)).astype(int), h_candidates_ind+1)
        argmax_to_check = argmax_to_check * (h_candidates_ind < n-1)
        F = dist_matrix[np.arange(n)[argmax_to_check], h_candidates_ind[argmax_to_check]]
        if len(F) == 0:
            h_next=h_max
        else:
            h_next = np.min(F)
        # Never grow the radius by more than h_ratio per step
        h_final.append(min(h_next, h * h_ratio))
    print 'number of steps:', len(h_final)
    # Drop duplicate radii and any leading zero
    a = [h_final[0]]
    for i in xrange(len(h_final)-1):
        if h_final[i+1] - h_final[i] != 0:
            a.append(h_final[i+1])
    if a[0] == 0:
        del a[0]
    # Pad a few extra growing radii at the end
    for i in range(4):
        a.append(a[-1]* 1.5)
    return a, dist_matrix
def init(X, dist_matrix, n_neigh=-1, dim=-1, discrete=False, speed=1.5):
    """Initialization step for all variables of the AWC algorithm.

    Parameters
    ----------
    X : array-like or None
        Input data (rows are samples); may be None when ``dist_matrix``
        is given.
    dist_matrix : ndarray or None
        Precomputed distance matrix; computed from ``X`` when None.
    n_neigh : int
        Initial neighbor count; derived from ``dim`` and sample count
        when -1.
    dim : int
        Effective dimension; derived from ``X`` (capped at 2 for wide
        data) when -1.
    discrete, speed : see ``get_h_intervals``.

    Returns
    -------
    (L_T, dist_matrix, H, v, weights, T, KL) as used by ``cluster_step``.
    """
    if dist_matrix is None:
        dist_matrix = distance_matrix(X)
    n = np.size(dist_matrix, 0)
    if X is None:
        dim = 2
    if dim == -1:
        if np.size(X, 1) > 7:
            dim = 2
        else:
            dim = np.size(X, 1)
    if n_neigh == -1:
        # Bug fix: 0.1 * n is a float and n_neigh is later used as an
        # array index (dist_ordered[:, n_neigh - 1]), so force an integer.
        n_neigh = int(max(6, min(2 * dim + 2, 0.1 * n)))
    L_T = get_lambda_table(dim, 10000)
    H, dist_ordered = get_h_intervals(dist_matrix, n_neigh, discrete, speed)
    # Each point's n_neigh-th neighbor distance, floored at the first radius
    v = dist_ordered[:, n_neigh-1].clip(min=H[0])
    weights = init_weights(dist_matrix, H, n_neigh, discrete)
    T = np.zeros((n, n))
    KL = KL_init()
    return L_T, dist_matrix, H, v, weights, T, KL
def cluster_step(l, weights, v, n, k, L_T, T, KL, dist_matrix, H):
    """Make one iteration of the AWC algorithm at radius H[k-1].

    Updates ``weights`` in place: for every pair within the next radius
    H[k], a KL-type test statistic is compared against the threshold ``l``
    to decide whether the pair stays connected.
    """
    # Number of neighbors of each point within the current radius (minus self)
    neighbour_numbers = np.sum(weights * (dist_matrix <= H[k-1]), axis = 1) - 1
    D2 = dist_matrix <= H[k-1]
    np.fill_diagonal(D2, False)
    P = D2 * weights
    max_dist = np.max(dist_matrix, axis=1)
    t_1 = (neighbour_numbers[np.newaxis, :] - P).T
    # t_12: size of the common neighborhood of each pair
    t_12 = np.inner(P, P)
    t_12x = np.inner(P, D2)
    gg1 = (t_1 == t_12x) * (t_12 < 0.5 * t_12x)
    #gg2 = (t_1.T == t_12x.T) * (t_12 < 0.5 * t_12x.T)
    t_1 = t_1 - t_12x + t_12
    # q: expected overlap ratio of two balls at this radius
    q = get_lambda_hash(H[k-1], dist_matrix, L_T)
    #E = (max_dist[i] < H[k-1]) * ( max_dist[i+1:] < H[k-1])
    #q[E] = 1. / get_lambda_hash(np.maximum(max_dist[i], max_dist[i+1:][E]), dist_matrix[i, i+1:][E], L_T)
    E = max_dist < H[k-1]
    F = np.repeat([max_dist], n, axis = 0)
    R = np.maximum(F.T, F)
    # NOTE(review): chained boolean indexing assigns into a temporary copy,
    # so this update never reaches ``q``; the intent (cf. the commented-out
    # code above) suggests ``q[np.ix_(E, E)] = ...`` -- confirm before
    # changing, as it alters the clustering numerics.
    q[E, :][:, E] = get_lambda_hash(R[E, :][:, E], dist_matrix[E, :][:, E], L_T)
    # t: size of the union neighborhood; e: observed overlap proportion
    t = t_1 + t_1.T - t_12
    with np.errstate(invalid='ignore'):
        e = t_12 / t
    aa = e >= 0.95
    e[t == 0] = 0
    # Clip both proportions onto the precomputed KL grid range
    e = e.clip(min=0.05, max=0.9)
    q = q.clip(min=0.05, max=0.9)
    bb = e <= 0.05
    e *= 2000
    q *= 2000
    e = e.astype(int)
    q = q.astype(int)
    # Test statistic via table lookup
    T = t * KL[q, e]
    T[np.logical_or(bb, t_12 == 0)] = np.nan
    T[aa] = l
    # Points whose starting radius exceeds the current one are left untouched
    sum_v = v > H[k-1]
    T[sum_v, :] = float("inf")
    T[:, sum_v] = float("inf")
    T[np.logical_or(gg1, gg1.T)] = np.nan
    #T[gg2] = np.nan
    # Update weights for pairs within the next radius with a valid statistic
    I = (dist_matrix <= H[k]) * (dist_matrix > 0) * (T != float("inf")) * (np.isnan(T) == False)
    weights[I] = 1 * (T[I] <= l)
    weights[np.isnan(T)] = 0
    np.fill_diagonal(weights, 1)
    return
def connect_outliers(weights, n_outliers, dist_matrix, n):
    """Attach outlier points to their nearest non-outlier cluster.

    A point is an outlier if it is connected (including to itself) to at
    most ``n_outliers`` points. Each outlier adopts the weight row of its
    nearest non-outlier point, and the matrix is kept symmetric.

    Parameters
    ----------
    weights : ndarray, shape (n, n)
        Symmetric 0/1 weight matrix (modified in place and returned).
    n_outliers : int
        Maximum connectivity for a point to count as an outlier.
    dist_matrix : ndarray, shape (n, n)
        Pairwise distance matrix.
    n : int
        Number of points.

    Returns
    -------
    weights : ndarray
        The updated weight matrix.
    """
    # (The original had a second, unreachable implementation after the
    # return; it has been removed.)
    degrees = np.sum(weights, axis=1)
    outliers = degrees <= n_outliers
    distance_order = np.argsort(dist_matrix, axis=1)
    for i in range(n):
        if not outliers[i]:
            continue
        for j in range(1, n):
            nearest = distance_order[i, j]
            if not outliers[nearest]:
                # Bug fix: copy the weights of the nearest point itself;
                # the original indexed by the rank j instead of the point
                # distance_order[i, j]. The in-place w[i] = 1 also links
                # the nearest point back to i (symmetry).
                w = weights[nearest, :]
                w[i] = 1
                weights[i, :] = w
                weights[:, i] = w
                break
    return weights
def clustering_from_weights(weights):
    """Extract cluster structure from the weight matrix represented as list,
    which contains for each cluster list of indexes of points in this cluster.

    Returns
    -------
    clusters_ : list, shape (n_clusters,)
        Cluster structure of the data, i.e. list of clusters, where each cluster
        is a list of points indexes in that cluster.
    """
    n = len(weights)
    clusters = []
    # Binarize the (possibly fractional) weights into an adjacency matrix
    adjmatrix = np.zeros((n, n), dtype=np.int8)
    for i in range(n):
        for j in range(n):
            if weights[i, j] >= 0.5:
                adjmatrix[i, j] = 1
    points = range(n)
    while(len(points) != 0):
        neigbohours = np.sum(adjmatrix, axis=1)
        candidates = np.argsort(neigbohours)
        # Seed the next cluster with the highest-degree point whose
        # neighbors mostly have a similar degree (a stable cluster core)
        for cluster_generater in reversed(candidates):
            neigbohours_i = neigbohours[adjmatrix[cluster_generater, :] == 1]
            if np.sum(neigbohours_i > (neigbohours[cluster_generater] - 5)) > 0.95 * np.size(neigbohours_i,0):
                break
        # Collect the seed's whole neighborhood as the cluster
        all_cliques = ()
        for i in range(len(points)):
            if adjmatrix[cluster_generater, i] == 1:
                all_cliques += tuple([i])
        all_cliques = [all_cliques]
        clusters.append([points[i] for i in range(len(points)) if i in all_cliques[0]])
        # Remove the clustered points and repeat on the remainder
        adjmatrix = np.delete(adjmatrix, all_cliques[0], 0)
        adjmatrix = np.delete(adjmatrix, all_cliques[0], 1)
        points = [points[i] for i in range(len(points)) if i not in all_cliques[0]]
    return clusters
class AWC():
    """Adaptive Weights Clustering

    Parameters
    ----------
    n_neigh : int, optional, default: -1
        The number of closest neighbors to be connected on the
        initialization step. If not specified,
        n_neigh = max(6, min(2 * effective_dim + 2, 0.1 * n_samples)).
    effective_dim : int, optional, default: -1
        Effective dimension of data X. If not specified, effective_dim =
        true dimension of the data, in case the true dimension is less
        than 7, otherwise 2.
    n_outliers : int, optional, default: 0
        Minimum number of points each cluster must contain. Points from
        clusters with smaller size will be connected to the closest
        cluster.
    discrete : boolean, optional, default: False
        Specifies if data X consists of only discrete values.
    speed : int, optional, default: 1.5
        Controls the number of iterations. Increase of the speed parameter
        decreases the number of steps.

    Attributes
    ----------
    dmatrix : array, shape = [n_samples, n_samples]
        Distance matrix.
    clusters_ : list of lists
        Cluster structure of data X i.e. list of clusters, where each
        cluster is a list of points indexes in that cluster.
    weights_ : numpy array, shape = [n_samples, n_samples]
        Weight matrix computed by awc.
    labels_ : array, shape = [n_samples,]
        Labels of each point.
    """
    def __init__(self, n_neigh=-1, effective_dim=-1, n_outliers=0,
                 discrete=False, speed=1.5):
        self.n_neigh = n_neigh
        self.effective_dim = effective_dim
        self.n_outliers = n_outliers
        self.discrete = discrete
        self.clusters_ = None
        self.speed = speed
    def awc(self, l, X=None, dmatrix=None):
        """Recover cluster structure of data X.

        Parameters
        ----------
        l : int
            The lambda parameter.
        X : array, shape (n_samples, n_features), optional, default: None
            Input data; the Euclidean distance matrix is computed from it.
            No need to specify X if dmatrix is specified.
        dmatrix : array, shape (n_samples, n_samples), optional
            Distance matrix. If not specified, X must be given.

        Returns
        -------
        weights_ : array, shape (n_samples, n_samples)
            Final weights from which the cluster structure can be extracted.
        """
        if X is None and dmatrix is None:
            raise ValueError("X and dmatrix can't be both None.")
        (L_T, self.dmatrix, H, v, self.weights_, T,
         KL) = init(X, dmatrix, self.n_neigh, self.effective_dim,
                    self.discrete, self.speed)
        n = np.size(self.dmatrix, 0)
        for k in range(1, len(H)):
            cluster_step(l, self.weights_, v, n, k, L_T, T, KL,
                         self.dmatrix, H)
        if self.n_outliers >= 1:
            self.weights_ = connect_outliers(self.weights_, self.n_outliers,
                                             self.dmatrix, n)
        return self.weights_
    def get_clusters(self):
        """Extract the cluster structure as a list of index lists.

        Returns
        -------
        clusters_ : list
            List of clusters, each a list of point indexes.
        """
        # Bug fix: compare with ``is None`` -- ``== None`` is always False
        # for a list and would break for array-like values.
        if self.clusters_ is None:
            self.clusters_ = clustering_from_weights(self.weights_)
        return self.clusters_
    def get_labels(self):
        """Extract labels of points from the cluster structure.

        Returns
        -------
        labels_ : array, shape (n_samples,)
            Labels of each point.
        """
        if self.clusters_ is None:
            self.get_clusters()
        labels = np.zeros(len(self.weights_))
        for k in range(len(self.clusters_)):
            labels[self.clusters_[k]] = k
        # Bug fix: the documented attribute is ``labels_`` but only
        # ``labels`` was ever set; keep both for backward compatibility.
        self.labels_ = labels
        self.labels = labels
        return self.labels
    def plot_sum_of_weights(self, lambda_interval, X=None, dmatrix=None):
        """Plot the sum of computed weights over a lambda interval.

        Parameters
        ----------
        lambda_interval : list
            Lambda parameters for which the sum of weights is computed.
        X : array, shape (n_samples, n_features), optional
            Input data. No need to specify X if dmatrix is specified.
        dmatrix : array, shape (n_samples, n_samples), optional
            Distance matrix; computed from X when not given.

        Returns
        -------
        weights_computed : list of arrays
            Computed weights for each lambda in the interval.
        """
        weights_computed = [self.awc(l, X, dmatrix)
                            for l in lambda_interval]
        lambda_interval = np.asarray(lambda_interval)
        sorting_order = np.argsort(lambda_interval)
        # Bug fix: use the sample count from the distance matrix; the
        # original used len(X), which crashes when only dmatrix is given.
        n = len(self.dmatrix)
        weights_summed = np.asarray(
            [np.sum(w) * 1. / (n ** 2) for w in weights_computed])
        plt.plot(lambda_interval[sorting_order],
                 weights_summed[sorting_order], '-r',
                 label=r'$\sum w_{ij}$', linewidth=3)
        plt.legend(loc=2, fontsize=16)
        plt.xlabel(r'$\lambda$', size=20)
        plt.grid(b=True, which='major', color='k', linestyle='--')
        plt.xticks(np.arange(min(lambda_interval[sorting_order]),
                             max(lambda_interval[sorting_order]) + 1, 2.0))
        plt.show()
        return weights_computed
| {
"repo_name": "larisahax/awc",
"path": "awc/awc.py",
"copies": "1",
"size": "15189",
"license": "mit",
"hash": -7683095211330169000,
"line_mean": 34.4055944056,
"line_max": 133,
"alpha_frac": 0.5487523866,
"autogenerated": false,
"ratio": 3.3294607628233233,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9247866610480998,
"avg_score": 0.026069307788464936,
"num_lines": 429
} |
"""Adaptor for DynamoDB.
"""
import copy
from boto3.dynamodb import conditions
from oto import response
from oto import status
from sukimu import consts
from sukimu import operations
from sukimu import schema
class TableDynamo(schema.Table):
    """Sukimu table adaptor backed by a DynamoDB table."""

    def __init__(self, name, connection, **options):
        """Create a TableDynamo.

        Args:
            name (string): Name of the table.
            connection (DynamoDBConnection): The dynamodb connection.
            options (dict): Behavior flags; `remove_null_values` (default
                True) strips None values before writing items.
        """
        self.name = name
        self.connection = connection
        self.table = connection.Table(name)
        self.options = options
        self.indexes = {}
        self.hash = None
        self.range = None

    def copy(self):
        """Create a copy of the current object.

        Return:
            TableDynamo: Copy of the current instance.
        """
        return self.__class__(self.name, self.connection)

    def add_index(self, index):
        """Add an index into the table.

        Args:
            index (DynamoIndex): The dynamo index.
        """
        if index.type == schema.Index.PRIMARY:
            # The primary index defines the table's own key schema.
            self.hash = index.hash
            self.range = index.range
        self.indexes.update({index.name: index})

    def create(self, data):
        """Create an item.

        Args:
            data (dict): Data for this specific item (refer to the boto
                dynamodb documentation to know what values are allowed.)
        Return:
            Dictionary: The data that was added.
        """
        # DynamoDB rejects empty-string attributes; None values are also
        # stripped unless the table was configured to keep them.
        checks = ['']
        if self.options.get('remove_null_values', True):
            checks.append(None)

        self.table.put_item(Item={
            key: value for key, value in data.items() if value not in checks
        })
        return data

    def update(self, item, data):
        """Update an item.

        Args:
            item (DynamoDbItem): The dynamodb item to update.
            data (object): The validated fields.
        Return:
            Response: The response of the update.
        """
        if not item:
            return item

        item = copy.deepcopy(item.message)
        update_expression = []
        expression_attribute_values = {}
        expression_attribute_names = {}

        for field, value in data.items():
            # Key attributes cannot be updated; empty strings are invalid.
            if value == '' or field in [self.hash, self.range]:
                continue

            index = len(update_expression)
            # `field` may be rewritten into a '#N' placeholder below; keep
            # the caller-facing name for the returned item (bug fix: the
            # original stored the new value under the placeholder key).
            original_field = field

            # Reserved keywords must be aliased via ExpressionAttributeNames.
            if field.upper() in RESERVED_KEYWORDS:
                field_index = len(expression_attribute_names)
                expression_attribute_names.update({
                    '#{}'.format(field_index): field
                })
                field = '#{}'.format(field_index)

            # Nested (dotted) paths: alias every path component.
            if '.' in field:
                field_parts = field.split('.')
                field_index = len(expression_attribute_names)
                expression_attribute_names.update({
                    '#{}'.format(field_index + part_index): name
                    for part_index, name in enumerate(field_parts)
                })
                field = '.'.join([
                    '#{}'.format(field_index + part_index)
                    for part_index, name in enumerate(field_parts)
                ])

            update_expression.append('{}=:v{}'.format(field, index))
            expression_attribute_values.update({
                ':v{}'.format(index): value
            })
            item.update({original_field: value})

        item_key = {
            index: value for index, value in {
                self.hash: item.get(self.hash),
                self.range: item.get(self.range)
            }.items() if value}

        if not update_expression:
            # Nothing to write; echo the unchanged item back.
            return response.Response(status=status.OK, message=item)

        extra = {}
        if expression_attribute_names:
            extra.update(
                ExpressionAttributeNames=expression_attribute_names)

        self.table.update_item(
            Key=item_key,
            UpdateExpression='SET {}'.format(','.join(update_expression)),
            ExpressionAttributeValues=expression_attribute_values,
            **extra)

        return response.Response(status=status.ACCEPTED, message=item)

    def delete(self, item):
        """Delete an item.

        Args:
            item (DynamoDbItem): The dynamodb item to delete.
        Return:
            Response: the response of the delete.
        """
        item_key = {
            index: value for index, value in {
                self.hash: item.get(self.hash),
                self.range: item.get(self.range)
            }.items() if value}

        try:
            self.table.delete_item(Key=item_key)
            return response.Response()
        except Exception:
            # Narrowed from a bare `except:`; keep the original best-effort
            # contract of mapping any boto failure to BAD_REQUEST.
            return response.Response(status=status.BAD_REQUEST)

    def fetch(self, query, sort=None, limit=None, index=None):
        """Fetch one or more entries.

        Fetching entries is allowed on any field. For better performance, it is
        recommended to use one of the indexes. If no index is used, a scan will
        be performed on the table (which are much slower.)

        Args:
            query (dict): the query.
            sort (int): the sorting type (refer to schema.sort).
            limit (int): the number of items you want to get back from the
                table.
            index (str): the name of the index to use. If defined, looking for
                it.
        Return:
            Response: All the fetched items, or NOT_FOUND.
        """
        data = dict()
        keys = list(query.keys())

        if index:
            index = self.indexes.get(index)
            assert index, 'The index requested was not found.'
        else:
            index = self.find_index(keys)

        if limit:
            data.update(Limit=limit)

        expressions = None
        for key, value in query.items():
            expression = None
            if isinstance(value, operations.Equal):
                expression = conditions.Key(key).eq(value.value)
            elif isinstance(value, operations.Between):
                expression = conditions.Key(key).between(*value.value)
            elif isinstance(value, operations.In):
                # Bug fix: `index` may be None on the scan path; the
                # original unconditionally dereferenced `index.name` here.
                return self.fetch_many(
                    key, value.value, index=index.name if index else None)

            if expression:
                expressions = (
                    expression if expressions is None
                    else expressions & expression)

        if index:
            if index.name:
                data['IndexName'] = index.name
            data['ScanIndexForward'] = sort is not consts.SORT_DESCENDING
            dynamo = self.table.query(
                KeyConditionExpression=expressions,
                **data).get('Items')
        else:
            dynamo = self.table.scan(FilterExpression=expressions).get('Items')

        if not len(dynamo):
            return response.create_not_found_response()

        return response.Response(
            message=[dict(obj) for obj in dynamo])

    def fetch_many(self, key, values, index=None):
        """Fetch more than one item.

        Method used to fetch more than one item based on one key and many
        values.

        Args:
            key (string): Name of the key.
            values (list): All the values to fetch.
            index (str): the name of the index to use. If defined, looking for
                it.
        Return:
            Response: NOT_FOUND when `values` is empty, otherwise the list of
                per-value fetch_one messages.
        """
        message = []
        for value in values:
            message.append(
                self.fetch_one(
                    index=index, **{key: operations.Equal(value)}).message)

        if not message:
            return response.create_not_found_response()
        return response.Response(message)

    def fetch_one(self, index=None, **query):
        """Get one item.

        Args:
            query (dict): The query item.
            index (str): the name of the index to use. If defined, looking for
                it.
        Return:
            Response: If the item is found, it is provided in the message,
                if not found, the status is set to NOT_FOUND.
        """
        default_response = response.create_not_found_response()
        field_names = list(query.keys())

        # A direct get_item is only possible when the query exactly covers
        # the table's primary key (hash, or hash + range).
        required = 1
        is_hash = self.hash in field_names
        is_range = self.range in field_names
        if is_range:
            required = 2

        item = None
        if len(query) == required and is_hash:
            data = dict()
            data[self.hash] = query.get(self.hash).value
            if is_range:
                data[self.range] = query.get(self.range).value
            try:
                item = self.table.get_item(Key=data).get('Item', None)
            except Exception:
                # Narrowed from a bare `except:`; any boto failure is
                # treated as "not found".
                return default_response

        if not item:
            # Fall back to a (slower) query/scan limited to one result.
            data = self.fetch(query, index=index, limit=1).message
            if data and len(data) == 1:
                item = data[0]

        if item:
            return response.Response(message=item)
        return default_response

    def create_table(self):
        """Create a dynamodb table.

        The dynamodb table only needs to know about the indexes and the type
        of those indexes.
        """
        attributes = []
        attribute_keys = set()
        indexes = {}
        indexes[schema.Index.PRIMARY] = None
        table_provision = None

        for index in self.schema.indexes:
            hash_field = self.schema.fields.get(index.hash)
            range_field = self.schema.fields.get(index.range)

            provision = dict(
                ReadCapacityUnits=index.read_capacity,
                WriteCapacityUnits=index.write_capacity)

            # Each attribute may only be declared once, even when used by
            # several indexes.
            if index.hash not in attribute_keys:
                attribute_keys.add(index.hash)
                attributes.append(dict(
                    AttributeName=index.hash,
                    AttributeType=DynamoType.get(hash_field.basetype)))

            key_schema = [dict(
                AttributeName=index.hash,
                KeyType='HASH')]

            if range_field:
                if index.range not in attribute_keys:
                    attribute_keys.add(index.range)
                    attributes.append(dict(
                        AttributeName=index.range,
                        AttributeType=DynamoType.get(range_field.basetype)))
                key_schema.append(dict(
                    AttributeName=index.range,
                    KeyType='RANGE'))

            if index.type == schema.Index.PRIMARY:
                table_provision = provision
                indexes[index.type] = key_schema
                continue

            # Renamed from `response`, which shadowed the imported module.
            secondary_index = dict(
                IndexName=index.name,
                KeySchema=key_schema,
                Projection={'ProjectionType': 'ALL'})

            # Only global secondary indexes carry their own throughput.
            if index.type == schema.Index.GLOBAL:
                secondary_index.update(ProvisionedThroughput=provision)

            indexes.setdefault(index.type, []).append(secondary_index)

        self.connection.create_table(
            TableName=self.name,
            KeySchema=indexes.get(schema.Index.PRIMARY),
            AttributeDefinitions=attributes,
            ProvisionedThroughput=table_provision,
            **{
                index_name: index_definition
                for index_name, index_definition in {
                    'LocalSecondaryIndexes': indexes.get(schema.Index.LOCAL),
                    'GlobalSecondaryIndexes': indexes.get(schema.Index.GLOBAL)
                }.items() if index_definition})
class IndexDynamo(schema.Index):
    """Dynamo-specific index definition: a hash key plus optional range key."""

    def __init__(
            self, type, *keys, name=None, read_capacity=None,
            write_capacity=None, unique=True):
        """Store the index metadata.

        Args:
            type: One of the schema.Index type constants.
            keys: One (hash) or two (hash, range) key names.
            name (str): Optional index name.
            read_capacity (int): Provisioned read units.
            write_capacity (int): Provisioned write units.
            unique (bool): Whether indexed values are unique.
        """
        self.name = name
        self.type = type
        hash_key = keys[0]
        range_key = keys[1] if len(keys) == 2 else None
        self.hash = hash_key
        self.range = range_key
        self.read_capacity = read_capacity
        self.write_capacity = write_capacity
        self.unique = unique

    @property
    def keys(self):
        """List of key names: always the hash key, plus the range if set."""
        if self.range:
            return [self.hash, self.range]
        return [self.hash]
# Mapping from python base types to DynamoDB attribute type descriptors
# (S = string, N = number), used when declaring table key schemas.
DynamoType = {
    str: 'S',
    int: 'N',
}
# DynamoDB reserved words. Attribute names matching any of these must be
# aliased via ExpressionAttributeNames (see TableDynamo.update).
RESERVED_KEYWORDS = '''
ABORT ABSOLUTE ACTION ADD AFTER AGENT AGGREGATE ALL ALLOCATE ALTER ANALYZE
AND ANY ARCHIVE ARE ARRAY AS ASC ASCII ASENSITIVE ASSERTION ASYMMETRIC AT
ATOMIC ATTACH ATTRIBUTE AUTH AUTHORIZATION AUTHORIZE AUTO AVG BACK BACKUP
BASE BATCH BEFORE BEGIN BETWEEN BIGINT BINARY BIT BLOB BLOCK BOOLEAN BOTH
BREADTH BUCKET BULK BY BYTE CALL CALLED CALLING CAPACITY CASCADE CASCADED
CASE CAST CATALOG CHAR CHARACTER CHECK CLASS CLOB CLOSE CLUSTER CLUSTERED
CLUSTERING CLUSTERS COALESCE COLLATE COLLATION COLLECTION COLUMN COLUMNS
COMBINE COMMENT COMMIT COMPACT COMPILE COMPRESS CONDITION CONFLICT CONNECT
CONNECTION CONSISTENCY CONSISTENT CONSTRAINT CONSTRAINTS CONSTRUCTOR
CONSUMED CONTINUE CONVERT COPY CORRESPONDING COUNT COUNTER CREATE CROSS
CUBE CURRENT CURSOR CYCLE DATA DATABASE DATE DATETIME DAY DEALLOCATE DEC
DECIMAL DECLARE DEFAULT DEFERRABLE DEFERRED DEFINE DEFINED DEFINITION
DELETE DELIMITED DEPTH DEREF DESC DESCRIBE DESCRIPTOR DETACH DETERMINISTIC
DIAGNOSTICS DIRECTORIES DISABLE DISCONNECT DISTINCT DISTRIBUTE DO DOMAIN
DOUBLE DROP DUMP DURATION DYNAMIC EACH ELEMENT ELSE ELSEIF EMPTY ENABLE END
EQUAL EQUALS ERROR ESCAPE ESCAPED EVAL EVALUATE EXCEEDED EXCEPT EXCEPTION
EXCEPTIONS EXCLUSIVE EXEC EXECUTE EXISTS EXIT EXPLAIN EXPLODE EXPORT
EXPRESSION EXTENDED EXTERNAL EXTRACT FAIL FALSE FAMILY FETCH FIELDS FILE
FILTER FILTERING FINAL FINISH FIRST FIXED FLATTERN FLOAT FOR FORCE FOREIGN
FORMAT FORWARD FOUND FREE FROM FULL FUNCTION FUNCTIONS GENERAL GENERATE GET
GLOB GLOBAL GO GOTO GRANT GREATER GROUP GROUPING HANDLER HASH HAVE HAVING
HEAP HIDDEN HOLD HOUR IDENTIFIED IDENTITY IF IGNORE IMMEDIATE IMPORT IN
INCLUDING INCLUSIVE INCREMENT INCREMENTAL INDEX INDEXED INDEXES INDICATOR
INFINITE INITIALLY INLINE INNER INNTER INOUT INPUT INSENSITIVE INSERT
INSTEAD INT INTEGER INTERSECT INTERVAL INTO INVALIDATE IS ISOLATION ITEM
ITEMS ITERATE JOIN KEY KEYS LAG LANGUAGE LARGE LAST LATERAL LEAD LEADING
LEAVE LEFT LENGTH LESS LEVEL LIKE LIMIT LIMITED LINES LIST LOAD LOCAL
LOCALTIME LOCALTIMESTAMP LOCATION LOCATOR LOCK LOCKS LOG LOGED LONG LOOP
LOWER MAP MATCH MATERIALIZED MAX MAXLEN MEMBER MERGE METHOD METRICS MIN
MINUS MINUTE MISSING MOD MODE MODIFIES MODIFY MODULE MONTH MULTI MULTISET
NAME NAMES NATIONAL NATURAL NCHAR NCLOB NEW NEXT NO NONE NOT NULL NULLIF
NUMBER NUMERIC OBJECT OF OFFLINE OFFSET OLD ON ONLINE ONLY OPAQUE OPEN
OPERATOR OPTION OR ORDER ORDINALITY OTHER OTHERS OUT OUTER OUTPUT OVER
OVERLAPS OVERRIDE OWNER PAD PARALLEL PARAMETER PARAMETERS PARTIAL PARTITION
PARTITIONED PARTITIONS PATH PERCENT PERCENTILE PERMISSION PERMISSIONS PIPE
PIPELINED PLAN POOL POSITION PRECISION PREPARE PRESERVE PRIMARY PRIOR
PRIVATE PRIVILEGES PROCEDURE PROCESSED PROJECT PROJECTION PROPERTY
PROVISIONING PUBLIC PUT QUERY QUIT QUORUM RAISE RANDOM RANGE RANK RAW READ
READS REAL REBUILD RECORD RECURSIVE REDUCE REF REFERENCE REFERENCES
REFERENCING REGEXP REGION REINDEX RELATIVE RELEASE REMAINDER RENAME REPEAT
REPLACE REQUEST RESET RESIGNAL RESOURCE RESPONSE RESTORE RESTRICT RESULT
RETURN RETURNING RETURNS REVERSE REVOKE RIGHT ROLE ROLES ROLLBACK ROLLUP
ROUTINE ROW ROWS RULE RULES SAMPLE SATISFIES SAVE SAVEPOINT SCAN SCHEMA
SCOPE SCROLL SEARCH SECOND SECTION SEGMENT SEGMENTS SELECT SELF SEMI
SENSITIVE SEPARATE SEQUENCE SERIALIZABLE SESSION SET SETS SHARD SHARE
SHARED SHORT SHOW SIGNAL SIMILAR SIZE SKEWED SMALLINT SNAPSHOT SOME SOURCE
SPACE SPACES SPARSE SPECIFIC SPECIFICTYPE SPLIT SQL SQLCODE SQLERROR
SQLEXCEPTION SQLSTATE SQLWARNING START STATE STATIC STATUS STORAGE STORE
STORED STREAM STRING STRUCT STYLE SUB SUBMULTISET SUBPARTITION SUBSTRING
SUBTYPE SUM SUPER SYMMETRIC SYNONYM SYSTEM TABLE TABLESAMPLE TEMP TEMPORARY
TERMINATED TEXT THAN THEN THROUGHPUT TIME TIMESTAMP TIMEZONE TINYINT TO
TOKEN TOTAL TOUCH TRAILING TRANSACTION TRANSFORM TRANSLATE TRANSLATION
TREAT TRIGGER TRIM TRUE TRUNCATE TTL TUPLE TYPE UNDER UNDO UNION UNIQUE
UNIT UNKNOWN UNLOGGED UNNEST UNPROCESSED UNSIGNED UNTIL UPDATE UPPER URL
USAGE USE USER USERS USING UUID VACUUM VALUE VALUED VALUES VARCHAR VARIABLE
VARIANCE VARINT VARYING VIEW VIEWS VIRTUAL VOID WAIT WHEN WHENEVER WHERE
WHILE WINDOW WITH WITHIN WITHOUT WORK WRAPPED WRITE YEAR ZONE'''

# Bug fix: split() on any whitespace. The original split(' ') left
# newline-joined tokens such as 'ANALYZE\nAND' in the list, so the keywords
# at the start/end of each line above were never matched by membership
# tests. A frozenset also gives O(1) lookup for the hot path in
# TableDynamo.update.
RESERVED_KEYWORDS = frozenset(
    keyword for keyword in RESERVED_KEYWORDS.split() if keyword)
| {
"repo_name": "xethorn/sukimu",
"path": "sukimu/dynamodb.py",
"copies": "1",
"size": "16893",
"license": "mit",
"hash": -7047208814552277000,
"line_mean": 35.3290322581,
"line_max": 79,
"alpha_frac": 0.6128573966,
"autogenerated": false,
"ratio": 4.2032843991042546,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00042747206409445503,
"num_lines": 465
} |
"""Adaptor is a subclass of :class:`~smif.model.model.Model`, to be used for converting
data between units or dimensions.
The method to override is `generate_coefficients`, which accepts two
:class:`~smif.metadata.spec.Spec` definitions.
"""
from abc import ABCMeta, abstractmethod
import numpy as np # type: ignore
from smif.data_layer.data_array import DataArray
from smif.data_layer.data_handle import DataHandle
from smif.exception import SmifDataNotFoundError
from smif.metadata import Spec
from smif.model import Model
class Adaptor(Model, metaclass=ABCMeta):
    """Abstract Adaptor, to convert inputs/outputs between other Models

    Override method `generate_coefficients`, which accepts two
    :class:`~smif.metadata.spec.Spec` definitions.
    """
    def simulate(self, data_handle: DataHandle):
        """Convert from input to output based on matching variable names

        Parameters
        ----------
        data_handle : smif.data_layer.data_handle.DataHandle
        """
        # Convert every input that has a same-named output.
        for from_spec in self.inputs.values():
            if from_spec.name in self.outputs:
                to_spec = self.outputs[from_spec.name]
                coefficients = self.get_coefficients(data_handle, from_spec, to_spec)
                data_in = data_handle.get_data(from_spec.name)
                data_out = self.convert(data_in, to_spec, coefficients)
                data_handle.set_results(to_spec.name, data_out)

    def get_coefficients(self,
                         data_handle: DataHandle,
                         from_spec: Spec,
                         to_spec: Spec) -> np.ndarray:
        """Read coefficients, or generate and save if necessary

        Parameters
        ----------
        data_handle : smif.data_layer.data_handle.DataHandle
        from_spec : smif.metadata.spec.Spec
        to_spec : smif.metadata.spec.Spec

        Returns
        -------
        numpy.ndarray
        """
        from_dim, to_dim = self.get_convert_dims(from_spec, to_spec)
        try:
            coefficients = data_handle.read_coefficients(from_dim, to_dim)
        except SmifDataNotFoundError:
            # Cache miss: generate once and persist for future runs.
            msg = "Generating coefficients for %s to %s"
            self.logger.info(msg, from_dim, to_dim)
            coefficients = self.generate_coefficients(from_spec, to_spec)
            data_handle.write_coefficients(from_dim, to_dim, coefficients)
        return coefficients

    @abstractmethod
    def generate_coefficients(self, from_spec: Spec, to_spec: Spec) -> np.ndarray:
        """Generate coefficients for a pair of :class:`~smif.metadata.spec.Spec` definitions

        Parameters
        ----------
        from_spec : smif.metadata.spec.Spec
        to_spec : smif.metadata.spec.Spec

        Returns
        -------
        numpy.ndarray
        """
        raise NotImplementedError

    def convert(self,
                data_array: DataArray,
                to_spec: Spec,
                coefficients: np.ndarray):
        """Convert a dataset between :class:`~smif.metadata.spec.Spec` definitions

        Parameters
        ----------
        data_array : smif.data_layer.data_array.DataArray
        to_spec : smif.metadata.spec.Spec
        coefficients : numpy.ndarray

        Returns
        -------
        numpy.ndarray

        Raises
        ------
        ValueError
            If the coefficients' first dimension does not match the data
            dimension being converted.
        """
        data = data_array.data
        from_spec = data_array.spec
        self.logger.debug("Converting from %s to %s.", from_spec.name, to_spec.name)
        from_convert_dim, to_convert_dim = self.get_convert_dims(from_spec, to_spec)
        self.logger.debug("Converting from %s:%s to %s:%s", from_spec.name, from_convert_dim,
                          to_spec.name, to_convert_dim)
        axis = from_spec.dims.index(from_convert_dim)
        try:
            converted = self.convert_with_coefficients(data, coefficients, axis)
        except ValueError as ex:
            if coefficients.shape[0] != data.shape[axis]:
                # Bug fix: interpolate the message. The original passed the
                # shape values as extra ValueError args, so the '%s'
                # placeholders were never substituted.
                msg = "Coefficients do not match dimension to convert: %s != %s"
                raise ValueError(
                    msg % (coefficients.shape[0], data.shape[axis])) from ex
            # Unrelated ValueError: bare raise preserves the traceback.
            raise
        self.logger.debug("Converted total from %s to %s", data.sum(), converted.sum())
        return converted

    @staticmethod
    def convert_with_coefficients(data: np.ndarray,
                                  coefficients: np.ndarray,
                                  axis: int):
        """Unchecked conversion, given data, coefficients and axis

        Parameters
        ----------
        data : numpy.ndarray
        coefficients : numpy.ndarray
        axis : integer
            Axis along which to apply conversion coefficients

        Returns
        -------
        numpy.ndarray
        """
        # Effectively a tensor contraction (the generalisation of dot product to multi-
        # dimensional ndarrays, tensors) implemented using the Einstein summation convention,
        # np.einsum, which lets us be explicit which dimensions we sum along.
        # coefficients are 2D, label these 0 and 1
        coefficient_axes = [0, 1]
        # data is nD, label these (2 to n+1) to avoid collisions
        data_axes = list(range(2, 2 + data.ndim))
        # except for the axis to convert: label this 0 to match first dim of coefficients
        data_axes[axis] = 0
        # results are also nD, label these (2 to n+1) identically to data_axes
        result_axes = list(range(2, 2 + data.ndim))
        # except for the axis to convert: label this 1 to match second dim of coefficients
        result_axes[axis] = 1
        return np.einsum(coefficients, coefficient_axes, data, data_axes, result_axes)

    @staticmethod
    def get_convert_dims(from_spec, to_spec):
        """Get dims for conversion from a pair of :class:`~smif.metadata.spec.Spec`,
        assuming only a single dimension will be converted.

        Parameters
        ----------
        from_spec : smif.metadata.Spec
        to_spec : smif.metadata.Spec

        Returns
        -------
        tuple(str)
        """
        from_convert_dims = set(from_spec.dims) - set(to_spec.dims)
        assert len(from_convert_dims) == 1, "Expected a single dim for conversion"
        from_convert_dim = from_convert_dims.pop()
        to_convert_dims = set(to_spec.dims) - set(from_spec.dims)
        assert len(to_convert_dims) == 1, "Expected a single dim for conversion"
        to_convert_dim = to_convert_dims.pop()
        return from_convert_dim, to_convert_dim
| {
"repo_name": "willu47/smif",
"path": "src/smif/convert/adaptor.py",
"copies": "3",
"size": "6401",
"license": "mit",
"hash": -7679682117943993000,
"line_mean": 35.3693181818,
"line_max": 93,
"alpha_frac": 0.6064677394,
"autogenerated": false,
"ratio": 4.161898569570871,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0013142753456191736,
"num_lines": 176
} |
""" Adaptor module.
Responsible for creating custom nodes for storing viewable data,
and connecting them to the observed object.
The nodes are added to the model, and this step should be done
prior to passing the model to the simulator.
Views can then subscribe to be updated with the given data,
according to the given type of data ("capabilities").
Observed data is stored in circular buffers of configurable size,
NOTE - As implemented, we don't support watching objects with an
output signal of zero dimensions.
"""
import collections
import nengo
import numpy as np
from capabilities.cap_factory import CapFactory
import settings
class OutputFn(collections.Callable):
    """ Callable which is passed to a given node.
    TODO: could this be rewritten clearer?
    The callable is called by the Node during simulation with inputs
    of given dimension, writes the data to the buffer, if the cap is
    connected, and publishes the updated buffer.

    NOTE(review): `collections.Callable` moved to `collections.abc` in
    Python 3.3 and was removed from `collections` in 3.10 -- confirm the
    supported Python version before upgrading.
    """
    def __init__(self, sim_manager, dimensions):
        """ Sets up simulator manager.
        Creates buffers for data.

        :param sim_manager: provides `current_step` and `dt` during
            simulation.
        :param dimensions: width (columns) of the observed output signal.
        """
        self.sim_manager = sim_manager
        self.dimensions = dimensions
        # Buffer is allocated lazily on first subscription (see subscribe),
        # so unobserved nodes cost no memory.
        self.buffer = None
        # Simulation step at which buffering began; None until first write.
        self.buffer_start_step = None
        self.subscribed_fns = []
    def __call__(self, time, input_signal):
        """ Called by Node with data of interest.
        If we have subscriptions, store data and publish.
        Params as passed by the simulator.
        :param time: The simulated time of the step.
        :param input_signal: The numpy ndarray representing the input signal.
        Sets buffer_start_step if necessary.
        """
        # Only buffer while someone is listening; otherwise drop the sample.
        if (self.subscribed_fns):
            self.buffer.append_data(input_signal)
            if (self.buffer_start_step is None):
                self.buffer_start_step = self.sim_manager.current_step
            self.update_all()
    def update_all(self):
        """ Calls the update function for all
        subscribed functions in self.subscribed_fns.
        """
        for fn in self.subscribed_fns:
            self.update(fn)
    def update(self, fn):
        """ Updates with data in a specific range,
        starting at the beginning of the buffer to the current simulation
        step.
        """
        if (self.buffer and self.buffer_start_step is not None):
            # Show at most MAX_WINDOW_SIZE steps, ending at the current step.
            start_step = max(self.buffer_start_step,
                             self.sim_manager.current_step -
                             settings.MAX_WINDOW_SIZE)
            # Translate absolute step numbers into buffer-relative indexes.
            # NOTE(review): this assumes buffer indexes track steps since
            # buffer_start_step without wrapping -- confirm against
            # Buffer's circular behavior for long runs.
            start_idx = start_step - self.buffer_start_step
            end_idx = max(self.sim_manager.current_step -
                          self.buffer_start_step, 0)
            data = self.buffer.get_data()[start_idx:end_idx]
            fn(start_step, self.sim_manager.dt, data)
    def reset(self):
        """ Resets data buffers, and calls update once
        with empty data buffers.
        """
        if self.buffer:
            self.buffer.reset()
            self.buffer_start_step = None
            for fn in self.subscribed_fns:
                # Publish the now-empty buffer so views clear themselves.
                fn(0, self.sim_manager.dt, self.buffer.get_data())
    def subscribe(self, fn):
        """ Subscribes fn.
        Creates data buffer if necessary.
        Updates with current data.
        """
        if (not self.buffer):
            self.buffer = Buffer(self.dimensions)
        self.subscribed_fns.append(fn)
        self.update(fn)
    def unsubscribe(self, fn):
        """ Unsubscribes fn.
        Removes data buffer, and resets start time if necessary.
        """
        self.subscribed_fns.remove(fn)
        # Last subscriber gone: drop the buffer to stop paying for storage.
        if (not self.subscribed_fns):
            self.buffer = None
            self.buffer_start_step = None
class Buffer(object):
    """ Circular Buffer for storing data.

    The backing array has max_size*2 rows: every sample is written twice
    (at i and i+max_size), so any max_size-long window is available as a
    single contiguous slice.
    Buffer takes up max_size*2*dimensions*sizeof(data.dtype) space in memory.
    """
    def __init__(self, dimensions):
        """ Initializes buffer.

        :param dimensions: number of values per sample (columns).
        """
        self.max_size = settings.MAX_BUFFER_ELEMENTS
        self.data = np.empty([self.max_size*2, dimensions])
        self.window_start = 0
        self.size = 0

    def append_data(self, in_data):
        """Appends data to end of buffer.
        numpy raises ValueError if dimensions of data are incorrect.
        """
        i = (self.window_start + self.size) % self.max_size
        # Mirror the write into the upper half so get_data can always
        # return one contiguous view, even after the window wraps.
        self.data[i] = in_data
        self.data[i+self.max_size] = in_data
        if (self.size < self.max_size):
            self.size += 1
        else:
            # Buffer full: advance the window, discarding the oldest sample.
            self.window_start = (self.window_start + 1) % self.max_size

    def get_data(self):
        """ Returns a view of the contents of the buffer.

        Bug fix: the slice must end at window_start + size, not size. The
        original returned a truncated window once the buffer had wrapped
        (window_start > 0); the mirrored upper half makes this slice valid
        for any window position.
        """
        return self.data[self.window_start:self.window_start + self.size]

    def reset(self):
        """ Resets window start and size.
        """
        self.window_start = 0
        self.size = 0
class Adaptor(object):
    """ Observes an object in a network.

    For each capability that supports the observed object, a Node is
    created so that views can subscribe to the data it produces.
    Data is buffered only while at least one subscription exists.
    """
    def __init__(self, sim_manager, obj):
        """ Initializes adapter, adds all supported capabilities and
        connects to simulator manager.
        """
        self.caps = []
        self.out_fns = {}
        self.obj = obj
        for capability in self._load_caps():
            if capability.supports_obj(obj):
                self.caps.append(capability)
        self._connect_obj(sim_manager)

    def _load_caps(self):
        """ Load capabilities.
        """
        return CapFactory.get_caps()

    def _connect_obj(self, sim_manager):
        """ Create Nodes to observe data, and connect them to the
        observed object.

        NOTE:
        The simulator deep-copies the model it is given. Passing our
        OutputFn callable directly would get it deep copied too, leaving
        us unable to subscribe to the copy. Wrapping it in a lambda
        (which is atomic, so survives the deep copy) and binding the
        callable as a default kwarg works around this.
        TODO: OutputFn probably no longer needs to be a callable, we should
        change this and rename it.
        """
        for capability in self.caps:
            dims = capability.get_out_dimensions(self.obj)
            self.out_fns[capability] = OutputFn(sim_manager, dims)
            if hasattr(self.obj, 'name'):
                target_name = self.obj.name
            else:
                target_name = self.obj.__class__.__name__
            node_label = capability.name + '(' + target_name + ')'
            # see the documentation of this function for an explanation
            node = nengo.Node(
                output=lambda time, data, fn=self.out_fns[capability]:
                fn(time, data),
                size_in=dims, label=node_label)
            capability.connect_node(node, self.obj)

    def subscribe(self, cap, fn):
        """ Subscribe to given cap.
        fn will be called at each step with an updated numpy array
        containing data from the node.
        """
        self.out_fns[cap].subscribe(fn)

    def unsubscribe(self, cap, fn):
        """ Unsubscribe function from cap.
        Removes the subscription of fn for the given cap.
        """
        self.out_fns[cap].unsubscribe(fn)

    def update_all(self):
        """ Calls update_all on all OutputFn objects.
        """
        for out_fn in self.out_fns.values():
            out_fn.update_all()

    def reset(self):
        """ Calls reset on all OutputFn objects.
        """
        for out_fn in self.out_fns.values():
            out_fn.reset()
| {
"repo_name": "chairmanmeow50/Brainspawn",
"path": "brainspawn/simulator/adaptor.py",
"copies": "1",
"size": "7826",
"license": "bsd-3-clause",
"hash": -6180596435921100000,
"line_mean": 33.0260869565,
"line_max": 78,
"alpha_frac": 0.6032455916,
"autogenerated": false,
"ratio": 4.193997856377278,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5297243447977278,
"avg_score": null,
"num_lines": null
} |
""" Adapt the binary search algorithm so that instead of outputting whether a specific value was found, it outputs whether a value within an interval (specified by you) was found. Write the pseudocode and code and give the time complexity of the algorithm using the Big O notation."""
def binarySearch(lower, upper, alist):
    """Binary search for any value within the closed interval [lower, upper].

    Parameters
    ----------
    lower, upper : number
        Inclusive bounds of the target interval.
    alist : list
        Numbers sorted in ascending order.

    Returns
    -------
    bool
        True if some element of alist lies in [lower, upper].
    """
    first = 0
    last = len(alist) - 1
    while first <= last:
        midpoint = (first + last) // 2
        value = alist[midpoint]
        # Direct comparison instead of `value in range(lower, upper + 1)`:
        # O(1), and it generalizes to non-integer bounds and values.
        if lower <= value <= upper:
            return True
        if value < lower:
            # The whole interval lies to the right of the midpoint.
            first = midpoint + 1
        else:
            # value > upper: the interval lies to the left.
            last = midpoint - 1
    return False
"""
take lower bound, upper bound and list as inputs
set search start point, end point
while the list is not empty and element is not found
set a new midpoint using the start point and end point
if the midpoint is in bounds
element is found
else
        if the lower bound is greater than the midpoint value
            continue searching in the upper half (move start past midpoint)
        else
            continue searching in the lower half (move end before midpoint)
return whether or not a matching element was found
O(log N) -- the search interval halves on every iteration
"""
| {
"repo_name": "megasan/210-CT",
"path": "coursework 9.py",
"copies": "1",
"size": "1511",
"license": "mit",
"hash": -8335320677247389000,
"line_mean": 33.9761904762,
"line_max": 310,
"alpha_frac": 0.619457313,
"autogenerated": false,
"ratio": 4.220670391061453,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5340127704061453,
"avg_score": null,
"num_lines": null
} |
"""adapt URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import include, url
from django.contrib import admin
from django.contrib.auth import views as auth_views
from django.views.generic import TemplateView
from django.conf import settings
import exports.urls
import reports.urls
# URL routing table: admin site, the admin password-reset flow, the
# django-axes lockout page, and the exports/reports apps.
urlpatterns = [
    url(r'^admin/', include(admin.site.urls)),
    url(r'^admin/password_reset/$', auth_views.password_reset, name='admin_password_reset'),
    url(r'^admin/password_reset/done/$', auth_views.password_reset_done, name='password_reset_done'),
    # Shown by django-axes after too many failed login attempts.
    url(r'^admin/locked/$', TemplateView.as_view(template_name=settings.AXES_LOCKOUT_TEMPLATE)),
    url(r'^reset/(?P<uidb64>[0-9A-Za-z_\-]+)/(?P<token>.+)/$', auth_views.password_reset_confirm, name='password_reset_confirm'),
    url(r'^reset/done/$', auth_views.password_reset_complete, name='password_reset_complete'),
    url(r'^exports/', include(exports.urls)),
    url(r'^reports/', include(reports.urls)),
    # NOTE(review): this mounts the admin site at the root as well as at
    # ^admin/ above -- looks unintentional; confirm before changing.
    url(r'^$', include(admin.site.urls)),
]
| {
"repo_name": "deafhhs/adapt",
"path": "adapt/urls.py",
"copies": "1",
"size": "1577",
"license": "mit",
"hash": -2840120015824734700,
"line_mean": 45.3823529412,
"line_max": 129,
"alpha_frac": 0.7064045656,
"autogenerated": false,
"ratio": 3.4060475161987043,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4612452081798704,
"avg_score": null,
"num_lines": null
} |
#A database of snapshots
import os, pickle
from time import sleep
from Snapshot import Snapshot
class Database:
    """Stores Snapshots on disk and picks the best reply for an input."""

    def __init__(self):
        self.history = []           # recently returned snapshots (bounded)
        self.historyLength = 14     # max entries kept in self.history
        self.snapshots = []         # the full snapshot corpus
        self.lastss = Snapshot()    # most recent snapshot under construction

    def addSnapshot(self, ss):
        """Add a snapshot to the corpus and record it in recent history."""
        self.snapshots.append(ss)
        self.history.append(ss)
        self.maintainHistory()

    def size(self):
        """Number of snapshots in the corpus."""
        return len(self.snapshots)

    def writeDatabase(self):
        """Persist the snapshot corpus to database.dat."""
        print("Writing database...")
        if os.path.isfile("database.dat"):
            os.remove("database.dat")
        # `with` guarantees the handle is closed even if pickling fails.
        with open("database.dat", mode="wb") as file:
            pickle.dump(self.snapshots, file)
        print("Database written. {} Snapshots".format(len(self.snapshots)))

    def readDatabase(self):
        """Load the snapshot corpus from database.dat, if present.

        Bug fix: the original never closed the file handle.
        NOTE(review): unpickling an untrusted file can execute arbitrary
        code -- only load databases this program wrote itself.
        """
        print("Reading database...")
        if not os.path.isfile("database.dat"):
            print("Could not locate database.dat.")
            return
        with open("database.dat", mode="rb") as file:
            self.snapshots = pickle.load(file)

    def getFinale(self, sin, which=0):
        """Return the reply of the best-scoring snapshot not in history.

        `which` skips ahead to the which-th best candidate. Returns "..."
        when there are no snapshots or every candidate was used recently.
        """
        scores = self.getScores(sin)
        if not scores:
            return "..."
        # Sort candidates by ascending score (stable on ties).
        ranked = sorted(zip(scores, self.snapshots), key=lambda pair: pair[0])
        scores, ssl = zip(*ranked)
        ss = ssl[which]
        # Skip snapshots that were returned recently to avoid repetition.
        while ss in self.history:
            which += 1
            if which >= len(ssl):
                # Every candidate was used recently (narrowed from a bare
                # except around the indexing).
                return "..."
            ss = ssl[which]
        self.history.append(ss)
        self.maintainHistory()
        return ss.getFinale()

    def prune(self, usesLowerLimit):
        """Drop snapshots used fewer than usesLowerLimit times.

        Bug fix: the original removed items from the list while iterating
        it, which skips the element following every removal.
        """
        self.snapshots = [
            item for item in self.snapshots if item.uses >= usesLowerLimit]

    def maintainHistory(self):
        """Trim history down to the newest historyLength entries."""
        if len(self.history) > self.historyLength:
            self.history = self.history[-self.historyLength:]

    def getScores(self, sin):
        """Score every snapshot against the input sentence."""
        return [ss.getScore(sin) for ss in self.snapshots]

    def getSnapshot(self, i):
        """Return the snapshot at index i."""
        return self.snapshots[i]
| {
"repo_name": "henfredemars/python-personal-projects",
"path": "ChatBot/Database.py",
"copies": "1",
"size": "2177",
"license": "mit",
"hash": 7907048366013189000,
"line_mean": 27.4189189189,
"line_max": 75,
"alpha_frac": 0.5480018374,
"autogenerated": false,
"ratio": 4.251953125,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.52999549624,
"avg_score": null,
"num_lines": null
} |
"""A DataFrame is a container for ingesting and preprocessing data."""
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from abc import ABCMeta
import collections
from .column import Column
from .transform import Transform
class DataFrame(object):
  """A DataFrame is a container for ingesting and preprocessing data."""
  __metaclass__ = ABCMeta  # Python 2 metaclass syntax; inert under Python 3.

  def __init__(self):
    # Maps column name (str) -> Column instance.
    self._columns = {}

  def columns(self):
    """Set of the column names."""
    return frozenset(self._columns.keys())

  def __len__(self):
    """The number of columns in the DataFrame."""
    return len(self._columns)

  def assign(self, **kwargs):
    """Adds columns to DataFrame.

    Args:
      **kwargs: assignments of the form key=value where key is a string
      and value is an `inflow.Series`, a `pandas.Series` or a numpy array.

    Raises:
      TypeError: keys are not strings.
      TypeError: values are not `inflow.Series`, `pandas.Series` or
      `numpy.ndarray`.

    TODO(jamieas): pandas assign method returns a new DataFrame. Consider
    switching to this behavior, changing the name or adding in_place as an
    argument.
    """
    for k, v in kwargs.items():
      if not isinstance(k, str):
        raise TypeError("The only supported type for keys is string; got %s" %
                        type(k))
      if isinstance(v, Column):
        s = v
      elif isinstance(v, Transform) and v.input_valency() == 0:
        # A Transform with no inputs can be applied immediately, yielding a
        # Column.
        s = v()
      # TODO(jamieas): hook up these special cases again
      # TODO(soergel): can these special cases be generalized?
      # elif isinstance(v, pd.Series):
      #   s = series.NumpySeries(v.values)
      # elif isinstance(v, np.ndarray):
      #   s = series.NumpySeries(v)
      else:
        raise TypeError(
            "Column in assignment must be an inflow.Column, pandas.Series or a"
            " numpy array; got type '%s'." % type(v).__name__)
      self._columns[k] = s

  def select(self, keys):
    """Returns a new DataFrame with a subset of columns.

    Args:
      keys: A list of strings. Each should be the name of a column in the
        DataFrame.

    Returns:
      A new DataFrame containing only the specified columns.
    """
    result = type(self)()
    for key in keys:
      result[key] = self._columns[key]
    return result

  def __getitem__(self, key):
    """Indexing functionality for DataFrames.

    Args:
      key: a string or an iterable of strings.

    Returns:
      A Series or list of Series corresponding to the given keys.

    Raises:
      TypeError: `key` is neither a string nor an iterable of strings.
    """
    if isinstance(key, str):
      return self._columns[key]
    # `collections.Iterable` was removed in Python 3.10; prefer the
    # `collections.abc` location (available since Python 3.3) and fall back
    # to the legacy alias on older interpreters.
    iterable_cls = getattr(collections, 'abc', collections).Iterable
    if isinstance(key, iterable_cls):
      for i in key:
        if not isinstance(i, str):
          raise TypeError("Expected a String; entry %s has type %s." %
                          (i, type(i).__name__))
      return [self.__getitem__(i) for i in key]
    raise TypeError(
        "Invalid index: %s of type %s. Only strings or lists of strings are "
        "supported." % (key, type(key)))

  def __setitem__(self, key, value):
    # Accept a single key/value pair or parallel lists of keys and values;
    # validation of the values is delegated to assign().
    if isinstance(key, str):
      key = [key]
    if isinstance(value, Column):
      value = [value]
    self.assign(**dict(zip(key, value)))

  def build(self):
    """Builds and returns a dict mapping column names to built Tensors."""
    # We do not allow passing a cache here, because that would encourage
    # working around the rule that DataFrames cannot be expected to be
    # synced with each other (e.g., they shuffle independently).
    cache = {}
    tensors = {name: c.build(cache) for name, c in self._columns.items()}
    return tensors

  def to_input_fn(self, feature_keys=None, target_keys=None):
    """Build an input_fn suitable for use with Estimator.

    Args:
      feature_keys: the names of columns to be used as features. If None, all
        columns except those in target_keys are used.
      target_keys: the names of columns to be used as targets. None is
        acceptable for unsupervised learning.

    Returns:
      A function that returns a pair of dicts (features, targets), each mapping
      string names to Tensors.

    Raises:
      ValueError: when the feature and target key sets are non-disjoint
    """
    if target_keys is None:
      target_keys = []
    if feature_keys is None:
      if target_keys:
        feature_keys = self.columns() - set(target_keys)
      else:
        feature_keys = self.columns()
    else:
      in_both = set(feature_keys) & set(target_keys)
      if in_both:
        raise ValueError(
            "Columns cannot be used for both features and targets: %s" %
            ", ".join(in_both))

    def input_fn():
      # It's important to build all the tensors together in one DataFrame.
      # If we did df.select() for both key sets and then build those, the two
      # resulting DataFrames would be shuffled independently.
      tensors = self.build()
      # Note that (for now at least) we provide our columns to Estimator keyed
      # by strings, so they are base features as far as Estimator is concerned.
      # TODO(soergel): reconcile with FeatureColumn keys, Transformer etc.
      features = {key: tensors[key] for key in feature_keys}
      targets = {key: tensors[key] for key in target_keys}
      return features, targets
    return input_fn
| {
"repo_name": "ninotoshi/tensorflow",
"path": "tensorflow/contrib/learn/python/learn/dataframe/dataframe.py",
"copies": "2",
"size": "5867",
"license": "apache-2.0",
"hash": -3019765076197016600,
"line_mean": 33.1104651163,
"line_max": 79,
"alpha_frac": 0.6475200273,
"autogenerated": false,
"ratio": 4.099930118798043,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.008120784830716071,
"num_lines": 172
} |
""" A dataset parser that reads images from folders
Folders are scannerd recursively to find image files. Labels are based
on the folder hierarchy, just leaf folders by default.
Hacked together by / Copyright 2020 Ross Wightman
"""
import os
from timm.utils.misc import natural_key
from .parser import Parser
from .class_map import load_class_map
from .constants import IMG_EXTENSIONS
def find_images_and_targets(folder, types=IMG_EXTENSIONS, class_to_idx=None, leaf_name_only=True, sort=True):
    """Recursively collect image files under ``folder`` with labels derived
    from the directory hierarchy.

    Arguments:
        folder: root directory to scan (symlinks are followed).
        types: iterable of accepted (lower-case) file extensions.
        class_to_idx: optional mapping label -> target index; built from the
            labels found when not given. Files whose label is missing from
            the mapping are dropped.
        leaf_name_only: label is the leaf folder name, otherwise the full
            relative path with separators replaced by '_'.
        sort: naturally sort the result by filename.

    Returns:
        ([(filename, target), ...], class_to_idx)
    """
    filenames = []
    labels = []
    for root, _, files in os.walk(folder, topdown=False, followlinks=True):
        if root != folder:
            rel_path = os.path.relpath(root, folder)
        else:
            rel_path = ''
        if leaf_name_only:
            label = os.path.basename(rel_path)
        else:
            label = rel_path.replace(os.path.sep, '_')
        for fname in files:
            if os.path.splitext(fname)[1].lower() in types:
                filenames.append(os.path.join(root, fname))
                labels.append(label)
    if class_to_idx is None:
        # build the class index from the labels present, naturally sorted
        ordered = sorted(set(labels), key=natural_key)
        class_to_idx = {name: i for i, name in enumerate(ordered)}
    images_and_targets = [
        (fname, class_to_idx[label])
        for fname, label in zip(filenames, labels)
        if label in class_to_idx
    ]
    if sort:
        images_and_targets.sort(key=lambda entry: natural_key(entry[0]))
    return images_and_targets, class_to_idx
class ParserImageFolder(Parser):
    """Dataset parser that draws samples from a folder tree of images,
    with one class per (leaf) directory.
    """

    def __init__(
            self,
            root,
            class_map=''):
        super().__init__()
        self.root = root
        class_to_idx = None
        if class_map:
            class_to_idx = load_class_map(class_map, root)
        self.samples, self.class_to_idx = find_images_and_targets(root, class_to_idx=class_to_idx)
        if not self.samples:
            raise RuntimeError(
                f'Found 0 images in subfolders of {root}. Supported image extensions are {", ".join(IMG_EXTENSIONS)}')

    def __getitem__(self, index):
        """Return (open binary file handle, target) for the given sample."""
        path, target = self.samples[index]
        return open(path, 'rb'), target

    def __len__(self):
        return len(self.samples)

    def _filename(self, index, basename=False, absolute=False):
        """Filename of a sample: basename, absolute, or root-relative."""
        path = self.samples[index][0]
        if basename:
            return os.path.basename(path)
        if not absolute:
            return os.path.relpath(path, self.root)
        return path
| {
"repo_name": "rwightman/pytorch-image-models",
"path": "timm/data/parsers/parser_image_folder.py",
"copies": "1",
"size": "2508",
"license": "apache-2.0",
"hash": -6738571055835827000,
"line_mean": 35.347826087,
"line_max": 118,
"alpha_frac": 0.6279904306,
"autogenerated": false,
"ratio": 3.6774193548387095,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48054097854387096,
"avg_score": null,
"num_lines": null
} |
""" A dataset parser that reads single tarfile based datasets
This parser can read datasets consisting if a single tarfile containing images.
I am planning to deprecated it in favour of ParerImageInTar.
Hacked together by / Copyright 2020 Ross Wightman
"""
import os
import tarfile
from .parser import Parser
from .class_map import load_class_map
from .constants import IMG_EXTENSIONS
from timm.utils.misc import natural_key
def extract_tarinfo(tarfile, class_to_idx=None, sort=True):
    """Collect image members and folder-derived labels from an open tar.

    NOTE: the ``tarfile`` parameter is the open TarFile object; the name
    shadows the stdlib module but is kept for interface compatibility.

    Returns ([(TarInfo, target), ...], class_to_idx).
    """
    files = []
    labels = []
    for member in tarfile.getmembers():
        if not member.isfile():
            continue
        dirname, basename = os.path.split(member.path)
        label = os.path.basename(dirname)
        if os.path.splitext(basename)[1].lower() in IMG_EXTENSIONS:
            files.append(member)
            labels.append(label)
    if class_to_idx is None:
        # derive the class index from labels seen in this tar
        ordered = sorted(set(labels), key=natural_key)
        class_to_idx = {name: i for i, name in enumerate(ordered)}
    tarinfo_and_targets = [
        (member, class_to_idx[label])
        for member, label in zip(files, labels)
        if label in class_to_idx
    ]
    if sort:
        tarinfo_and_targets.sort(key=lambda pair: natural_key(pair[0].path))
    return tarinfo_and_targets, class_to_idx
class ParserImageTar(Parser):
    """ Single tarfile dataset where classes are mapped to folders within tar
    NOTE: This class is being deprecated in favour of the more capable ParserImageInTar that can
    operate on folders of tars or tars in tars.
    """
    def __init__(self, root, class_map=''):
        super().__init__()
        class_to_idx = None
        if class_map:
            class_to_idx = load_class_map(class_map, root)
        assert os.path.isfile(root)
        self.root = root
        # scan once to index the members; the handle cannot be shared across
        # worker processes, so it is reopened lazily in __getitem__
        with tarfile.open(root) as tf:
            self.samples, self.class_to_idx = extract_tarinfo(tf, class_to_idx)
        self.imgs = self.samples
        self.tarfile = None  # opened on first access

    def __getitem__(self, index):
        """Return (file object, target) for the sample at ``index``."""
        if self.tarfile is None:
            self.tarfile = tarfile.open(self.root)
        member, target = self.samples[index]
        return self.tarfile.extractfile(member), target

    def __len__(self):
        return len(self.samples)

    def _filename(self, index, basename=False, absolute=False):
        """Name of the tar member backing sample ``index``."""
        name = self.samples[index][0].name
        return os.path.basename(name) if basename else name
| {
"repo_name": "rwightman/pytorch-image-models",
"path": "timm/data/parsers/parser_image_tar.py",
"copies": "1",
"size": "2589",
"license": "apache-2.0",
"hash": 6998333572099825000,
"line_mean": 34.9583333333,
"line_max": 100,
"alpha_frac": 0.6508304365,
"autogenerated": false,
"ratio": 3.7906295754026353,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9938602147256408,
"avg_score": 0.0005715729292453571,
"num_lines": 72
} |
""" A dataset parser that reads tarfile based datasets
This parser can read and extract image samples from:
* a single tar of image files
* a folder of multiple tarfiles containing imagefiles
* a tar of tars containing image files
Labels are based on the combined folder and/or tar name structure.
Hacked together by / Copyright 2020 Ross Wightman
"""
import os
import tarfile
import pickle
import logging
import numpy as np
from glob import glob
from typing import List, Dict
from timm.utils.misc import natural_key
from .parser import Parser
from .class_map import load_class_map
from .constants import IMG_EXTENSIONS
_logger = logging.getLogger(__name__)
CACHE_FILENAME_SUFFIX = '_tarinfos.pickle'
class TarState:
    """Book-keeping for one tar: the (lazily opened) TarFile handle, the
    TarInfo of this tar within its parent when nested, and the states of
    any child tars contained inside it."""

    def __init__(self, tf: tarfile.TarFile = None, ti: tarfile.TarInfo = None):
        self.tf: tarfile.TarFile = tf
        self.ti: tarfile.TarInfo = ti
        self.children: Dict[str, TarState] = {}  # child states (tars within tars)

    def reset(self):
        """Drop the open TarFile reference; it can be reopened lazily later."""
        self.tf = None
def _extract_tarinfo(tf: tarfile.TarFile, parent_info: Dict, extensions=IMG_EXTENSIONS):
    """Walk an open tar, recording image members in parent_info['samples']
    and recursing into nested .tar members, which are appended as child
    info dicts to parent_info['children'].

    Returns the total number of image samples found, including those in
    nested tars. ``tf`` is expected to be iterable in streaming order
    (opened with mode 'r|').
    """
    sample_count = 0
    for i, ti in enumerate(tf):
        if not ti.isfile():
            continue
        dirname, basename = os.path.split(ti.path)
        name, ext = os.path.splitext(basename)
        ext = ext.lower()
        if ext == '.tar':
            # nested tar: recurse using a streaming reader over the member
            with tarfile.open(fileobj=tf.extractfile(ti), mode='r|') as ctf:
                child_info = dict(
                    name=ti.name, path=os.path.join(parent_info['path'], name), ti=ti, children=[], samples=[])
                sample_count += _extract_tarinfo(ctf, child_info, extensions=extensions)
                _logger.debug(f'{i}/?. Extracted child tarinfos from {ti.name}. {len(child_info["samples"])} images.')
                parent_info['children'].append(child_info)
        elif ext in extensions:
            parent_info['samples'].append(ti)
            sample_count += 1
    return sample_count
def extract_tarinfos(root, class_name_to_idx=None, cache_tarinfo=None, extensions=IMG_EXTENSIONS, sort=True):
    """Scan a tar file (or folder of tar files) for image samples.

    Returns (samples, targets, class_name_to_idx, tarfiles) where samples is
    a numpy array of (TarInfo, parent tar name, child tar TarInfo) tuples,
    targets the matching label indices, and tarfiles a list of
    (tar name, TarState) pairs for later lazy opening.
    ``cache_tarinfo`` of None means "decide by total tar size".
    """
    root_is_tar = False
    if os.path.isfile(root):
        # a single tar passed directly; treat its parent dir as the root
        assert os.path.splitext(root)[-1].lower() == '.tar'
        tar_filenames = [root]
        root, root_name = os.path.split(root)
        root_name = os.path.splitext(root_name)[0]
        root_is_tar = True
    else:
        root_name = root.strip(os.path.sep).split(os.path.sep)[-1]
        tar_filenames = glob(os.path.join(root, '*.tar'), recursive=True)
    num_tars = len(tar_filenames)
    tar_bytes = sum([os.path.getsize(f) for f in tar_filenames])
    assert num_tars, f'No .tar files found at specified path ({root}).'
    _logger.info(f'Scanning {tar_bytes/1024**2:.2f}MB of tar files...')
    info = dict(tartrees=[])
    cache_path = ''
    if cache_tarinfo is None:
        cache_tarinfo = True if tar_bytes > 10*1024**3 else False  # FIXME magic number, 10GB
    if cache_tarinfo:
        cache_filename = '_' + root_name + CACHE_FILENAME_SUFFIX
        cache_path = os.path.join(root, cache_filename)
    if os.path.exists(cache_path):
        # reuse a previously pickled scan of the tar trees
        _logger.info(f'Reading tar info from cache file {cache_path}.')
        with open(cache_path, 'rb') as pf:
            info = pickle.load(pf)
        assert len(info['tartrees']) == num_tars, "Cached tartree len doesn't match number of tarfiles"
    else:
        for i, fn in enumerate(tar_filenames):
            path = '' if root_is_tar else os.path.splitext(os.path.basename(fn))[0]
            with tarfile.open(fn, mode='r|') as tf:  # tarinfo scans done in streaming mode
                parent_info = dict(name=os.path.relpath(fn, root), path=path, ti=None, children=[], samples=[])
                num_samples = _extract_tarinfo(tf, parent_info, extensions=extensions)
                num_children = len(parent_info["children"])
                _logger.debug(
                    f'{i}/{num_tars}. Extracted tarinfos from {fn}. {num_children} children, {num_samples} samples.')
            info['tartrees'].append(parent_info)
        if cache_path:
            _logger.info(f'Writing tar info to cache file {cache_path}.')
            with open(cache_path, 'wb') as pf:
                pickle.dump(info, pf)
    samples = []
    labels = []
    build_class_map = False
    if class_name_to_idx is None:
        build_class_map = True
    # Flatten tartree info into lists of samples and targets w/ targets based on label id via
    # class map arg or from unique paths.
    # NOTE: currently only flattening up to two-levels, filesystem .tars and then one level of sub-tar children
    # this covers my current use cases and keeps things a little easier to test for now.
    tarfiles = []
    def _label_from_paths(*path, leaf_only=True):
        # label from joined path: leaf folder name, or full '_'-joined path
        path = os.path.join(*path).strip(os.path.sep)
        return path.split(os.path.sep)[-1] if leaf_only else path.replace(os.path.sep, '_')
    def _add_samples(info, fn):
        # appends to the enclosing `samples`/`labels` lists; returns count added
        added = 0
        for s in info['samples']:
            label = _label_from_paths(info['path'], os.path.dirname(s.path))
            if not build_class_map and label not in class_name_to_idx:
                continue
            samples.append((s, fn, info['ti']))
            labels.append(label)
            added += 1
        return added
    _logger.info(f'Collecting samples and building tar states.')
    for parent_info in info['tartrees']:
        # if tartree has children, we assume all samples are at the child level
        tar_name = None if root_is_tar else parent_info['name']
        tar_state = TarState()
        parent_added = 0
        for child_info in parent_info['children']:
            child_added = _add_samples(child_info, fn=tar_name)
            if child_added:
                tar_state.children[child_info['name']] = TarState(ti=child_info['ti'])
            parent_added += child_added
        parent_added += _add_samples(parent_info, fn=tar_name)
        if parent_added:
            tarfiles.append((tar_name, tar_state))
    del info
    if build_class_map:
        # build class index
        sorted_labels = list(sorted(set(labels), key=natural_key))
        class_name_to_idx = {c: idx for idx, c in enumerate(sorted_labels)}
    _logger.info(f'Mapping targets and sorting samples.')
    samples_and_targets = [(s, class_name_to_idx[l]) for s, l in zip(samples, labels) if l in class_name_to_idx]
    if sort:
        samples_and_targets = sorted(samples_and_targets, key=lambda k: natural_key(k[0][0].path))
    samples, targets = zip(*samples_and_targets)
    # NOTE(review): zip(*...) raises ValueError on an empty result set --
    # presumably at least one matching sample is assumed; confirm upstream.
    samples = np.array(samples)
    targets = np.array(targets)
    _logger.info(f'Finished processing {len(samples)} samples across {len(tarfiles)} tar files.')
    return samples, targets, class_name_to_idx, tarfiles
class ParserImageInTar(Parser):
    """ Multi-tarfile dataset parser where there is one .tar file per class
    """

    def __init__(self, root, class_map='', cache_tarfiles=True, cache_tarinfo=None):
        """Scan ``root`` (a tar file or folder of tars) and build the sample
        index. ``cache_tarfiles`` keeps opened TarFile handles for reuse;
        ``cache_tarinfo`` controls pickling of the scan results (None = auto).
        """
        super().__init__()
        class_name_to_idx = None
        if class_map:
            class_to_idx = load_class_map(class_map, root)
        self.root = root
        self.samples, self.targets, self.class_name_to_idx, tarfiles = extract_tarinfos(
            self.root,
            class_name_to_idx=class_name_to_idx,
            cache_tarinfo=cache_tarinfo,
            extensions=IMG_EXTENSIONS)
        self.class_idx_to_name = {v: k for k, v in self.class_name_to_idx.items()}
        # a single entry with no name means root itself is the tar
        if len(tarfiles) == 1 and tarfiles[0][0] is None:
            self.root_is_tar = True
            self.tar_state = tarfiles[0][1]
        else:
            self.root_is_tar = False
            self.tar_state = dict(tarfiles)  # tar name -> TarState
        self.cache_tarfiles = cache_tarfiles

    def __len__(self):
        return len(self.samples)

    def __getitem__(self, index):
        """Return (extracted file object, target) for sample ``index``,
        opening (and optionally caching) parent and child tar handles
        on demand.
        """
        sample = self.samples[index]
        target = self.targets[index]
        sample_ti, parent_fn, child_ti = sample
        parent_abs = os.path.join(self.root, parent_fn) if parent_fn else self.root
        tf = None
        cache_state = None
        if self.cache_tarfiles:
            cache_state = self.tar_state if self.root_is_tar else self.tar_state[parent_fn]
            tf = cache_state.tf
        if tf is None:
            # parent tar not opened yet (or caching disabled)
            tf = tarfile.open(parent_abs)
            if self.cache_tarfiles:
                cache_state.tf = tf
        if child_ti is not None:
            # sample lives in a nested tar; open it from within the parent
            ctf = cache_state.children[child_ti.name].tf if self.cache_tarfiles else None
            if ctf is None:
                ctf = tarfile.open(fileobj=tf.extractfile(child_ti))
                if self.cache_tarfiles:
                    cache_state.children[child_ti.name].tf = ctf
            tf = ctf
        return tf.extractfile(sample_ti), target

    def _filename(self, index, basename=False, absolute=False):
        """Name of the tar member backing sample ``index``."""
        filename = self.samples[index][0].name
        if basename:
            filename = os.path.basename(filename)
        return filename
| {
"repo_name": "rwightman/pytorch-image-models",
"path": "timm/data/parsers/parser_image_in_tar.py",
"copies": "1",
"size": "8987",
"license": "apache-2.0",
"hash": -3681078907760514600,
"line_mean": 39.481981982,
"line_max": 118,
"alpha_frac": 0.6147769,
"autogenerated": false,
"ratio": 3.628179249091643,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4742956149091643,
"avg_score": null,
"num_lines": null
} |
"""A data sink implementation for the core listener notification service of
:class:`openxc.vehicle.Vehicle`.
"""
from threading import Thread
from collections import defaultdict
import logging
from openxc.measurements import Measurement, UnrecognizedMeasurementError
from .queued import QueuedSink
LOG = logging.getLogger(__name__)
class MeasurementNotifierSink(QueuedSink):
    """Notify previously registered callbacks whenever measurements of a certain
    type have been received.

    This data sink is the core of the asynchronous interface of
    :class:`openxc.vehicle.Vehicle.`
    """
    def __init__(self):
        super(MeasurementNotifierSink, self).__init__()
        # Maps measurement name -> set of callbacks registered for that name.
        self.callbacks = defaultdict(set)
        self.notifier = self.Notifier(self.queue, self._propagate)

    def register(self, measurement_class, callback):
        """Call the ``callback`` with any new values of ``measurement_class``
        received.
        """
        name = Measurement.name_from_class(measurement_class)
        self.callbacks[name].add(callback)

    def unregister(self, measurement_class, callback):
        """Stop notifying ``callback`` of new values of ``measurement_class``.

        If the callback wasn't previously registered, this method will have no
        effect.
        """
        name = Measurement.name_from_class(measurement_class)
        # discard() implements the documented no-op contract; remove() raised
        # KeyError for callbacks that were never registered.
        self.callbacks[name].discard(callback)

    def _propagate(self, measurement, **kwargs):
        """Invoke every callback registered for this measurement's type plus
        the generic callbacks registered for the base Measurement type.
        """
        # Union into a *new* set -- the previous in-place update() permanently
        # merged the generic callbacks into the per-type registry, so
        # unregistering a generic callback did not stop its notifications.
        callbacks = (self.callbacks[measurement.name]
                | self.callbacks[Measurement.name])
        for callback in callbacks:
            try:
                callback(measurement, **kwargs)
            except TypeError:
                # Fall back for callbacks that don't accept extra kwargs.
                callback(measurement)

    class Notifier(Thread):
        """Daemon thread that pulls messages off the queue, converts them to
        Measurements and dispatches them through the provided callback.
        """
        def __init__(self, queue, callback):
            super(MeasurementNotifierSink.Notifier, self).__init__()
            self.daemon = True
            self.queue = queue
            self.callback = callback
            self.start()

        def run(self):
            while True:
                message, kwargs = self.queue.get()
                try:
                    measurement = Measurement.from_dict(message)
                    self.callback(measurement, **kwargs)
                except UnrecognizedMeasurementError as e:
                    LOG.warning(e)
                finally:
                    # Always mark the item done -- previously a message that
                    # failed to parse left queue.join() waiting forever.
                    self.queue.task_done()
| {
"repo_name": "openxc/openxc-python",
"path": "openxc/sinks/notifier.py",
"copies": "1",
"size": "2408",
"license": "bsd-3-clause",
"hash": -6824652568816227000,
"line_mean": 34.9402985075,
"line_max": 80,
"alpha_frac": 0.6316445183,
"autogenerated": false,
"ratio": 4.777777777777778,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5909422296077778,
"avg_score": null,
"num_lines": null
} |
"""A data source for reading from pre-recorded OpenXC trace files."""
import logging
import time
from .base import DataSourceError, BytestreamDataSource
from openxc.formats.json import JsonFormatter
LOG = logging.getLogger(__name__)
class TraceDataSource(BytestreamDataSource):
    """Replays a previously recorded OpenXC vehicle data trace file.

    For details on the trace file format, see
    http://openxcplatform.com/android/testing.html.
    """
    def __init__(self, filename=None, realtime=True, loop=True, **kwargs):
        """Construct the source and attempt to open the trace file.

        filename - the full absolute path to the trace file
        realtime - if ``True``, replay at approximately the recorded
            cadence; otherwise replay as fast as possible (likely much
            faster than any vehicle).
        loop - if ``True``, restart the trace at EOF and keep providing
            data until the process exits or the source is stopped.
        """
        super(TraceDataSource, self).__init__(**kwargs)
        self.realtime = realtime
        self.loop = loop
        self.filename = filename
        self._reopen_file()

    def _reopen_file(self):
        """(Re)open the trace file and reset the playback start time."""
        existing = getattr(self, 'trace_file', None)
        if existing is not None:
            existing.close()
        self.trace_file = self._open_file(self.filename)
        self.starting_time = time.time()

    def _store_timestamp(self, timestamp):
        """Cache the first timestamp of the trace on the instance, unless
        one has already been recorded.
        """
        if getattr(self, 'first_timestamp', None) is not None:
            return
        self.first_timestamp = timestamp
        LOG.debug("Storing %d as the first timestamp of the trace file %s",
                self.first_timestamp, self.filename)

    def read(self):
        """Read a line of data from the input source at a time."""
        line = self.trace_file.readline()
        if line == '':
            if not self.loop:
                self.trace_file.close()
                self.trace_file = None
                raise DataSourceError()
            self._reopen_file()
        message = JsonFormatter.deserialize(line)
        timestamp = message.get('timestamp', None)
        if self.realtime and timestamp is not None:
            self._store_timestamp(timestamp)
            self._wait(self.starting_time, self.first_timestamp, timestamp)
        return line + "\x00"

    @staticmethod
    def _open_file(filename):
        """Open ``filename`` for reading.

        Raises:
            DataSourceError, if the file cannot be opened.
        """
        if filename is None:
            raise DataSourceError("Trace filename is not defined")
        try:
            handle = open(filename, "r")
        except IOError as error:
            raise DataSourceError("Unable to open trace file %s" % filename, error)
        LOG.debug("Opened trace file %s", filename)
        return handle

    @staticmethod
    def _wait(starting_time, first_timestamp, timestamp):
        """Block until ``timestamp`` should occur, given that playback began
        at ``starting_time`` and the trace starts at ``first_timestamp``.
        """
        target = starting_time + (timestamp - first_timestamp)
        time.sleep(max(target - time.time(), 0))
| {
"repo_name": "openxc/openxc-python",
"path": "openxc/sources/trace.py",
"copies": "1",
"size": "3543",
"license": "bsd-3-clause",
"hash": -3904050951738538500,
"line_mean": 35.5257731959,
"line_max": 79,
"alpha_frac": 0.6136042901,
"autogenerated": false,
"ratio": 4.451005025125628,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0010972448302887805,
"num_lines": 97
} |
"""A datasource providing different types of noise.
"""
# standard imports
from typing import Tuple
# third party imports
import numpy as np
# toolbox imports
from dltb.base.data import Data
from .datasource import Imagesource, Livesource, Random
class Noise(Random, Livesource, Imagesource):
    # pylint: disable=too-many-ancestors
    """Datasource that generates random noise arrays (e.g. noise images).

    Attributes
    ----------
    shape: tuple
        Shape of the noise array to generate.
    distribution: str
        Either `uniform` or `normal`.
    """

    def __init__(self, key: str = "Noise", description: str = None,
                 shape: Tuple[int, ...] = (100, 100),
                 distribution: str = 'uniform', **kwargs) -> None:
        """Create a new :py:class:`Noise` datasource.

        Arguments
        ---------
        shape: tuple
            Shape of the data to generate, e.g. (3,100,100) for RGB images.
        """
        if not description:
            description = f"<Noise Generator {shape}>"
        super().__init__(key=key, description=description,
                         random_generator='numpy', **kwargs)
        self.shape = shape
        self.distribution = distribution

    def __str__(self):
        return "Noise"

    #
    # Data
    #

    def _get_random(self, data: Data, shape: Tuple[int, ...] = None,
                    distribution: str = None, **kwargs) -> None:
        # pylint: disable=arguments-differ
        """Fill ``data.array`` with a freshly generated random array.
        Parameters default to the corresponding object attributes.

        Arguments
        ---------
        shape: tuple
            The shape of the data to be generated.
        distribution: str
            The distribution (either `uniform` or `normal`).
        """
        if not shape:
            shape = self.shape
        if not distribution:
            distribution = self.distribution
        if distribution == 'uniform':
            data.array = np.random.rand(*shape)
        else:
            data.array = np.random.randn(*shape)

    def _get_snapshot(self, data, snapshot: bool = True, **kwargs) -> None:
        """A snapshot from a noise source is simply a fresh random array."""
        self._get_random(data, **kwargs)
        super()._get_snapshot(data, snapshot, **kwargs)
| {
"repo_name": "Petr-By/qtpyvis",
"path": "dltb/datasource/noise.py",
"copies": "1",
"size": "2209",
"license": "mit",
"hash": -5813427164859042000,
"line_mean": 29.6805555556,
"line_max": 78,
"alpha_frac": 0.5830692621,
"autogenerated": false,
"ratio": 4.314453125,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.53975223871,
"avg_score": null,
"num_lines": null
} |
"""A datasource reading images from a webcam.
This module mainly provides the class :py:class:`Webcam` that is
a Datasource interface for accessing the a webcam.
Examples
--------
from dltb.datasource.webcam import Webcam
webcam = Webcam()
webcam.prepare()
frame = webcam.get_data()
image = frame.data
"""
# standard imports
import sys
import time
import threading
import logging
# toolbox imports
from ..base.video import Webcam
from ..base.data import Data
from .datasource import Imagesource, Livesource
# logging
LOG = logging.getLogger(__name__)
class DataWebcam(Livesource, Imagesource):
    # pylint: disable=too-many-ancestors
    """A data source fetching images from the webcam.

    Attributes
    ----------
    _backend: WebcamBackend
        The WebcamBackend used for accessing the Webcam.
    _device: int
        Device number of the webcam to use.
    """
    def __init__(self, key: str = "Webcam", description: str = "<Webcam>",
                 device: int = 0, **kwargs) -> None:
        """Create a new DataWebcam.

        Raises
        ------
        ImportError:
            The OpenCV module is not available.
        """
        super().__init__(key=key, description=description, **kwargs)
        self._device = device
        self._backend = None  # created in _prepare(), released in _unprepare()
        self._loop_thread = None  # background fetch thread (linux loop only)
        self._loop_data = None  # most recent frame fetched by the loop thread
    def __str__(self) -> str:
        return "Webcam"
    #
    # Preparable
    #
    def _prepared(self) -> bool:
        """Report if this Datasource is prepared for use.
        A Datasource has to be prepared before it can be used.
        """
        return super()._prepared() and (self._backend is not None)
    def _prepare(self) -> None:
        """Prepare this Datasource for use by opening the webcam backend.
        """
        super()._prepare()
        self._backend = Webcam(device=self._device)
    def _unprepare(self) -> None:
        """Unprepare this Datasource. This will free resources but
        the webcam can no longer be used. Call :py:meth:`prepare`
        to prepare the webcam for another use.
        """
        if self._backend:
            del self._backend
            self._backend = None
        super()._unprepare()
    #
    # Data
    #
    def _get_meta(self, data: Data, **kwargs) -> None:
        # pylint: disable=arguments-differ
        """Get metadata for some data: a per-frame timestamp attribute and
        the (batch-constant) backend class name.
        """
        data.add_attribute('timestamp', batch=True)
        data.add_attribute('backend', f"{type(self._backend).__module__}."
                           f"{type(self._backend).__name__}", batch=False)
        super()._get_meta(data, **kwargs)
    def _get_snapshot(self, data, snapshot: bool = True, **kwargs) -> None:
        """The default (and only) mode of getting data from the webcam
        is reading the next frame.

        Arguments
        ---------
        snapshot: bool
            If True, try to make sure that we get a current snapshot.
            On some systems, the video driver buffers some frame, so that
            reading just reading the frame may result in outdated data and
            one should first empty the buffer before reading the data.
        """
        LOG.debug("Webcam._get_data(snapshot=%r)", snapshot)
        # While looping, frames come from the background thread's buffer
        # rather than from a direct (blocking) backend read.
        data.array = (self._loop_data if self.looping else
                      self._backend.get_frame(clear_buffer=snapshot))
        super()._get_snapshot(data, snapshot, **kwargs)
    #
    # Loop
    #
    def start_loop(self):
        """Start an asynchronous loop cycle. This method will return
        immediately, running the loop cycle in a background thread.
        """
        super().start_loop()
        # only linux needs the dedicated fetch thread (see _run_loop_linux)
        if sys.platform == 'linux':
            self._loop_thread = threading.Thread(target=self._run_loop_linux)
            self._loop_thread.start()
            LOG.info("Webcam: loop thread started.")
    def stop_loop(self):
        """Stop a currently running loop.
        """
        super().stop_loop()
        if sys.platform == 'linux' and self._loop_thread is not None:
            self._loop_thread.join()
            self._loop_thread = None
            LOG.info("Webcam: loop thread joined.")
    def _run_loop_linux(self) -> None:
        """Under linux, the av-based linux capture code is using
        an internal fifo (5 frames, iirc), and you cannot clean (or
        say, flush) it.
        Hence we will apply another loop logic: we read frames as
        fast as possible and only report them at certain times.
        """
        LOG.info("Webcam: employing linux loop")
        fetched = 0
        start_time = time.time()
        # drain frames continuously into _loop_data until asked to stop
        while not self.loop_stop_event.is_set():
            self._loop_data = self._backend.get_frame(clear_buffer=False)
            last_time = time.time()
            fetched += 1
            LOG.debug("Webcam: fetched: %d, frames per second: %.1f",
                      fetched, fetched/(last_time-start_time))
    #
    # information
    #
    def _get_description(self) -> str:
        """Extend the base description with the backend class in use."""
        description = super()._get_description()
        description += ", backend="
        description += ("None" if self._backend is None else
                        (type(self._backend).__module__ + '.' +
                         type(self._backend).__name__))
        return description
| {
"repo_name": "Petr-By/qtpyvis",
"path": "dltb/datasource/webcam.py",
"copies": "1",
"size": "5275",
"license": "mit",
"hash": 984577935876798000,
"line_mean": 29.8479532164,
"line_max": 77,
"alpha_frac": 0.5793364929,
"autogenerated": false,
"ratio": 4.160094637223975,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5239431130123975,
"avg_score": null,
"num_lines": null
} |
"""A data store holds the bulk of model setup, intermediate and output data:
- scenario variant and narrative data (including parameter defaults)
- model interventions, initial conditions and state
- conversion coefficients
- results
"""
from abc import ABCMeta, abstractmethod
from typing import Dict, List
from smif.data_layer.data_array import DataArray
class DataStore(metaclass=ABCMeta):
    """A DataStore must implement each of the abstract methods defined in this interface

    Concrete implementations hold scenario/narrative variant data, model
    parameter defaults, interventions, initial conditions, state,
    conversion coefficients and model run results.
    """
    # region DataArray
    @abstractmethod
    def read_scenario_variant_data(self, key, spec, timestep=None) -> DataArray:
        """Read scenario variant data array

        Parameters
        ----------
        key : str
        spec : ~smif.metadata.spec.Spec
        timestep : int (optional)
            If None, read data for all timesteps

        Returns
        -------
        data_array : ~smif.data_layer.data_array.DataArray
        """
    @abstractmethod
    def write_scenario_variant_data(self, key, data_array, timestep=None):
        """Write scenario variant data array

        Parameters
        ----------
        key : str
        data_array : ~smif.data_layer.data_array.DataArray
        timestep : int (optional)
            If None, write data for all timesteps
        """
    @abstractmethod
    def read_narrative_variant_data(self, key, spec, timestep=None):
        """Read narrative variant data array

        Parameters
        ----------
        key : str
        spec : ~smif.metadata.spec.Spec
        timestep : int (optional)
            If None, read data for all timesteps

        Returns
        -------
        data_array : ~smif.data_layer.data_array.DataArray
        """
    @abstractmethod
    def write_narrative_variant_data(self, key, data_array, timestep=None):
        """Write narrative variant data array

        Parameters
        ----------
        key : str
        data_array : ~smif.data_layer.data_array.DataArray
        timestep : int (optional)
            If None, write data for all timesteps
        """
    @abstractmethod
    def read_model_parameter_default(self, key, spec):
        """Read model parameter default data array

        Parameters
        ----------
        key : str
        spec : ~smif.metadata.spec.Spec

        Returns
        -------
        data_array : ~smif.data_layer.data_array.DataArray
        """
    @abstractmethod
    def write_model_parameter_default(self, key, data_array):
        """Write model parameter default data array

        Parameters
        ----------
        key : str
        data_array : ~smif.data_layer.data_array.DataArray
        """
    # endregion
    # region Interventions
    @abstractmethod
    def read_interventions(self, key):
        """Read interventions data for `key`

        Parameters
        ----------
        key : str

        Returns
        -------
        dict[str, dict]
            A dict of intervention dictionaries containing intervention
            attributes keyed by intervention name
        """
    @abstractmethod
    def write_interventions(self, key, interventions):
        """Write interventions data for `key`

        Parameters
        ----------
        key : str
        interventions : dict[str, dict]
            Intervention attribute dicts keyed by intervention name
        """
    @abstractmethod
    def read_initial_conditions(self, key) -> List[Dict]:
        """Read historical interventions for `key`

        Parameters
        ----------
        key : str

        Returns
        -------
        list[dict]
        """
    @abstractmethod
    def write_initial_conditions(self, key, initial_conditions):
        """Write historical interventions for `key`

        Parameters
        ----------
        key : str
        initial_conditions: list[dict]
        """
    # endregion
    # region State
    @abstractmethod
    def read_state(self, modelrun_name, timestep, decision_iteration=None) -> List[Dict]:
        """Read list of (name, build_year) for a given model_run, timestep,
        decision

        Parameters
        ----------
        modelrun_name : str
        timestep : int
        decision_iteration : int, optional

        Returns
        -------
        list[dict]
        """
    @abstractmethod
    def write_state(self, state: List[Dict],
                    modelrun_name: str,
                    timestep: int,
                    decision_iteration=None):
        """State is a list of decisions with name and build_year.
        State is output from the DecisionManager

        Parameters
        ----------
        state : list[dict]
        modelrun_name : str
        timestep : int
        decision_iteration : int, optional
        """
    # endregion
    # region Conversion coefficients
    @abstractmethod
    def read_coefficients(self, source_dim, destination_dim):
        """Reads coefficients from the store

        Coefficients are uniquely identified by their source/destination dimensions.
        This method and `write_coefficients` implement caching of conversion
        coefficients between a single pair of dimensions.

        Parameters
        ----------
        source_dim : str
            dimension name
        destination_dim : str
            dimension name

        Returns
        -------
        numpy.ndarray

        Notes
        -----
        To be called from :class:`~smif.convert.adaptor.Adaptor` implementations.
        """
    @abstractmethod
    def write_coefficients(self, source_dim, destination_dim, data):
        """Writes coefficients to the store

        Coefficients are uniquely identified by their source/destination dimensions.
        This method and `read_coefficients` implement caching of conversion
        coefficients between a single pair of dimensions.

        Parameters
        ----------
        source_dim : str
            dimension name
        destination_dim : str
            dimension name
        data : numpy.ndarray

        Notes
        -----
        To be called from :class:`~smif.convert.adaptor.Adaptor` implementations.
        """
    # endregion
    # region Results
    @abstractmethod
    def read_results(self, modelrun_name, model_name, output_spec, timestep=None,
                     decision_iteration=None) -> DataArray:
        """Return results of a model from a model_run for a given output at a timestep and
        decision iteration

        Parameters
        ----------
        modelrun_name : str
        model_name : str
        output_spec : ~smif.metadata.spec.Spec
        timestep : int, default=None
        decision_iteration : int, default=None

        Returns
        -------
        ~smif.data_layer.data_array.DataArray
        """
    @abstractmethod
    def write_results(self, data, modelrun_name, model_name, timestep=None,
                      decision_iteration=None):
        """Write results of a `model_name` in `modelrun_name` for a given `output_name`

        Parameters
        ----------
        data : ~smif.data_layer.data_array.DataArray
        modelrun_name : str
        model_name : str
        timestep : int, optional
        decision_iteration : int, optional
        """
    @abstractmethod
    def available_results(self, modelrun_name):
        """List available results from a model run

        Parameters
        ----------
        modelrun_name : str

        Returns
        -------
        list[tuple]
            Each tuple is (timestep, decision_iteration, model_name, output_name)
        """
    # endregion
| {
"repo_name": "willu47/smif",
"path": "src/smif/data_layer/abstract_data_store.py",
"copies": "1",
"size": "7442",
"license": "mit",
"hash": 1239349126053676800,
"line_mean": 25.8664259928,
"line_max": 90,
"alpha_frac": 0.5713517872,
"autogenerated": false,
"ratio": 4.657071339173967,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5728423126373967,
"avg_score": null,
"num_lines": null
} |
"""A data structure for representing dependency tree graphs."""
__author__ = ["John Stewart <free-variation>"]
from typing import Dict, List, Union
from xml.etree.ElementTree import Element, ElementTree
from cltk.core.data_types import Doc, Process, Word
from cltk.core.exceptions import CLTKException
from cltk.morphology.universal_dependencies_features import (
NOMINAL_FEATURES,
OTHER_FEATURES,
VERBAL_FEATURES,
MorphosyntacticFeature,
)
# Flat collection of every known Universal Dependencies morphosyntactic
# feature, combining the nominal, verbal and other feature groups.
ALL_POSSIBLE_FEATURES = NOMINAL_FEATURES + VERBAL_FEATURES + OTHER_FEATURES
class Form(Element):
    """For the word (ie, node) of a dependency tree and its attributes. Inherits
    from the ``Element`` class of Python's ``xml.etree`` library.

    >>> desc_form = Form('described')
    >>> desc_form
    described_0
    >>> desc_form.set('Tense', 'Past')
    >>> desc_form
    described_0
    >>> desc_form / 'VBN'
    described_0/VBN
    >>> desc_form.full_str()
    'described_0 [Tense=Past,pos=VBN]'
    """

    def __init__(self, form: str, form_id: int = 0) -> None:
        """Constructor for the Form class."""
        Element.__init__(self, form, attrib={"form_id": str(form_id)})

    def __truediv__(self, pos_tag: str) -> "Form":
        """Assigns the POS feature for current form. This is
        done by overloading ``operator.truediv()`` (``a / b``) to
        perform ``.set()`` upon and ``Element`` of the xml library.

        >>> desc_form = Form('described')
        >>> desc_form / 'VBN'
        described_0/VBN

        >>> import operator
        >>> desc_form = Form('described')
        >>> operator.truediv(desc_form, 'VBN')
        described_0/VBN
        """
        self.set("pos", pos_tag)
        return self

    def __rshift__(self, other: Union["Form", str]) -> "Dependency":
        """Create a dependency between this form as governor, to
        the other as dependent. Adds the dependent to the children
        of this form. This is done by overloading ``operator.rshift()``
        (``a >> b``) to perform ``.append()`` upon ``Element`` of the xml
        library. Returns ``Dependency``.

        >>> john = Form('John', 1) / 'NNP'
        >>> john
        John_1/NNP
        >>> loves = Form('loves', 2) / 'VRB'
        >>> loves
        loves_2/VRB
        >>> mary = Form('Mary', 3) / 'NNP'
        >>> mary
        Mary_3/NNP
        """
        # Plain strings are promoted to anonymous Forms (form_id 0).
        other = Form(other) if isinstance(other, str) else other
        self.append(other)
        return Dependency(self, other)

    def get_dependencies(self, relation: str) -> List["Dependency"]:
        """Extract dependents of this form for the specified
        dependency relation.

        >>> john = Form('John', 1) / 'NNP'
        >>> loves = Form('loves', 2) / 'VRB'
        >>> mary = Form('Mary', 3) / 'NNP'
        >>> loves >> john | 'subj'
        subj(loves_2/VRB, John_1/NNP)
        >>> loves >> mary | 'obj'
        obj(loves_2/VRB, Mary_3/NNP)
        >>> loves.get_dependencies('subj')
        [subj(loves_2/VRB, John_1/NNP)]
        >>> loves.get_dependencies('obj')
        [obj(loves_2/VRB, Mary_3/NNP)]
        """
        deps = self.findall('*[@relation="{}"]'.format(relation))
        return [Dependency(self, dep, relation) for dep in deps]

    def __str__(self) -> str:
        # Render as "<form>_<id>" with an optional "/<pos>" suffix.
        return (
            self.tag
            + "_"
            + self("form_id")
            + (("/" + self("pos")) if self("pos") else "")
        )

    __repr__ = __str__

    def full_str(self, include_relation=True) -> str:
        """Returns a string containing all features of the Form.
        The ID is attached to the text, and the relation is
        optionally suppressed.

        >>> loves = Form('loves', 2) / 'VRB'
        >>> loves.full_str()
        'loves_2 [pos=VRB]'
        >>> john = Form('John', 1) / 'NNP'
        >>> loves >> john | 'subj'
        subj(loves_2/VRB, John_1/NNP)
        >>> john.full_str(True)
        'John_1 [pos=NNP,relation=subj]'
        """
        excluded = ["form_id", "relation"] if not include_relation else ["form_id"]
        return "{0}_{1} [{2}]".format(
            self.tag,
            self("form_id"),
            ",".join(
                [
                    feature + "=" + self(feature)
                    for feature in self.attrib.keys()
                    if feature not in excluded
                ]
            ),
        )

    def __call__(self, feature: str) -> str:
        # Shorthand attribute access: form("pos") == form.get("pos").
        return self.get(feature)

    @staticmethod
    def to_form(word: Word) -> "Form":
        """Converts a ``CLTK`` ``Word`` object to a ``Form``.

        TODO: The Form info that prints is incomplete/ugly; correct str repr of ``Form``

        >>> from cltk.morphology.universal_dependencies_features import Case, Gender, Number
        >>> cltk_word = Word(index_char_start=None, index_char_stop=None, index_token=0, index_sentence=0, string='Gallia', pos='NOUN', lemma='mallis', scansion=None, xpos='A1|grn1|casA|gen2', upos='NOUN', dependency_relation='nsubj', governor=3, embedding=[], stop=False, named_entity=True)
        >>> cltk_word.features[Case] = Case.nominative
        >>> cltk_word.features[Gender] = Gender.feminine
        >>> cltk_word.features[Number] = Number.singular
        >>> f = Form.to_form(cltk_word)
        >>> f.full_str()
        'Gallia_0 [lemma=mallis,pos=NOUN,upos=NOUN,xpos=A1|grn1|casA|gen2,Case=nominative,Gender=feminine,Number=singular]'
        """
        form = Form(word.string, form_id=word.index_token)
        form.set("lemma", word.lemma)
        form.set("pos", str(word.pos))
        form.set("upos", word.upos)
        form.set("xpos", word.xpos)
        for feature_name, feature_values in word.features.all():
            if feature_values is None:
                # Previously this fell through to ``feature_values[0]`` and
                # crashed with TypeError ('NoneType' is not subscriptable).
                # Log the offending word's features and skip the feature.
                print(word.stanza_features)
                print(word.features)
                continue
            form.set(str(feature_name), str(feature_values[0]))
        return form
class Dependency:
    """The asymmetric binary relationship (or edge) between a governing
    Form (the "head") and a subordinate Form (the "dependent").

    In principle the relationship could capture any form-to-form relation
    that the system deems of interest, be it syntactic, semantic, or
    discursive.

    With no ``relation`` attribute, the dependency merely states that some
    asymmetric relationship holds between head and dependent — an *untyped*
    dependency.  Supplying a string value for ``relation`` makes it a
    *typed* dependency.
    """

    def __init__(self, head: Form, dep: Form, relation: str = None) -> None:
        self.head = head
        self.dep = dep
        self.relation = relation

    def __str__(self) -> str:
        # Untyped dependencies render with an empty label: "(head, dep)".
        label = self.relation if self.relation else ""
        return "{0}({1}, {2})".format(label, self.head, self.dep)

    __repr__ = __str__

    def __or__(self, relation: str) -> "Dependency":
        # ``dep_obj | "subj"`` types the dependency and records the
        # relation on the dependent form as well.
        self.relation = relation
        self.dep.set("relation", relation)
        return self
class DependencyTree(ElementTree):
    """The hierarchical tree representing the entirety of a parse."""

    def __init__(self, root: Form) -> None:
        # The root form is marked with the distinguished "root" relation.
        root.set("relation", "root")
        ElementTree.__init__(self, root)

    def get_dependencies(self) -> List[Dependency]:
        """Returns a list of all the dependency relations in the tree,
        generated by depth-first search.

        >>> from cltk.languages.example_texts import get_example_text
        >>> from cltk.dependency.processes import StanzaProcess
        >>> process_stanza = StanzaProcess(language="lat")
        >>> output_doc = process_stanza.run(Doc(raw=get_example_text("lat")))
        >>> a_sentence = output_doc.sentences[0]
        >>> t = DependencyTree.to_tree(a_sentence)
        >>> len(t.get_dependencies())
        28
        """

        def _collect(form: Form, acc: List[Dependency]) -> List[Dependency]:
            # Depth-first: recurse into each child before gathering the
            # dependencies this form has toward that child's relation.
            for child in list(form):
                acc = _collect(child, acc)
                acc.extend(form.get_dependencies(child("relation")))
            return acc

        root_form = self.getroot()
        collected = _collect(root_form, [])
        # The root itself hangs off a virtual (None) governor.
        collected.append(Dependency(None, root_form, "root"))
        return collected

    def print_tree(self, all_features: bool = False):
        """Prints a pretty-printed (indented) representation
        of the dependency tree. If all_features is True, then
        each node is printed with its complete feature bundles.
        """

        def _show(form: Form, indent: int, all_features: bool):
            edge = "└─ " if indent > 0 else ""
            rendered = form.full_str(False) if all_features else str(form)
            print(" " * indent + edge + form("relation") + " | " + rendered)
            for child in list(form):
                _show(child, indent + 4, all_features)

        _show(self.getroot(), indent=0, all_features=all_features)

    @staticmethod
    def to_tree(sentence: List[Word]) -> "DependencyTree":
        """Factory method to create trees from sentences parses, i.e. lists of words.

        >>> from cltk.languages.example_texts import get_example_text
        >>> from cltk.dependency.processes import StanzaProcess
        >>> process_stanza = StanzaProcess(language="lat")
        >>> output_doc = process_stanza.run(Doc(raw=get_example_text("lat")))
        >>> a_sentence = output_doc.sentences[0]
        >>> t = DependencyTree.to_tree(a_sentence)
        >>> t.findall(".")
        [divisa_3/verb]
        """
        forms = {word.index_token: Form.to_form(word)
                 for word in sentence}  # type: Dict[int, Form]
        root = None
        for word in sentence:
            if word.dependency_relation == "root":
                root = forms[word.index_token]
            elif word.governor != -1:
                # only add a non-root element to the tree if it has a governor (i.e. not -1)
                forms[word.governor] >> forms[word.index_token] | word.dependency_relation
        return DependencyTree(root) if root else None
| {
"repo_name": "diyclassics/cltk",
"path": "src/cltk/dependency/tree.py",
"copies": "4",
"size": "10141",
"license": "mit",
"hash": -9107261459092737000,
"line_mean": 35.7282608696,
"line_max": 291,
"alpha_frac": 0.5766005722,
"autogenerated": false,
"ratio": 3.679491833030853,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6256092405230853,
"avg_score": null,
"num_lines": null
} |
from __future__ import print_function
import subprocess
import tempfile
import uuid
# A temporary directory on the Android device.
DEVICE_TEMP_DIR = '/data/local/tmp'
def shell(args):
    """
    Execute 'adb shell' with the given arguments.

    Raise an exception if 'adb shell' returns a non-zero exit code.
    Note that this only occurs if communication with the connected device
    fails, not if the command run on the device fails.
    """
    command = ['adb', 'shell'] + args
    return subprocess.check_output(command)
def rmdir(path):
    """Remove all files in the device directory at `path`."""
    # The glob is expanded by the device-side shell, not by the host.
    target = '{}/*'.format(path)
    shell(['rm', '-rf', target])
def push(local_path, device_path):
    """Move the file at the given local path to the path on the device."""
    output = subprocess.check_output(
        ['adb', 'push', local_path, device_path],
        stderr=subprocess.STDOUT)
    return output.strip()
def reboot():
    """Reboot the connected Android device, waiting for it to return online."""
    # 'wait-for-device' blocks until the device is reachable again.
    for command in (['adb', 'reboot'], ['adb', 'wait-for-device']):
        subprocess.check_call(command)
def _create_executable_on_device(device_path, contents):
    """Write `contents` to a host temp file, push it to `device_path`
    on the device, and mark it executable (mode 755).

    The original implementation leaked the file descriptor returned by
    ``tempfile.mkstemp`` and left the temporary file on disk; both are
    now cleaned up.
    """
    import os
    fd, tmp = tempfile.mkstemp()
    try:
        # fdopen takes ownership of the descriptor and closes it on exit.
        with os.fdopen(fd, 'w') as f:
            f.write(contents)
        push(tmp, device_path)
    finally:
        os.remove(tmp)
    shell(['chmod', '755', device_path])
def execute_on_device(executable_path, executable_arguments):
    """
    Run an executable on an Android device.

    Push an executable at the given 'executable_path' to an Android device,
    then execute that executable on the device, passing any additional
    'executable_arguments'. Return 0 if the executable succeeded when run on
    device, and 1 otherwise.

    This function is not as simple as calling 'adb shell', for two reasons:
    1. 'adb shell' can only take input up to a certain length, so it fails for
       long executable names or when a large amount of arguments are passed to
       the executable. This function attempts to limit the size of any string
       passed to 'adb shell'.
    2. 'adb shell' ignores the exit code of any command it runs. This function
       therefore uses its own mechanisms to determine whether the executable
       had a successful exit code when run on device.

    Parameters: executable_path is a host-side file path; executable_arguments
    is a sequence of strings joined with spaces into the on-device command.
    Returns int: 0 on success, 1 on failure (device artifacts are kept on
    failure to allow re-running by hand).
    """
    # We'll be running the executable in a temporary directory in
    # /data/local/tmp. `adb shell` has trouble with commands that
    # exceed a certain length, so to err on the safe side we only
    # use the first 10 characters of the UUID.
    uuid_dir = '{}/{}'.format(DEVICE_TEMP_DIR, str(uuid.uuid4())[:10])
    shell(['mkdir', '-p', uuid_dir])
    # `adb` can only handle commands under a certain length. No matter what the
    # original executable's name, on device we call it `__executable`.
    executable = '{}/__executable'.format(uuid_dir)
    push(executable_path, executable)
    # When running the executable on the device, we need to pass it the same
    # arguments, as well as specify the correct LD_LIBRARY_PATH. Save these
    # to a file we can easily call multiple times.
    executable_with_args = '{}/__executable_with_args'.format(uuid_dir)
    _create_executable_on_device(
        executable_with_args,
        'LD_LIBRARY_PATH={uuid_dir}:{tmp_dir} '
        '{executable} {executable_arguments}'.format(
            uuid_dir=uuid_dir,
            tmp_dir=DEVICE_TEMP_DIR,
            executable=executable,
            executable_arguments=' '.join(executable_arguments)))
    # Write the output from the test executable to a file named '__stdout', and
    # if the test executable succeeds, write 'SUCCEEDED' to a file
    # named '__succeeded'. We do this because `adb shell` does not report
    # the exit code of the command it executes on the device, so instead we
    # check the '__succeeded' file for our string.
    executable_stdout = '{}/__stdout'.format(uuid_dir)
    succeeded_token = 'SUCCEEDED'
    executable_succeeded = '{}/__succeeded'.format(uuid_dir)
    executable_piped = '{}/__executable_piped'.format(uuid_dir)
    _create_executable_on_device(
        executable_piped,
        '{executable_with_args} > {executable_stdout} && '
        'echo "{succeeded_token}" > {executable_succeeded}'.format(
            executable_with_args=executable_with_args,
            executable_stdout=executable_stdout,
            succeeded_token=succeeded_token,
            executable_succeeded=executable_succeeded))
    # We've pushed everything we need to the device.
    # Now execute the wrapper script.
    shell([executable_piped])
    # Grab the results of running the executable on device.
    stdout = shell(['cat', executable_stdout])
    exitcode = shell(['cat', executable_succeeded])
    # NOTE(review): on Python 3, shell() returns bytes, so comparing against
    # the str token would raise TypeError; this module appears to target
    # Python 2 (see `from __future__ import print_function`) — TODO confirm.
    if not exitcode.startswith(succeeded_token):
        debug_command = '$ adb shell {}'.format(executable_with_args)
        print('Executable exited with a non-zero code on the Android device.\n'
              'Device stdout:\n'
              '{stdout}\n'
              'To debug, run:\n'
              '{debug_command}\n'.format(
                  stdout=stdout,
                  debug_command=debug_command))
        # Exit early so that the output isn't passed to FileCheck, nor are any
        # temporary directories removed; this allows the user to re-run
        # the executable on the device.
        return 1
    print(stdout)
    shell(['rm', '-rf', uuid_dir])
    return 0
| {
"repo_name": "ben-ng/swift",
"path": "utils/android/adb/commands.py",
"copies": "1",
"size": "6053",
"license": "apache-2.0",
"hash": 5749937097594741000,
"line_mean": 38.8223684211,
"line_max": 79,
"alpha_frac": 0.6484387907,
"autogenerated": false,
"ratio": 4.259676284306826,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 152
} |
from __future__ import print_function
import os
import subprocess
import tempfile
import uuid
# A temporary directory on the Android device.
DEVICE_TEMP_DIR = '/data/local/tmp'
ENV_PREFIX = 'ANDROID_CHILD_'
def shell(args):
    """
    Execute 'adb shell' with the given arguments.

    Raise an exception if 'adb shell' returns a non-zero exit code.
    Note that this only occurs if communication with the connected device
    fails, not if the command run on the device fails.
    """
    adb_invocation = ['adb', 'shell'] + args
    return subprocess.check_output(adb_invocation)
def rmdir(path):
    """Remove all files in the device directory at `path`."""
    # Glob expansion happens in the device-side shell.
    shell(['rm', '-rf', path + '/*'])
def push(local_paths, device_path):
    """Move the files at the given local paths to the path on the device.

    Accepts a single path string or a list of paths. Tries `adb push
    --sync` first (checksums files and skips unchanged ones, improving
    effective transfer speed) and falls back to plain `adb push` on older
    adb versions that lack the option.
    """
    if isinstance(local_paths, str):
        local_paths = [local_paths]
    try:
        # In recent versions of ADB, push supports --sync, which checksums the
        # files to be transmitted and skip the ones that haven't changed, which
        # improves the effective transfer speed.
        return subprocess.check_output(
            ['adb', 'push', '--sync'] + local_paths + [device_path],
            stderr=subprocess.STDOUT).strip()
    except subprocess.CalledProcessError as e:
        # On Python 3, check_output returns bytes; searching a str inside
        # bytes raises TypeError and would mask the intended fallback, so
        # normalize the captured output to str first.
        output = e.output
        if isinstance(output, bytes):
            output = output.decode('utf-8', errors='replace')
        if "unrecognized option '--sync'" in output:
            return subprocess.check_output(
                ['adb', 'push'] + local_paths + [device_path],
                stderr=subprocess.STDOUT).strip()
        else:
            raise
def reboot():
    """Reboot the connected Android device, waiting for it to return online."""
    subprocess.check_call(['adb', 'reboot'])
    # Block until adb can see the device again.
    subprocess.check_call(['adb', 'wait-for-device'])
def _create_executable_on_device(device_path, contents):
    """Write `contents` to a host temp file, push it to `device_path`
    on the device, and mark it executable (mode 755).

    The original implementation leaked the file descriptor returned by
    ``tempfile.mkstemp`` and left the temporary file on disk; both are
    now cleaned up.
    """
    fd, tmp = tempfile.mkstemp()
    try:
        # fdopen takes ownership of the descriptor and closes it on exit.
        with os.fdopen(fd, 'w') as f:
            f.write(contents)
        push(tmp, device_path)
    finally:
        os.remove(tmp)
    shell(['chmod', '755', device_path])
def execute_on_device(executable_path, executable_arguments):
    """
    Run an executable on an Android device.

    Push an executable at the given 'executable_path' to an Android device,
    then execute that executable on the device, passing any additional
    'executable_arguments'. Return 0 if the executable succeeded when run on
    device, and 1 otherwise.

    This function is not as simple as calling 'adb shell', for two reasons:
    1. 'adb shell' can only take input up to a certain length, so it fails for
       long executable names or when a large amount of arguments are passed to
       the executable. This function attempts to limit the size of any string
       passed to 'adb shell'.
    2. 'adb shell' ignores the exit code of any command it runs. This function
       therefore uses its own mechanisms to determine whether the executable
       had a successful exit code when run on device.

    Host environment variables prefixed with ANDROID_CHILD_ are forwarded to
    the on-device process (prefix stripped). File-path arguments are pushed
    to the device and rewritten to their on-device locations. Returns int:
    0 on success, 1 on failure (device artifacts kept to allow re-running).
    """
    # We'll be running the executable in a temporary directory in
    # /data/local/tmp. `adb shell` has trouble with commands that
    # exceed a certain length, so to err on the safe side we only
    # use the first 10 characters of the UUID.
    uuid_dir = '{}/{}'.format(DEVICE_TEMP_DIR, str(uuid.uuid4())[:10])
    shell(['mkdir', '-p', uuid_dir])
    # `adb` can only handle commands under a certain length. That's why we
    # hide the arguments and piping/status in executable files. However, at
    # least one resilience test relies on checking the executable name, so we
    # need to use the same name as the one provided.
    executable_name = os.path.basename(executable_path)
    executable = '{}/{}'.format(uuid_dir, executable_name)
    push(executable_path, executable)
    # Forward ANDROID_CHILD_-prefixed host env vars, stripping the prefix.
    child_environment = ['{}="{}"'.format(k.replace(ENV_PREFIX, '', 1), v)
                         for (k, v) in os.environ.items()
                         if k.startswith(ENV_PREFIX)]
    # The executables are sometimes passed arguments, and sometimes those
    # arguments are files that have to be pushed, but also the argument values
    # have to be changed to the new path in the Android device.
    translated_executable_arguments = []
    for executable_argument in executable_arguments:
        # Currently we only support arguments that are file paths themselves.
        # Things like `--foo=/path/to/file` or directories are not supported.
        # Relative paths from the executable to the arguments are not kept.
        if os.path.isfile(executable_argument):
            final_path = '{}/{}'.format(uuid_dir,
                                        os.path.basename(executable_argument))
            push(executable_argument, final_path)
            translated_executable_arguments.append(final_path)
        else:
            translated_executable_arguments.append(executable_argument)
    # When running the executable on the device, we need to pass it the same
    # arguments, as well as specify the correct LD_LIBRARY_PATH. Save these
    # to a file we can easily call multiple times.
    executable_with_args = '{}/__executable_with_args'.format(uuid_dir)
    _create_executable_on_device(
        executable_with_args,
        'LD_LIBRARY_PATH={uuid_dir}:{tmp_dir} '
        '{child_environment} {executable} {executable_arguments}'.format(
            uuid_dir=uuid_dir,
            tmp_dir=DEVICE_TEMP_DIR,
            child_environment=' '.join(child_environment),
            executable=executable,
            executable_arguments=' '.join(translated_executable_arguments)))
    # Write the output from the test executable to a file named '__stdout', and
    # if the test executable succeeds, write 'SUCCEEDED' to a file
    # named '__succeeded'. We do this because `adb shell` does not report
    # the exit code of the command it executes on the device, so instead we
    # check the '__succeeded' file for our string.
    executable_stdout = '{}/__stdout'.format(uuid_dir)
    succeeded_token = 'SUCCEEDED'
    executable_succeeded = '{}/__succeeded'.format(uuid_dir)
    executable_piped = '{}/__executable_piped'.format(uuid_dir)
    _create_executable_on_device(
        executable_piped,
        '{executable_with_args} > {executable_stdout} && '
        'echo "{succeeded_token}" > {executable_succeeded}'.format(
            executable_with_args=executable_with_args,
            executable_stdout=executable_stdout,
            succeeded_token=succeeded_token,
            executable_succeeded=executable_succeeded))
    # We've pushed everything we need to the device.
    # Now execute the wrapper script.
    shell([executable_piped])
    # Grab the results of running the executable on device.
    stdout = shell(['cat', executable_stdout])
    exitcode = shell(['cat', executable_succeeded])
    # NOTE(review): on Python 3, shell() returns bytes, so comparing against
    # the str token would raise TypeError; this module appears to target
    # Python 2 (see `from __future__ import print_function`) — TODO confirm.
    if not exitcode.startswith(succeeded_token):
        debug_command = '$ adb shell {}'.format(executable_with_args)
        print('Executable exited with a non-zero code on the Android device.\n'
              'Device stdout:\n'
              '{stdout}\n'
              'To debug, run:\n'
              '{debug_command}\n'.format(
                  stdout=stdout,
                  debug_command=debug_command))
        # Exit early so that the output isn't passed to FileCheck, nor are any
        # temporary directories removed; this allows the user to re-run
        # the executable on the device.
        return 1
    print(stdout, end='')
    shell(['rm', '-rf', uuid_dir])
    return 0
| {
"repo_name": "airspeedswift/swift",
"path": "utils/android/adb/commands.py",
"copies": "27",
"size": "8088",
"license": "apache-2.0",
"hash": 1750062538578977000,
"line_mean": 41.125,
"line_max": 79,
"alpha_frac": 0.6454005935,
"autogenerated": false,
"ratio": 4.31590181430096,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
from __future__ import print_function
import subprocess
import tempfile
import uuid
# A temporary directory on the Android device.
DEVICE_TEMP_DIR = '/data/local/tmp'
def shell(args):
    """
    Execute 'adb shell' with the given arguments.

    Raise an exception if 'adb shell' returns a non-zero exit code.
    Note that this only occurs if communication with the connected device
    fails, not if the command run on the device fails.
    """
    full_command = ['adb', 'shell'] + args
    return subprocess.check_output(full_command)
def rmdir(path):
    """Remove all files in the device directory at `path`."""
    # The trailing glob is expanded by the shell on the device.
    shell(['rm', '-rf', '%s/*' % path])
def push(local_path, device_path):
    """Move the file at the given local path to the path on the device."""
    adb_command = ['adb', 'push', local_path, device_path]
    raw_output = subprocess.check_output(adb_command, stderr=subprocess.STDOUT)
    return raw_output.strip()
def reboot():
    """Reboot the connected Android device, waiting for it to return online."""
    # 'wait-for-device' blocks until the device comes back online.
    for step in (['adb', 'reboot'], ['adb', 'wait-for-device']):
        subprocess.check_call(step)
def _create_executable_on_device(device_path, contents):
    """Write `contents` to a host temp file, push it to `device_path`
    on the device, and mark it executable (mode 755).

    The original implementation leaked the file descriptor returned by
    ``tempfile.mkstemp`` and left the temporary file on disk; both are
    now cleaned up.
    """
    import os
    fd, tmp = tempfile.mkstemp()
    try:
        # fdopen takes ownership of the descriptor and closes it on exit.
        with os.fdopen(fd, 'w') as f:
            f.write(contents)
        push(tmp, device_path)
    finally:
        os.remove(tmp)
    shell(['chmod', '755', device_path])
def execute_on_device(executable_path, executable_arguments):
    """
    Run an executable on an Android device.

    Push an executable at the given 'executable_path' to an Android device,
    then execute that executable on the device, passing any additional
    'executable_arguments'. Return 0 if the executable succeeded when run on
    device, and 1 otherwise.

    This function is not as simple as calling 'adb shell', for two reasons:
    1. 'adb shell' can only take input up to a certain length, so it fails for
       long executable names or when a large amount of arguments are passed to
       the executable. This function attempts to limit the size of any string
       passed to 'adb shell'.
    2. 'adb shell' ignores the exit code of any command it runs. This function
       therefore uses its own mechanisms to determine whether the executable
       had a successful exit code when run on device.

    Parameters: executable_path is a host-side file path; executable_arguments
    is a sequence of strings joined with spaces into the on-device command.
    Returns int: 0 on success, 1 on failure (device artifacts are kept on
    failure to allow re-running by hand).
    """
    # We'll be running the executable in a temporary directory in
    # /data/local/tmp. `adb shell` has trouble with commands that
    # exceed a certain length, so to err on the safe side we only
    # use the first 10 characters of the UUID.
    uuid_dir = '{}/{}'.format(DEVICE_TEMP_DIR, str(uuid.uuid4())[:10])
    shell(['mkdir', '-p', uuid_dir])
    # `adb` can only handle commands under a certain length. No matter what the
    # original executable's name, on device we call it `__executable`.
    executable = '{}/__executable'.format(uuid_dir)
    push(executable_path, executable)
    # When running the executable on the device, we need to pass it the same
    # arguments, as well as specify the correct LD_LIBRARY_PATH. Save these
    # to a file we can easily call multiple times.
    executable_with_args = '{}/__executable_with_args'.format(uuid_dir)
    _create_executable_on_device(
        executable_with_args,
        'LD_LIBRARY_PATH={uuid_dir}:{tmp_dir} '
        '{executable} {executable_arguments}'.format(
            uuid_dir=uuid_dir,
            tmp_dir=DEVICE_TEMP_DIR,
            executable=executable,
            executable_arguments=' '.join(executable_arguments)))
    # Write the output from the test executable to a file named '__stdout', and
    # if the test executable succeeds, write 'SUCCEEDED' to a file
    # named '__succeeded'. We do this because `adb shell` does not report
    # the exit code of the command it executes on the device, so instead we
    # check the '__succeeded' file for our string.
    executable_stdout = '{}/__stdout'.format(uuid_dir)
    succeeded_token = 'SUCCEEDED'
    executable_succeeded = '{}/__succeeded'.format(uuid_dir)
    executable_piped = '{}/__executable_piped'.format(uuid_dir)
    _create_executable_on_device(
        executable_piped,
        '{executable_with_args} > {executable_stdout} && '
        'echo "{succeeded_token}" > {executable_succeeded}'.format(
            executable_with_args=executable_with_args,
            executable_stdout=executable_stdout,
            succeeded_token=succeeded_token,
            executable_succeeded=executable_succeeded))
    # We've pushed everything we need to the device.
    # Now execute the wrapper script.
    shell([executable_piped])
    # Grab the results of running the executable on device.
    stdout = shell(['cat', executable_stdout])
    exitcode = shell(['cat', executable_succeeded])
    # NOTE(review): on Python 3, shell() returns bytes, so comparing against
    # the str token would raise TypeError; this module appears to target
    # Python 2 (see `from __future__ import print_function`) — TODO confirm.
    if not exitcode.startswith(succeeded_token):
        debug_command = '$ adb shell {}'.format(executable_with_args)
        print('Executable exited with a non-zero code on the Android device.\n'
              'Device stdout:\n'
              '{stdout}\n'
              'To debug, run:\n'
              '{debug_command}\n'.format(
                  stdout=stdout,
                  debug_command=debug_command))
        # Exit early so that the output isn't passed to FileCheck, nor are any
        # temporary directories removed; this allows the user to re-run
        # the executable on the device.
        return 1
    print(stdout)
    shell(['rm', '-rf', uuid_dir])
    return 0
| {
"repo_name": "bitjammer/swift",
"path": "utils/android/adb/commands.py",
"copies": "32",
"size": "6053",
"license": "apache-2.0",
"hash": -624846327794969900,
"line_mean": 38.8223684211,
"line_max": 79,
"alpha_frac": 0.6484387907,
"autogenerated": false,
"ratio": 4.259676284306826,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 152
} |
# adb tools
import os
import platform
import libs.common_tools as U
# Locate the adb executable for the current OS and pick a matching
# text-search tool for later shell pipelines.
system = platform.system()

# BUG FIX: the original used `is "Linux"` / `is "Windows"`.  `is` tests
# object identity, not equality, and string literals are not guaranteed to
# be interned, so those checks could silently fail.  Use `==`.
if system == "Linux":
    find_tool = 'grep'
else:
    find_tool = 'findstr'

if "ANDROID_HOME" in os.environ:
    if system == "Windows":
        command = os.path.join(
            os.environ['ANDROID_HOME'], 'platform-tools', 'adb.exe'
        )
    else:
        command = os.path.join(
            os.environ['ANDROID_HOME'], 'platform-tools', 'adb'
        )
else:
    # Fail fast at import time: nothing in this module works without adb.
    raise EnvironmentError('$ANDROID_HOME path not found, check it.')
def get_android_platform_version():
    """Return the Android release version string reported by the device.

    :return: e.g. ``'11'``, decoded from the device property
        ``ro.build.version.release`` with the trailing newline removed.
    """
    output = U.bash('adb shell getprop ro.build.version.release').stdout
    return output.readline().decode('utf-8').strip('\r\n')
def get_android_sdk_version():
    """Return the Android SDK (API level) string reported by the device.

    :return: e.g. ``'30'``, decoded from the device property
        ``ro.build.version.sdk`` with the trailing newline removed.
    """
    output = U.bash('adb shell getprop ro.build.version.sdk').stdout
    return output.readline().decode('utf-8').strip('\r\n')
def get_app_cpu_info(condition):
    """Return the first ``dumpsys cpuinfo`` line matching *condition*.

    :param condition: application package name to filter for
    :return: the matching line as a string, newline stripped
    """
    cmd = "adb shell dumpsys cpuinfo | grep %s" % condition
    raw_line = U.bash(cmd).stdout.readline()
    return raw_line.decode('utf-8').strip('\r\n')
def get_app_mem_info(condition):
    """Return the first ``dumpsys meminfo`` line matching *condition*.

    :param condition: application package name to filter for
    :return: the matching line as a string, newline stripped
    """
    cmd = "adb shell dumpsys meminfo | grep %s" % condition
    raw_line = U.bash(cmd).stdout.readline()
    return raw_line.decode('utf-8').strip('\r\n')
def install_app(path):
    """Install an application on the connected device.

    :param path: filesystem path of the APK to install
    """
    install_cmd = "adb install %s" % path
    U.bash(install_cmd)
def uninstall_app(package):
    """Uninstall an application from the connected device.

    :param package: package name of the application to remove
    """
    uninstall_cmd = "adb uninstall %s" % package
    U.bash(uninstall_cmd)
def screen_cap(name):
    """Capture a device screenshot via adb's built-in ``screencap``.

    The image is written to the device's sdcard, pulled to the current
    working directory, then deleted from the device.

    :param name: base name (without extension) for the captured PNG
    """
    remote_path = "/sdcard/%s.png" % name
    U.bash("adb shell screencap %s" % remote_path)
    # Give the device a moment to finish writing before pulling.
    U.sleep(2)
    U.bash("adb pull %s" % remote_path)
    U.sleep(.5)
    U.bash("adb shell rm -rf %s" % remote_path)
# This module is import-only; running it directly does nothing.
if __name__ == '__main__':
    pass
| {
"repo_name": "ZzZxL/tf",
"path": "libs/adb_tools.py",
"copies": "1",
"size": "2222",
"license": "apache-2.0",
"hash": 8743134193878870000,
"line_mean": 20.7708333333,
"line_max": 95,
"alpha_frac": 0.5942583732,
"autogenerated": false,
"ratio": 2.8907330567081604,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.398499142990816,
"avg_score": null,
"num_lines": null
} |
'''ADC Device
===============
ADC implementation of :class:`~moa.device.Device`.
'''
from kivy.properties import (
DictProperty, BooleanProperty, NumericProperty, ObjectProperty,
ListProperty)
from moa.device import Device
from time import clock
from kivy.clock import Clock
__all__ = ('ADCPort', 'VirtualADCPort')
class ADCPort(Device):
    '''Abstract class that represents a multi-channel ADC.

    For ADCs whose channels are sampled independently, each should be given
    their own :class:`ADCPort` instance. It typically only makes sense to
    bundle multiple channels in one instance if they are sampled synchronously.
    '''

    timestamp = NumericProperty(0)
    '''The timestamp of the last update to the :attr:`raw_data`, and
    :attr:`data` attributes.

    Typically, the rule is that :attr:`timestamp` and :attr:`ts_idx` are
    updated before any of the `data` attributes ensuring that when
    responding to a data update, :attr:`timestamp` is accurate.
    The `on_data_update` event is fired after all the relevant channel data
    has been updated.

    :attr:`timestamp` is a :class:`~kivy.properties.NumericProperty` and
    defaults to 0.

    .. note::
        Typically, when activating, the :attr:`timestamp` is not updated.
    '''

    raw_data = ListProperty(None)
    '''A list of length :attr:`num_channels` containing the raw data for each
    channel. The structure is similar to :attr:`data`.

    As opposed to :attr:`data` which keeps the data as floats representing the
    actual signal being sensed, :attr:`raw_data` stores the data as a raw n-bit
    unsigned integer. Each value in :attr:`data` is derived from an identical
    element in :attr:`raw_data` using the following formula:
    :attr:`data` = :attr:`raw_data` * :attr:`scale` / (2 ^ :attr:`bit_depth`
    ) - :attr:`offset`.

    The reverse conversion: :attr:`raw_data` = (:attr:`data` + :attr:`offset`
    ) * (2 ^ :attr:`bit_depth`) / :attr:`scale`.

    :attr:`raw_data` is a :class:`~kivy.properties.ListProperty` and
    defaults to None.
    '''

    data = ListProperty(None)
    '''A list of length :attr:`num_channels` containing the properly rescaled
    floating point data for each channel.

    Each element in the list is a list type containing the most recent data
    read by the ADC. Rather than appending new data to old data, each new data
    slice read replaces the previously read data so that :attr:`data` only
    contains the most recently read data for each channel. Channels that are
    not read at a particular update will be represented by a empty list type.

    :attr:`data` is a :class:`~kivy.properties.ListProperty` and
    defaults to None.
    '''

    ts_idx = ListProperty(None)
    '''A list of length :attr:`num_channels`, where each element in the list
    indicates the index in :attr:`data` and :attr:`raw_data` that is
    timestamped by :attr:`timestamp`. The :attr:`timestamp` is the time of
    a data point for each channel. This data point can be different for each
    channel, so the index indicates the corresponding data point read at the
    time of :attr:`timestamp`.

    :attr:`ts_idx` is a :class:`~kivy.properties.ListProperty` and
    defaults to None.
    '''

    active_channels = ListProperty(None)
    '''A list of booleans with length :attr:`num_channels` indicating whether
    each corresponding channel is active. Inactive channels are ones that don't
    get data.

    :attr:`active_channels` is a :class:`~kivy.properties.ListProperty`
    and defaults to None.
    '''

    num_channels = NumericProperty(1)
    '''The number of channels in the ADC.

    :attr:`num_channels` is a :class:`~kivy.properties.NumericProperty`
    and defaults to 1.
    '''

    bit_depth = NumericProperty(0)
    '''The number of bits of :attr:`raw_data` data points. If zero, only
    :attr:`data` is populated.

    :attr:`bit_depth` is a :class:`~kivy.properties.NumericProperty`
    and defaults to 0.
    '''

    scale = NumericProperty(1.)
    '''The scale when converting :attr:`raw_data` to :attr:`data`.

    :attr:`scale` is a :class:`~kivy.properties.NumericProperty`
    and defaults to 1.0.
    '''

    offset = NumericProperty(0)
    '''The offset when converting :attr:`raw_data` to :attr:`data`.

    :attr:`offset` is a :class:`~kivy.properties.NumericProperty`
    and defaults to 0.0.
    '''

    frequency = NumericProperty(0)
    '''The frequency at which each ADC channel is sampled.

    :attr:`frequency` is a :class:`~kivy.properties.NumericProperty`
    and defaults to 0.0.
    '''
class VirtualADCPort(ADCPort):
    '''A virtual implementation of :class:`ADCPort`.

    The class simulates an ADC device with channel data generation. The
    :class:`ADCPort` conversion parameters must be initialized and once
    activated will start updating the channel data using the :attr:`data_func`
    callback.

    For example::

        >>> from moa.device.adc import VirtualADCPort
        >>> from math import cos, pi
        >>> class ADC(VirtualADCPort):
        ...     def __init__(self, **kwargs):
        ...         super(ADC, self).__init__(**kwargs)
        ...         self.scale = 10
        ...         self.offset = 5
        ...
        ...         def next_point(idx, channel):
        ...             rate = 1
        ...             if channel == 0:
        ...                 return .2 * cos(2 * pi * idx * rate / \
float(self.frequency))
        ...             return 4 * cos(2 * pi * idx * rate / float(\
self.frequency))
        ...         self.data_func = next_point
        ...
        ...     def on_data_update(self, *largs):
        ...         print('Data ({:.2f}): {}'.format(self.timestamp, \
self.data))
        ...         print('Raw data ({:.2f}): {}'.format(self.timestamp, \
self.raw_data))
        >>> adc = ADC(num_channels=2, active_channels=[True, True], \
data_size=2, frequency=4, bit_depth=16)
        >>> adc.activate(adc)
        Data (1.26): [[0.2, 0.0], [4.0, 0.0]]
        Raw data (1.26): [[34078, 32768], [58982, 32768]]
        Data (1.52): [[-0.2, 0.0], [-4.0, 0.0]]
        Raw data (1.52): [[31457, 32768], [6553, 32767]]
        Data (1.77): [[0.2, 0.0], [4.0, 0.0]]
        Raw data (1.77): [[34078, 32768], [58982, 32768]]
        ...
    '''

    data_func = ObjectProperty(None)
    '''A callback that is called when we need a new data point. The callback
    takes two parameters; a index parameter which is the index of the data
    point requested, and the channel number for which the data is requested.
    The function returns the generated sample value for that index and channel.

    .. note::
        The data returned is for :attr:`ADCPort.data`, :attr:`ADCPort.raw_data`
        values are computed from it.

    :attr:`data_func` is a :class:`~kivy.properties.ObjectProperty` and
    defaults to None.
    '''

    data_size = NumericProperty(0)
    '''The number of data points generated at once. That is for every update
    to the data parameters, :attr:`data_size` data points are generated. This
    simulates the buffer size of an ADC which passes to the caller data points
    in blocks of :attr:`data_size`.

    .. note::
        The data is generated in approximate real time according to
        :attr:`data_size` and :attr:`ADCPort.frequency`.

    :attr:`data_size` is a :class:`~kivy.properties.NumericProperty` and
    defaults to 0.
    '''

    _count = 0  # the last index passed to data_func, set for each block
    _start_time = 0  # the time when device was activated.

    def _generate_data(self, *l):
        # Clock callback: produce data_size new samples per active channel,
        # but only once enough wall-clock time has elapsed to keep the
        # simulated stream at approximately `frequency` samples/sec.
        t = clock()
        i = self._count
        count = self.data_size
        # ensure it's time to update
        if int((t - self._start_time) * self.frequency) - i < count:
            return
        # Hoist property lookups out of the comprehensions below.
        f = self.data_func
        n = self.num_channels
        chs = self.active_channels
        offset = self.offset
        scale = self.scale
        depth = 2 ** self.bit_depth
        # Inactive channels get an empty list, matching ADCPort.data's
        # documented contract.
        data = [[f(j + i, k) for j in range(count)] if chs[k] else []
                for k in range(n)]
        self._count = i + count
        # Per the ADCPort contract, timestamp and ts_idx are updated before
        # the data attributes so handlers see a consistent timestamp.
        self.timestamp = t
        self.ts_idx = [0, ] * n
        if depth == 1:
            # bit_depth == 0: raw_data is not populated, just initialized
            # once with empty per-channel lists.
            if not self.raw_data:
                self.raw_data = [[] for d in data]
        else:
            # Inverse of the ADCPort conversion formula:
            # raw = (data + offset) * 2^bit_depth / scale.
            self.raw_data = [[int((val + offset) / scale * depth) for val in d]
                             for d in data]
        self.data = data
        self.dispatch('on_data_update', self)

    def activate(self, *largs, **kwargs):
        # Returns False (and does nothing) if the base class refuses
        # activation.
        if not super(VirtualADCPort, self).activate(*largs, **kwargs):
            return False
        self._count = 0
        self._start_time = clock()
        # f is the time for one full block; polling at f / 2 ensures
        # _generate_data's own time gate, not the clock interval, paces
        # the updates.  NOTE(review): raises ZeroDivisionError if
        # frequency is left at its default of 0 -- callers must set it.
        f = self.data_size / float(self.frequency)
        Clock.schedule_interval(self._generate_data, f / 2.)
        return True

    def deactivate(self, *largs, **kwargs):
        # Returns False (and does nothing) if the base class refuses
        # deactivation.
        if not super(VirtualADCPort, self).deactivate(*largs, **kwargs):
            return False
        Clock.unschedule(self._generate_data)
        return True
| {
"repo_name": "matham/moa",
"path": "moa/device/adc.py",
"copies": "1",
"size": "9043",
"license": "mit",
"hash": -6651264604249573000,
"line_mean": 34.8849206349,
"line_max": 79,
"alpha_frac": 0.6199270154,
"autogenerated": false,
"ratio": 3.8188344594594597,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9938177908099423,
"avg_score": 0.00011671335200746965,
"num_lines": 252
} |
"""A DCHP sniffer that listens for DHCP request messages"""
import errno
import socket
import time
from threading import Thread

from pydispatch import dispatcher
class DHCPListener(Thread):
    """Thread that listens on UDP port 67 for DHCP "request" messages.

    Each parsed request is published on the "Incoming DHCP" pydispatch
    signal as a ``(hostname, mac_address, ip_address)`` tuple.  A bind
    failure is reported on the "DHCPListener" signal.
    """

    def __init__(self):
        """Init Worker Thread Class and subscribe to the "Shutdown" signal."""
        self.shutdown = False
        dispatcher.connect(self.shutdown_signal,
                           signal="Shutdown",
                           sender=dispatcher.Any)
        Thread.__init__(self)

    def shutdown_signal(self, sender):
        """Signal handler: ask the listener loop to exit."""
        self.shutdown = True

    def run(self):
        """Run Worker Thread: poll the socket until shutdown is requested."""
        sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        try:
            port = 67
            sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            sock.bind(("", port))
            sock.setblocking(0)
        except IOError:
            # Something else has already bound the port; it is probably not
            # another instance of us because SO_REUSEADDR lets multiple
            # instances run without error.  The socket is unusable, so
            # report the error and stop the thread instead of looping on it
            # (the original code fell through into the receive loop here).
            dispatcher.send(signal="DHCPListener", sender=self,
                            data={'Error': 'Socket in use'})
            return
        while not self.shutdown:
            try:
                msg, _ = sock.recvfrom(1024)
            except OSError as error:
                if error.args[0] in (10035, errno.EAGAIN, errno.EWOULDBLOCK):
                    # 10035 is WSAEWOULDBLOCK on Windows; EAGAIN/EWOULDBLOCK
                    # are the POSIX equivalents.  No data yet -- wait a bit
                    # and check again.
                    time.sleep(1)
                    continue
                print("Error listening: ", error)
                # BUG FIX: previously execution fell through and parsed a
                # stale (or on the first iteration, undefined) `msg`.
                continue
            # check if it is a DHCP "request" message
            try:
                # Option 53 (0x35), length 1, value 3 => DHCP "request".
                if msg[240:243] == b'\x35\x01\x03':
                    # extract the sending mac address (chaddr field)
                    mac_address = ':'.join(['%02x' % item
                                            for item in msg[28:34]])
                    # process only the DHCP options portion of the packet
                    dhcp_options = msg[243:]
                    ip_address = ''
                    hostname = ''
                    while dhcp_options:
                        opt = dhcp_options[0]
                        if opt == 255:  # end-of-options marker
                            break
                        if opt == 0:  # padding byte
                            dhcp_options = dhcp_options[1:]
                            continue
                        if opt == 50:  # requested IP address
                            # Convert the option payload to a dotted quad.
                            data, dhcp_options = self.get_data(dhcp_options)
                            ip_address = '.'.join([str(item) for item in data])
                            continue
                        if opt == 12:  # hostname
                            data, dhcp_options = self.get_data(dhcp_options)
                            hostname = ''.join([chr(item) for item in data])
                            continue
                        # Unknown option -- skip over its payload.
                        data, dhcp_options = self.get_data(dhcp_options)
                    if ip_address == '':
                        # Request without a requested-IP option -- ignore.
                        continue
                    # check if we have been told to stop listening
                    if not self.shutdown:
                        dispatcher.send(signal="Incoming DHCP",
                                        data=(hostname, mac_address,
                                              ip_address))
            except Exception as error:
                # Best-effort parsing: a malformed packet is logged, never
                # fatal to the listener.
                print('Error parsing DHCP packet: ', error)

    def get_data(self, dhcp_options):
        """Gets variable length data, and returns data and left over options.

        ``dhcp_options`` starts at an option byte; byte 1 is the payload
        length.  Returns ``(payload, remaining_options)``.
        """
        length = dhcp_options[1]
        data = dhcp_options[2:length + 2]
        dhcp_options = dhcp_options[length + 2:]
        return data, dhcp_options
def incoming(sender):
    """Debug handler: print whatever payload arrives on the signal."""
    print(sender)
def main():
    """Run the listener for 20 seconds, printing every parsed request.

    Intended as a quick manual smoke test of :class:`DHCPListener`.
    """
    # BUG FIX: the handler was connected to signal "Incoming Packet", but
    # DHCPListener only ever sends "Incoming DHCP" (and "DHCPListener" on
    # error), so `incoming` could never fire.
    dispatcher.connect(incoming, signal="Incoming DHCP",
                       sender=dispatcher.Any)
    test = DHCPListener()
    test.start()
    # `time` is already imported at module level; the original re-imported
    # it locally.
    time.sleep(20)
    test.shutdown = True
    test.join()


if __name__ == '__main__':
    main()
| {
"repo_name": "AMXAUNZ/Magic-DXLink-Configurator",
"path": "scripts/dhcp_sniffer.py",
"copies": "1",
"size": "5493",
"license": "mit",
"hash": -4967445744880776000,
"line_mean": 37.6830985915,
"line_max": 105,
"alpha_frac": 0.458401602,
"autogenerated": false,
"ratio": 5.021023765996343,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5979425367996344,
"avg_score": null,
"num_lines": null
} |
"""ADC module B2b interface
SPI Rack interface code for the ADC module. An 2 channel 24-bit ADC module
with integrated ARM Cortex M4 microcontroller. Used to connect to two neighbouring
IVVI rack modules
Example:
Example use: ::
B2b = spirack.B2b_module(SPI_Rack1, 1, True)
"""
import logging
from enum import Enum
from time import sleep
import numpy as np
from .chip_mode import SAMD51_MODE, SAMD51_SPEED, BICPINS_MODE, BICPINS_SPEED
logger = logging.getLogger(__name__)
class B2b_module(object):
    # Wire-protocol convention used throughout this class (as visible in the
    # packets built below): a write packet is [header, length, payload...]
    # where the header is the command value with bit 7 set (128 | command);
    # a read packet sends [command, length, ...] followed by 0xFF filler
    # bytes that clock the reply out, and the reply occupies the trailing
    # bytes of `rdata`.

    def __init__(self, spi_rack, module, calibrate=False):
        """B2b module interface class

        This class does the low level interfacing with the B2b module. When creating
        an instance it requires a SPI_rack class passed as a parameter.

        In contrast to the D4a module, a microcontroller in the module handles all the
        communication with the ADCs. This allows for exactly timed ADC updates: based
        on triggers, timers etc.

        Attributes:
            module (int): the module number set by the user (most coincide with the hardware)
            calibrate (bool): if True, runs calibration at initialisation
        """
        self.spi_rack = spi_rack
        self.module = module
        self.type = 'B2b'
        # Maps filter type -> list of seconds-per-sample, indexed by
        # filter_rate (the module-level tables at the bottom of this file).
        self.sample_time = {'sinc3':sinc3_sample_time, 'sinc5': sinc5_sample_time}
        self._command = B2b_Command
        if calibrate:
            self.calibrate()

    def set_clock_source(self, source):
        """Sets the clock source for the microcontroller

        Set the microcontroller clock source to either a local (inside the module)
        clock or a clock from the backplane. This allows for multiple modules to run
        of the same clock. The module expects a 10 MHz clock, either sine wave or square.

        Args:
            source (string): either 'internal' or 'external'. Clock source for the microcontroller
        """
        possible_values = {'internal':0, 'external':1}
        if source not in possible_values:
            raise ValueError('{} module {}: value {} does not exist. Possible values '
                             'are: {}'.format(self.type, self.module, source, [*possible_values.keys()]))
        command = self._command.CLOCK_SOURCE
        header = 128 | command.value
        length = 1
        wdata = bytearray([header, length, possible_values[source]])
        self.spi_rack.write_data(self.module, 0, SAMD51_MODE, SAMD51_SPEED, wdata)
        # Give the microcontroller time to switch clocks before verifying.
        sleep(0.1)
        if self.get_clock_source() != source:
            logger.error("%s module %d: clock source not set to %s clock source!",
                         self.type, self.module, source)

    def get_clock_source(self):
        """Get the currently set clock source

        Gets the set clock source from the microcontroller.

        Returns:
            The set clock source: 'internal' or 'external' (string)
        """
        command = self._command.CLOCK_SOURCE
        wdata = bytearray([command.value, 1, 0xFF, 0xFF, 0xFF])
        rdata = self.spi_rack.read_data(self.module, 0, SAMD51_MODE, SAMD51_SPEED, wdata)
        values = {0:'internal', 1:'external'}
        return values[rdata[-1]]

    def calibrate(self):
        """Run calibration routine

        This will run a gain and offset calibration routine on the B2b module. The
        function will stall until the routine is finished.
        """
        command = self._command.ADC_CALIBRATE
        header = (1<<7) | command.value
        length = 1
        wdata = bytearray([header, length, 0])
        self.spi_rack.write_data(self.module, 0, SAMD51_MODE, SAMD51_SPEED, wdata)
        logger.info(' %s module %d: Starting calibration...', self.type, self.module)
        print(' {} module {}: Starting calibration...'.format(self.type, self.module))
        # Fixed wait for the on-module routine; there is no ready flag read
        # back here.
        sleep(4)
        logger.info(' %s module %d: Finished calibration...', self.type, self.module)
        print(' {} module {}: Finished calibration...'.format(self.type, self.module))

    def is_running(self):
        """Checks if the module is running

        This function return true if the module is running a measurement, should be used
        to check if data can be read.

        Returns:
            True if the module is running a measurement
        """
        # Read via the BIC pins (chip 6), not the SAMD51 command channel:
        # bit 0 of the returned byte is the running flag.
        data = self.spi_rack.read_data(self.module, 6, BICPINS_MODE, BICPINS_SPEED, bytearray([0]))
        return bool(data[0]&0x01)

    def _get_status(self):
        """Gets the status

        Returns the status of the module. At bootup (before a first run) it will
        give 'booted'. This should not appear after. The status can be used to
        check where the module is in the process. Do not us this function to check
        if the module is done running.

        Returns:
            Status of the module (string)
        """
        command = self._command.STATUS_CMD
        wdata = bytearray([command.value, 1, 0xFF, 0xFF, 0xFF])
        rdata = self.spi_rack.read_data(self.module, 0, SAMD51_MODE, SAMD51_SPEED, wdata)
        values = {0:'running', 1:'idle', 2:'waiting', 3:'booted', 4:'readout', 5:'cancelled', 6:'done'}
        return values[rdata[-1]]

    def set_trigger_amount(self, trigger_amount):
        """Sets the amount of triggers expected

        Args:
            trigger_amount (int): amount of triggers
        """
        command = self._command.TRIGGER_AMOUNT
        header = 128 | command.value
        length = 4
        # 32-bit value sent big-endian (MSB first).
        wdata = bytearray([header, length, (trigger_amount>>24)&0xFF, (trigger_amount>>16)&0xFF, (trigger_amount>>8)&0xFF, trigger_amount&0xFF])
        self.spi_rack.write_data(self.module, 0, SAMD51_MODE, SAMD51_SPEED, wdata)

    def get_trigger_amount(self):
        """Gets the amount of triggers expected

        Returns:
            amount of triggers
        """
        command = self._command.TRIGGER_AMOUNT
        wdata = bytearray([command.value, 4, 0, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF])
        rdata = self.spi_rack.read_data(self.module, 0, SAMD51_MODE, SAMD51_SPEED, wdata)
        # Reassemble the big-endian 32-bit reply from the last four bytes.
        trigger_amount = (rdata[-4]<<24) | (rdata[-3]<<16) | (rdata[-2]<<8) | (rdata[-1])
        return trigger_amount

    def set_sample_amount(self, ADC, sample_amount):
        """Sets the amount of samples per trigger

        Sets the amount of samples that the ADC channel takes per trigger.

        Args:
            ADC (int:0-1): channel to set the sample amount of
            sample_amount (int): amount of samples per trigger
        """
        if ADC not in range(2):
            raise ValueError('{} module {}: ADC {} does not exist.'.format(self.type, self.module, ADC))
        command = self._command.ADC_SAMPLE_AMOUNT
        header = 128 | command.value
        length = 5
        wdata = bytearray([header, length, ADC, (sample_amount>>24)&0xFF, (sample_amount>>16)&0xFF, (sample_amount>>8)&0xFF, sample_amount&0xFF])
        self.spi_rack.write_data(self.module, 0, SAMD51_MODE, SAMD51_SPEED, wdata)

    def get_sample_amount(self, ADC):
        """Gets the amount of samples per trigger

        Gets the amount of samples that the ADC channel takes per trigger.

        Args:
            ADC (int:0-1): channel of which to get the sample amount
        """
        if ADC not in range(2):
            raise ValueError('{} module {}: ADC {} does not exist.'.format(self.type, self.module, ADC))
        command = self._command.ADC_SAMPLE_AMOUNT
        wdata = bytearray([command.value, 4, ADC, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF])
        rdata = self.spi_rack.read_data(self.module, 0, SAMD51_MODE, SAMD51_SPEED, wdata)
        sample_amount = (rdata[-4]<<24) | (rdata[-3]<<16) | (rdata[-2]<<8) | (rdata[-1])
        return sample_amount

    def get_firmware_version(self):
        """Gets the firmware version of the module

        Returns:
            firmware version of the module (int)
        """
        command = self._command.GET_FW_VERSION
        wdata = bytearray([command.value, 1, 0xFF, 0xFF, 0xFF])
        rdata = self.spi_rack.read_data(self.module, 0, SAMD51_MODE, SAMD51_SPEED, wdata)
        return rdata[-1]

    def set_ADC_enable(self, ADC, enable):
        """Enables given ADC channel

        Args:
            ADC (int:0-1): channel to activate
            enable (int:0-1): 1 (or True) to enable the channel, 0 to disable
        """
        if ADC not in range(2):
            raise ValueError('{} module {}: ADC {} does not exist.'.format(self.type, self.module, ADC))
        if enable not in range(2):
            raise ValueError('{} module {}: {} not a valid input. Should be a boolean'.format(self.type, self.module, enable))
        command = self._command.ADC_ENABLE
        header = 128 | command.value
        length = 2
        wdata = bytearray([header, length, ADC, enable])
        self.spi_rack.write_data(self.module, 0, SAMD51_MODE, SAMD51_SPEED, wdata)

    def get_ADC_enable(self, ADC):
        """Gets status of ADC channel

        Args:
            ADC (int:0-1): ADC of which to get the status

        Returns:
            status of ADC channel
        """
        if ADC not in range(2):
            raise ValueError('{} module {}: ADC {} does not exist.'.format(self.type, self.module, ADC))
        command = self._command.ADC_ENABLE
        wdata = bytearray([command.value, 1, ADC, 0xFF, 0xFF])
        rdata = self.spi_rack.read_data(self.module, 0, SAMD51_MODE, SAMD51_SPEED, wdata)
        return rdata[-1]

    def software_trigger(self):
        """Triggers the ADC module

        Sends a software trigger to the ADC module to take the amount of samples specified by
        `set_sample_amount`. This can be used for example to take standalone measurements or to
        take an FFT measurement.
        """
        command = self._command.SOFTWARE_TRIGGER
        header = 128 | command.value
        wdata = bytearray([header, 1, 0])
        self.spi_rack.write_data(self.module, 0, SAMD51_MODE, SAMD51_SPEED, wdata)

    def _get_ADC_data_loc(self, ADC):
        """Gets data location of final byte of last sample

        Only for internal use!
        """
        command = self._command.DATA_LOC
        wdata = bytearray([command.value, 4, ADC, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF])
        rdata = self.spi_rack.read_data(self.module, 0, SAMD51_MODE, SAMD51_SPEED, wdata)
        data_loc = (rdata[-4]<<24) | (rdata[-3]<<16) | (rdata[-2]<<8) | (rdata[-1])
        return data_loc

    def get_data(self):
        """Reads back all the data from the module

        Samples are stored on the module as 3-byte (24-bit) big-endian words
        in SRAM; ADC 0's region starts at byte 0 and ADC 1's at byte 62500.
        Each region is read back in chunks of at most 120 bytes, reassembled
        into 24-bit integers and converted to volts.

        Returns:
            ADC0, ADC1: numpy arrays of float. None if ADC is not enabled
        """
        ADC0, ADC1 = None, None
        if self.get_ADC_enable(0):
            # Get location of the last data byte in SRAM
            max_data_location = self._get_ADC_data_loc(0)
            # Create array with readout locations, for max 120 bytes at a time
            # Start location for ADC 0 is 0
            locations = np.arange(0, max_data_location, 120)
            # Array with amounts of bytes per readout
            amounts = np.zeros_like(locations)
            amounts[:-1] = locations[1:] - locations[:-1]
            amounts[-1] = max_data_location - locations[-1]
            # Three bytes per sample.
            ADC0 = np.zeros(int(max_data_location/3))
            # Readback the data in steps of max 120 bytes
            for i, loc in enumerate(locations):
                # First set the read address and amount...
                command = self._command.READ_LOC
                header = 128 | command.value
                wdata = bytearray([header, 4, (loc>>16)&0xFF, (loc>>8)&0xFF, loc&0xFF, amounts[i]])
                self.spi_rack.write_data(self.module, 0, SAMD51_MODE, SAMD51_SPEED, wdata)
                sleep(0)
                # ...then clock out that many bytes.
                command = self._command.GET_DATA
                wdata = bytearray([command.value, amounts[i], 0, 0xFF]+[0xFF]*amounts[i])
                rdata = self.spi_rack.read_data(self.module, 0, SAMD51_MODE, SAMD51_SPEED, wdata)
                j=int(loc/3)
                for n in range(4,len(rdata),3):
                    # Shift data in correct order
                    ADC0[j] = (rdata[n]<<16 | rdata[n+1]<<8 | rdata[n+2])
                    j+=1
            # Calculate the ADC values: map the 24-bit range onto
            # -8.192 .. +8.192 V.
            ADC0 = (ADC0*8.192/2**23) - 8.192
        if self.get_ADC_enable(1):
            # Get location of the last data byte in SRAM
            max_data_location = self._get_ADC_data_loc(1)
            # Create array with readout locations, for max 120 bytes at a time
            # Start location for ADC 1 is 62500 (NOTE: an earlier comment
            # said 65536, but the code consistently uses 62500)
            locations = np.arange(62500, max_data_location, 120)
            # Array with amounts of bytes per readout
            amounts = np.zeros_like(locations)
            amounts[:-1] = locations[1:] - locations[:-1]
            amounts[-1] = max_data_location - locations[-1]
            ADC1 = np.zeros(int((max_data_location-62500)/3))
            # Readback the data in steps of max 120 bytes
            for i, loc in enumerate(locations):
                command = self._command.READ_LOC
                header = 128 | command.value
                wdata = bytearray([header, 4, (loc>>16)&0xFF, (loc>>8)&0xFF, loc&0xFF, amounts[i]])
                self.spi_rack.write_data(self.module, 0, SAMD51_MODE, SAMD51_SPEED, wdata)
                sleep(0)
                command = self._command.GET_DATA
                wdata = bytearray([command.value, amounts[i], 0, 0xFF]+[0xFF]*amounts[i])
                rdata = self.spi_rack.read_data(self.module, 0, SAMD51_MODE, SAMD51_SPEED, wdata)
                j=int((loc-62500)/3)
                for n in range(4,len(rdata),3):
                    # Shift data in correct order
                    ADC1[j] = (rdata[n]<<16 | rdata[n+1]<<8 | rdata[n+2])
                    j+=1
            # Calculate the ADC values
            ADC1 = (ADC1*8.192/2**23) - 8.192
        return ADC0, ADC1

    def cancel(self):
        """Stops the module once it's running

        When this function is called, it cancels the current run of the module. This can
        be useful if the toggle amount and/or the toggle time are set wrong and long.
        If the run gets cancelled, the status gets updated to reflect this.
        """
        logger.info("%s module %d: cancelled measuring", self.type, self.module)
        print("{} module {}: cancelled measuring".format(self.type, self.module))
        command = self._command.CANCEL_CMD
        header = 128 | command.value
        length = 1
        wdata = bytearray([header, length, 0])
        self.spi_rack.write_data(self.module, 0, SAMD51_MODE, SAMD51_SPEED, wdata)

    def set_trigger_holdoff_time(self, holdoff_time):
        """Sets the holdoff time from the trigger moment

        Sets the time the system waits after the trigger with a resolution of 100ns.

        Args:
            holdoff_time (seconds): amount of time to wait after
        """
        # NOTE(review): a minimum-holdoff check (30 us) was present here but
        # is disabled; confirm against firmware whether shorter values are
        # actually safe.
        # if holdoff_time < 30e-6:
        #     raise ValueError('{} module {}: holdoff time {} seconds not allowed. '
        #                      'Has to be mimimum 30 us.'.format(self.type, self.module, holdoff_time))
        # Convert seconds to 100 ns ticks.
        value = int((holdoff_time/100e-9))
        command = self._command.TRIGGER_HOLDOFF
        header = 128 | command.value
        length = 4
        wdata = bytearray([header, length, (value>>24)&0xFF, (value>>16)&0xFF,
                           (value>>8)&0xFF, value&0xFF])
        self.spi_rack.write_data(self.module, 0, SAMD51_MODE, SAMD51_SPEED, wdata)

    def get_trigger_holdoff_time(self):
        """Gets the set trigger holdoff time

        See 'set_trigger_holdoff_time' for details.

        Returns:
            The set holdoff_time in seconds.
        """
        command = self._command.TRIGGER_HOLDOFF
        wdata = bytearray([command.value, 4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF])
        rdata = self.spi_rack.read_data(self.module, 0, SAMD51_MODE, SAMD51_SPEED, wdata)
        value = (rdata[-4]<<24) | (rdata[-3]<<16) | (rdata[-2]<<8) | (rdata[-1])
        # Ticks are 100 ns; round to the same 100 ns resolution.
        return round((value)*100e-9, 7)

    def set_filter_rate(self, ADC, filter_rate):
        """Sets the ADC filter

        The filter rate (together with the filter type) determines the cutoff frequency,
        sample rate, the resolution and the 50 Hz rejection. See the filter table to
        determine which setting to use.

        Args:
            ADC (int:0-1): ADC of which to change the filter
            filter_rate (int:0-20): the desired filter setting
        """
        if ADC not in range(2):
            raise ValueError('{} module {}: ADC {} does not exist.'.format(self.type, self.module, ADC))
        if filter_rate not in range(21):
            raise ValueError('{} module {}: filter rate {} is not allowed.'.format(self.type, self.module, filter_rate))
        command = self._command.ADC_FILTER_RATE
        header = 128 | command.value
        length = 2
        wdata = bytearray([header, length, ADC, filter_rate])
        self.spi_rack.write_data(self.module, 0, SAMD51_MODE, SAMD51_SPEED, wdata)

    def get_filter_rate(self, ADC):
        """Gets the ADC filter

        Returns the ADC filter setting of the given ADC. See the filter table to interpret the result.

        Args:
            ADC (int:0-1): ADC of which to get the filter

        Returns:
            filter_rate (int): the current filter setting
        """
        if ADC not in range(2):
            raise ValueError('{} module {}: ADC {} does not exist.'.format(self.type, self.module, ADC))
        command = self._command.ADC_FILTER_RATE
        wdata = bytearray([command.value, 1, ADC, 0xFF, 0xFF])
        rdata = self.spi_rack.read_data(self.module, 0, SAMD51_MODE, SAMD51_SPEED, wdata)
        return rdata[-1]

    def set_filter_type(self, ADC, filter_type):
        """Set the filter type

        The ADC filter can be set to two different types: 'sinc3' or 'sinc5'. The filter type
        determines (with the filter rate) the cutoff frequency, sample rate, the resolution
        and the 50 Hz rejection. See the filter table to determine which setting is correct
        for your application.

        Args:
            ADC (int:0-1): ADC of which to set the filter type
            filter_type (string): possible values are 'sinc3' or 'sinc5'
        """
        if ADC not in range(2):
            raise ValueError('{} module {}: ADC {} does not exist.'.format(self.type, self.module, ADC))
        possible_values = {'sinc3':3, 'sinc5':0}
        if filter_type not in possible_values:
            raise ValueError('{} module {}: filter type {} does not exist. Possible values '
                             'are: {}'.format(self.type, self.module, filter_type, [*possible_values.keys()]))
        command = self._command.ADC_FILTER_TYPE
        header = 128 | command.value
        length = 2
        wdata = bytearray([header, length, ADC, possible_values[filter_type]])
        self.spi_rack.write_data(self.module, 0, SAMD51_MODE, SAMD51_SPEED, wdata)

    def get_filter_type(self, ADC):
        """Gets the filter type

        Returns the filter type of the given ADC.

        Args:
            ADC (int:0-1): ADC of which to get the filter

        Returns:
            filter_type (string): the current filter type
        """
        if ADC not in range(2):
            raise ValueError('{} module {}: ADC {} does not exist.'.format(self.type, self.module, ADC))
        command = self._command.ADC_FILTER_TYPE
        wdata = bytearray([command.value, 1, ADC, 0xFF, 0xFF])
        rdata = self.spi_rack.read_data(self.module, 0, SAMD51_MODE, SAMD51_SPEED, wdata)
        type_values = {0:'sinc5', 3:'sinc3'}
        return type_values[rdata[-1]]

    def set_trigger_input(self, trigger):
        """Sets the trigger input location

        Sets the trigger input location for the ADC module. If it is set to 'None', no external
        triggers will start the module: it will only start via the `software_trigger` function.
        Otherwise it will trigger on rising edges from either the controller module or the D5b.

        Args:
            trigger (string): the input location
        """
        possible_values = {'None':0, 'Controller':1, 'D5b':2}
        if trigger not in possible_values:
            raise ValueError('{} module {}: trigger source {} does not exist. Possible values '
                             'are: {}'.format(self.type, self.module, trigger, [*possible_values.keys()]))
        command = self._command.TRIGGER_INPUT
        header = 128 | command.value
        length = 1
        wdata = bytearray([header, length, possible_values[trigger]])
        self.spi_rack.write_data(self.module, 0, SAMD51_MODE, SAMD51_SPEED, wdata)

    def get_trigger_input(self):
        """Gets the trigger input location

        Returns:
            trigger_location (string): the currently set trigger input location
        """
        command = self._command.TRIGGER_INPUT
        wdata = bytearray([command.value, 1, 0xFF, 0xFF, 0xFF])
        rdata = self.spi_rack.read_data(self.module, 0, SAMD51_MODE, SAMD51_SPEED, wdata)
        trigger_values = {0:'None', 1:'Controller', 2:'D5b'}
        return trigger_values[rdata[-1]]

    def get_sample_time(self, ADC):
        """Gives the sample rate of the given ADC

        Gives the sample rate in seconds of the ADC. This corresponds to the values in the
        filter table. These values can be used for plotting or a FFT calculation.

        Args:
            ADC (int:0-1): ADC of which to get the sample time

        Returns:
            (float): the sample rate in seconds
        """
        if ADC not in range(2):
            raise ValueError('{} module {}: ADC {} does not exist.'.format(self.type, self.module, ADC))
        filter_rate = self.get_filter_rate(ADC)
        filter_type = self.get_filter_type(ADC)
        return self.sample_time[filter_type][filter_rate]
class B2b_Command(Enum):
    """Command opcodes for the SPI protocol spoken by the B2b firmware.

    Each value is the first byte of a message to the module's SAMD51
    microcontroller; write messages set bit 7 of this byte (header =
    128 | value) as seen in the module methods above.
    """
    CLOCK_SOURCE = 0
    TRIGGER_INPUT = 1
    TRIGGER_HOLDOFF = 2
    TRIGGER_AMOUNT = 3
    GET_FW_VERSION = 4
    GET_MODULE_NAME = 5
    SOFTWARE_TRIGGER = 6
    ADC_FILTER_RATE = 7
    ADC_FILTER_TYPE = 8
    ADC_ENABLE = 9
    ADC_SAMPLE_AMOUNT = 10
    ADC_CALIBRATE = 11
    ADC_CONNECTION = 12
    ADC_LOCATION = 13
    STATUS_CMD = 14
    CANCEL_CMD = 15
    GET_DATA = 16
    READ_LOC = 17
    DATA_LOC = 18
# Sample period in seconds for each of the 21 sinc3 filter-rate settings
# (index = filter rate setting).
# NOTE(review): presumably consumed through the module's `sample_time`
# mapping used by get_sample_time() -- confirm against the class definition.
sinc3_sample_time = [
    12e-6,
    24e-6,
    48e-6,
    60e-6,
    96e-6,
    120e-6,
    192e-6,
    300e-6,
    600e-6,
    1.2e-3,
    3e-3,
    6e-3,
    7.5e-3,
    15e-3,
    30e-3,
    50.02e-3,
    60e-3,
    150e-3,
    180e-3,
    300e-3,
    600e-3
]
# Sample period in seconds for each of the 21 sinc5 filter-rate settings
# (index = filter rate setting).
# NOTE(review): presumably consumed through the module's `sample_time`
# mapping used by get_sample_time() -- confirm against the class definition.
sinc5_sample_time = [
    20e-6,
    24e-6,
    32e-6,
    36e-6,
    48e-6,
    56e-6,
    80e-6,
    100e-6,
    200e-6,
    400e-6,
    1e-3,
    2e-3,
    2.516e-3,
    5e-3,
    10e-3,
    16.67e-3,
    20.016e-3,
    50e-3,
    60.02e-3,
    100e-3,
    200e-3
]
"repo_name": "Rubenknex/SPI-rack",
"path": "spirack/B2b_module.py",
"copies": "2",
"size": "23197",
"license": "mit",
"hash": 9123332131648882000,
"line_mean": 36.1168,
"line_max": 145,
"alpha_frac": 0.582575333,
"autogenerated": false,
"ratio": 3.682063492063492,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5264638825063492,
"avg_score": null,
"num_lines": null
} |
"""ADC module D4b interface
SPI Rack interface code for the ADC module. An 2 channel 24-bit ADC module
with integrated ARM Cortex M4 microcontroller. Used to connect to two neighbouring
IVVI rack modules. Is in hardware identical to the B2b module, the only difference
is the presence of connectors on the front of the module.
Example:
Example use: ::
D4b = spirack.D4b_module(SPI_Rack1, 1, True)
"""
import logging
from .B2b_module import B2b_module
from .chip_mode import SAMD51_MODE, SAMD51_SPEED
logger = logging.getLogger(__name__)
class D4b_module(B2b_module):
    """D4b module interface class

    Low-level interface to the D4b two-channel 24-bit ADC module. The D4b
    is identical in hardware to the B2b module (from which it inherits all
    behaviour) except for the connectors on its front panel, used to connect
    to two neighbouring IVVI rack modules.
    """

    # Wire encodings for the input-location and connection-type settings.
    _LOCATION_CODES = {'back': 0, 'front': 1}
    _CONNECTION_CODES = {'single': 0, 'differential': 1}

    def __init__(self, spi_rack, module, calibrate=False):
        """Inits the D4b module interface

        A microcontroller in the module handles all the communication with
        the ADCs, which allows exactly timed ADC updates: based on triggers,
        timers etc.

        Args:
            spi_rack (SPI_rack object): SPI_rack class object via which the communication runs
            module (int): the module number set by the user (most coincide with the hardware)
            calibrate (bool): if True, runs calibration at initialisation
        """
        super().__init__(spi_rack, module, calibrate)
        self.type = 'D4b'

    def _check_adc(self, ADC):
        # Both module channels are numbered 0 and 1.
        if ADC not in range(2):
            raise ValueError('{} module {}: ADC {} does not exist.'.format(self.type, self.module, ADC))

    def set_input_location(self, ADC, location):
        """Sets the location for the given ADC input

        'back' is used to read out neighbouring IVVI rack modules, 'front'
        uses the front-panel connectors.

        Args:
            ADC (int:0-1): channel of which to set the input location
            location (string): back or front
        """
        if location not in self._LOCATION_CODES:
            raise ValueError('{} module {}: value {} does not exist. Possible values '
                             'are: {}'.format(self.type, self.module, location, [*self._LOCATION_CODES.keys()]))
        self._check_adc(ADC)
        cmd = self._command.ADC_LOCATION
        message = bytearray([128 | cmd.value, 2, ADC, self._LOCATION_CODES[location]])
        self.spi_rack.write_data(self.module, 0, SAMD51_MODE, SAMD51_SPEED, message)

    def get_input_location(self, ADC):
        """Gets the location for the given ADC

        Args:
            ADC (int:0-1): channel of which to get the input location

        Returns:
            input location of the ADC (string)
        """
        self._check_adc(ADC)
        cmd = self._command.ADC_LOCATION
        request = bytearray([cmd.value, 1, ADC, 0xFF, 0xFF])
        reply = self.spi_rack.read_data(self.module, 0, SAMD51_MODE, SAMD51_SPEED, request)
        by_code = {code: name for name, code in self._LOCATION_CODES.items()}
        return by_code[reply[-1]]

    def set_input_connection(self, ADC, connection_type):
        """Sets the connection type for the given ADC input

        For back connections to the IVVI rack it should always be set to
        'differential'.

        Args:
            ADC (int:0-1): channel of which to set the connection type
            connection_type (string): single or differential
        """
        if connection_type not in self._CONNECTION_CODES:
            raise ValueError('{} module {}: value {} does not exist. Possible values '
                             'are: {}'.format(self.type, self.module, connection_type, [*self._CONNECTION_CODES.keys()]))
        self._check_adc(ADC)
        cmd = self._command.ADC_CONNECTION
        message = bytearray([128 | cmd.value, 2, ADC, self._CONNECTION_CODES[connection_type]])
        self.spi_rack.write_data(self.module, 0, SAMD51_MODE, SAMD51_SPEED, message)

    def get_input_connection(self, ADC):
        """Gets the connection type for the given ADC

        Args:
            ADC (int:0-1): channel of which to get the connection type

        Returns:
            connection type of the ADC (string)
        """
        self._check_adc(ADC)
        cmd = self._command.ADC_CONNECTION
        request = bytearray([cmd.value, 1, ADC, 0xFF, 0xFF])
        reply = self.spi_rack.read_data(self.module, 0, SAMD51_MODE, SAMD51_SPEED, request)
        by_code = {code: name for name, code in self._CONNECTION_CODES.items()}
        return by_code[reply[-1]]
| {
"repo_name": "Rubenknex/SPI-rack",
"path": "spirack/D4b_module.py",
"copies": "2",
"size": "4976",
"license": "mit",
"hash": 3723048741171442000,
"line_mean": 39.7868852459,
"line_max": 114,
"alpha_frac": 0.6117363344,
"autogenerated": false,
"ratio": 3.958631662688942,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5570367997088942,
"avg_score": null,
"num_lines": null
} |
"""ADC module D4 interface
SPI Rack interface code for the D4 module.
Example:
Example use: ::
D4 = spirack.D4_modules(SPI_Rack1, 5)
"""
from .chip_mode import AD7175_MODE, AD7175_SPEED
class D4_module(object):
    """D4 module interface class

    This class does the low level interfacing with the D4 module. When creating
    an instance, it requires a SPI_rack class passed as a parameter.

    The module contains two independent 24-bit analog to digital converters. They
    can be individually configured and triggered. The filter settings determine
    the data rate and resolution. For an overview of settings, see the website.

    Attributes:
        module (int): module number set by the user (must coincide with hardware)
        filter_setting (int): current filter setting
        filter_type (string): filter type, either sinc3 or sinc5
    """

    def __init__(self, spi_rack, module):
        """Inits D4 module class

        The D4_module class needs an SPI_rack object at initiation. All
        communication will run via that class. At initialization the ADC filters
        will be set to 'sinc3' at 16.67 SPS.

        Args:
            spi_rack (SPI_rack object): SPI_rack class object via which the communication runs
            module (int): module number set on the hardware
        """
        self.module = module
        self.spi_rack = spi_rack
        self.reg = AD7175_registers

        self.filter_setting = None
        self.filter_type = None

        self._default_setup()
        for adc in range(0, 2):
            # Set filter to sinc3 and 16.67 SPS as default
            self.set_filter(adc, 'sinc3', 16)

    def set_filter(self, adc, filter_type, filter_setting):
        """Sets the ADC filter

        The two filter parameters determine the filter response (cutoff frequency),
        the 50 Hz rejection and the resolution. See the filter table on the website
        to determine which setting is correct for your application.

        Args:
            adc (int:0-1): ADC inside the module which needs to be set
            filter_type (string): set to sinc3 or sinc5
            filter_setting (int:0-20): the desired filter setting

        Raises:
            ValueError: on an unknown filter type or out-of-range setting
        """
        filter_values = {'sinc3': 3, 'sinc5': 0}
        if filter_type not in filter_values:
            raise ValueError('Value {} does not exist. Possible values are: {}'.format(filter_type, filter_values))
        if filter_setting not in range(21):
            raise ValueError('Value {} not allowed. Possible values are from 0 to 20.'.format(filter_setting))

        self._write_data_16(adc, self.reg.FILTCON0_REG,
                            (filter_values[filter_type]<<self.reg.ORDER0) |
                            (filter_setting<<self.reg.ODR))
        self.filter_setting = filter_setting
        self.filter_type = filter_type

    def single_conversion(self, adc):
        """Perform a conversion

        Performs a conversion on the given ADC. It will both trigger the ADC and
        wait for the result. Because of this it will block all other operations.

        Args:
            adc (int:0-1): ADC to perform the conversion

        Returns:
            ADC measured voltage (float)
        """
        # Reset filter (restarts a conversion in continuous mode)
        self._write_data_16(adc, self.reg.adcMODE_REG, (0<<self.reg.MODE) | (1<<self.reg.SING_CYC))
        return self._read_conversion_result(adc)

    def start_conversion(self, adc):
        """Trigger a conversion

        Triggers a conversion on the given ADC. Does not wait for the result. This
        should be used if multiple devices/adcs need triggering. After the conversion
        is done it will immediately continue doing conversions and updating the
        output.

        Args:
            adc (int:0-1): ADC to perform the conversion
        """
        # Reset filter (restarts a conversion in continuous mode)
        self._write_data_16(adc, self.reg.adcMODE_REG, (0<<self.reg.MODE) | (1<<self.reg.SING_CYC))

    def get_result(self, adc):
        """Returns the result of a conversion

        Returns the result from a triggered conversion. The function will wait until the
        result is present, therefore it will block all other operations.

        It will return the last conversion result. If the time between the trigger and
        getting the result is too long, the result will be of a second/third conversion.
        The ADC keeps converting and updating the data output.

        Args:
            adc (int:0-1): ADC to readout

        Returns:
            ADC measured voltage (float)
        """
        return self._read_conversion_result(adc)

    def offset_calibration(self, adc):
        """Offset voltage calibration routine

        Calibrates the offset of the given ADC input. To run this routine, put
        a short or 50 Ohm short on the input of the given ADC.

        Args:
            adc (int:0-1): ADC to calibrate
        """
        print('Make sure that ADC input {} is terminated with a short or 50 Ohm '
              'while running this calibration!'.format(adc+1))

        filter_setting = self.filter_setting
        filter_type = self.filter_type
        # set to best performance for offset calibration
        self.set_filter(adc, 'sinc3', 20)
        # start the offset calibration (mode 6) and wait for it to finish
        self._write_data_16(adc, self.reg.adcMODE_REG, (6<<self.reg.MODE) | (1<<self.reg.SING_CYC))
        self._wait_until_settled(adc)
        # back to continuous conversion mode
        self._write_data_16(adc, self.reg.adcMODE_REG, (0<<self.reg.MODE) | (1<<self.reg.SING_CYC))

        # set back to previous setting
        self.set_filter(adc, filter_type, filter_setting)

    def gain_calibration(self, adc):
        """Gain calibration routine

        Calibrates the gain of the given ADC input. To run this routine, put
        4V on the input of the given ADC using a D5a.

        Args:
            adc (int:0-1): ADC to calibrate
        """
        print('Make sure that ADC input {} is set to 4V (using a D5a)!'.format(adc+1))

        filter_setting = self.filter_setting
        filter_type = self.filter_type
        # set to best performance for gain calibration
        self.set_filter(adc, 'sinc3', 20)
        # start the gain calibration (mode 7) and wait for it to finish
        self._write_data_16(adc, self.reg.adcMODE_REG, (7<<self.reg.MODE) | (1<<self.reg.SING_CYC))
        self._wait_until_settled(adc)
        # back to continuous conversion mode
        self._write_data_16(adc, self.reg.adcMODE_REG, (0<<self.reg.MODE) | (1<<self.reg.SING_CYC))

        # Correction found by measuring with HP3458A 8.5 digit DMM
        # This correction worked quite well with 4 chips tested.
        gain_val = 10900
        rdata = self._read_data(adc, self.reg.GAIN0_REG, 3)[1:]
        # BUG FIX: the low byte is rdata[2]; the original combined rdata[0]
        # twice, corrupting the read-back gain value.
        value = rdata[0]<<16 | rdata[1]<<8 | rdata[2]
        self._write_data_24(adc, self.reg.GAIN0_REG, value-gain_val)

        # set back to previous setting
        self.set_filter(adc, filter_type, filter_setting)

    def _read_conversion_result(self, adc):
        """Block until the ADC flags new data, then read it and convert to volts."""
        while True:
            status = self._read_data(adc, self.reg.STATUS_REG, 1)
            # bit 7 (nRDY) low means new data is available
            if (status[0]&0x80) == 0:
                # Get raw data, shift to correct place and convert to voltage
                raw_data = self._read_data(adc, self.reg.DATA_REG, 3)[1:]
                raw_data_val = raw_data[0] << 16 | raw_data[1] << 8 | raw_data[2]
                # 2.5 Volt reference with factor 2 compensation for divider;
                # gain set such that +-4V is full scale
                return (raw_data_val * 2 / 2**22) - 4.0

    def _wait_until_settled(self, adc):
        """Poll the status register until the running conversion has finished."""
        while True:
            status = self._read_data(adc, self.reg.STATUS_REG, 1)
            if (status[0]&0x80) == 0:
                return

    def _default_setup(self):
        """Writes the power-on configuration to both ADCs."""
        # Basic configuration
        for adc in range(0, 2):
            # Disable internal ref, set continuous conversion mode, internal clock and single cycle
            # Single cycle only outputs data when filter is settled
            self._write_data_16(adc, self.reg.adcMODE_REG,
                                (0<<self.reg.MODE) | (1<<self.reg.SING_CYC))
            self._write_data_16(adc, self.reg.IFMODE_REG, (1<<self.reg.DOUT_RESET))
            # Enable only channel 0, but set all to setup 0 and AIN4 as AINNEG
            # with respective AINPOS
            self._write_data_16(adc, self.reg.CH0_REG,
                                (1<<self.reg.CH_EN) |
                                (0<<self.reg.SETUP_SEL) |
                                (self.reg.AIN0<<self.reg.AINPOS) |
                                (self.reg.AIN4<<self.reg.AINNEG))
            self._write_data_16(adc, self.reg.CH1_REG,
                                (0<<self.reg.CH_EN) |
                                (0<<self.reg.SETUP_SEL) |
                                (self.reg.AIN1<<self.reg.AINPOS) |
                                (self.reg.AIN4<<self.reg.AINNEG))
            self._write_data_16(adc, self.reg.CH2_REG,
                                (0<<self.reg.CH_EN) |
                                (0<<self.reg.SETUP_SEL) |
                                (self.reg.AIN2<<self.reg.AINPOS) |
                                (self.reg.AIN4<<self.reg.AINNEG))
            self._write_data_16(adc, self.reg.CH3_REG,
                                (0<<self.reg.CH_EN) |
                                (0<<self.reg.SETUP_SEL) |
                                (self.reg.AIN3<<self.reg.AINPOS) |
                                (self.reg.AIN4<<self.reg.AINNEG))
            # Enable input buffers, disable reference buffers, external reference
            self._write_data_16(adc, self.reg.SETUPCON0_REG,
                                (1<<self.reg.BI_UNIPOLAR) |
                                (1<<self.reg.AINBUF0P) |
                                (1<<self.reg.AINBUF0M))
            # Set the gain such that +-4V is full scale. Can be overwritten by calibration
            self._write_data_24(adc, self.reg.GAIN0_REG, 6988566)
            self._write_data_24(adc, self.reg.GAIN1_REG, 6988566)
            self._write_data_24(adc, self.reg.GAIN2_REG, 6988566)
            self._write_data_24(adc, self.reg.GAIN3_REG, 6988566)

    def _read_data(self, adc, reg, no_bytes):
        """Read `no_bytes` bytes from ADC register `reg`.

        Returns the raw SPI reply; byte 0 echoes the command, data follows.
        """
        # bit 6 of the command byte marks a read
        s_data = bytearray([reg | (1<<6)] + no_bytes*[0])
        return self.spi_rack.read_data(self.module, adc, AD7175_MODE, AD7175_SPEED, s_data)

    def _write_data_8(self, adc, reg, data):
        """Write one byte to ADC register `reg`."""
        s_data = bytearray([reg, data])
        self.spi_rack.write_data(self.module, adc, AD7175_MODE, AD7175_SPEED, s_data)

    def _write_data_16(self, adc, reg, data):
        """Write a 16-bit value (big endian) to ADC register `reg`."""
        s_data = bytearray([reg, data>>8, data&0xFF])
        self.spi_rack.write_data(self.module, adc, AD7175_MODE, AD7175_SPEED, s_data)

    def _write_data_24(self, adc, reg, data):
        """Write a 24-bit value (big endian) to ADC register `reg`."""
        s_data = bytearray([reg, data>>16, (data>>8)&0xFF, data&0xFF])
        self.spi_rack.write_data(self.module, adc, AD7175_MODE, AD7175_SPEED, s_data)
class AD7175_registers:
    """AD7175 register class

    A list of all the register names with values and all bits with corresponding
    locations in the registers.
    """
    # adc register locations
    STATUS_REG = 0x00
    adcMODE_REG = 0x01
    IFMODE_REG = 0x02
    REGCHECK_REG = 0x03
    DATA_REG = 0x04
    GPIOCON_REG = 0x06
    ID_REG = 0x07
    CH0_REG = 0x10
    CH1_REG = 0x11
    CH2_REG = 0x12
    CH3_REG = 0x13
    SETUPCON0_REG = 0x20
    SETUPCON1_REG = 0x21
    SETUPCON2_REG = 0x22
    SETUPCON3_REG = 0x23
    # BUG FIX: FILTCON1-3, OFFSET1-3 and GAIN1-3 previously all aliased the
    # corresponding *0 register; per the AD7175 register map they sit at
    # consecutive addresses.
    FILTCON0_REG = 0x28
    FILTCON1_REG = 0x29
    FILTCON2_REG = 0x2A
    FILTCON3_REG = 0x2B
    OFFSET0_REG = 0x30
    OFFSET1_REG = 0x31
    OFFSET2_REG = 0x32
    OFFSET3_REG = 0x33
    GAIN0_REG = 0x38
    GAIN1_REG = 0x39
    GAIN2_REG = 0x3A
    GAIN3_REG = 0x3B

    # Status Register bits
    nRDY = 7
    adc_ERROR = 6
    CRC_ERROR = 5
    REG_ERROR = 4
    CHANNEL = 0

    # adc Mode Register bits
    REF_EN = 15
    HIDE_DELAY = 14
    SING_CYC = 13
    DELAY = 8
    MODE = 4
    CLOCKSEL = 2

    # IFMODE Register bits
    ALT_SYNC = 12
    IOSTRENGTH = 11
    DOUT_RESET = 8
    CONTREAD = 7
    DATA_STAT = 6
    REG_CHECK = 5
    CRC_EN = 2
    WL16 = 0

    # GPIOCON Register bits
    MUX_IO = 12
    SYNC_EN = 11
    ERR_EN = 9
    ERR_DAT = 8
    IP_EN1 = 5
    IP_EN0 = 4
    OP_EN1 = 3
    OP_EN0 = 2
    GP_DATA1 = 1
    GP_DATA0 = 0

    # Channel Registers bits
    CH_EN = 15
    SETUP_SEL = 12
    AINPOS = 5
    AINNEG = 0

    # Setup Configuration Register bits
    BI_UNIPOLAR = 12
    REFBUF0P = 11
    REFBUF0M = 10
    AINBUF0P = 9
    AINBUF0M = 8
    REF_SEL = 4

    # Filter Configuration Register bits
    SINC3_MAP0 = 15
    ENHFILTEN = 11
    ENHFILT = 8
    ORDER0 = 5
    ODR = 0

    # adc register values (analog input selection codes)
    AIN0 = 0
    AIN1 = 1
    AIN2 = 2
    AIN3 = 3
    AIN4 = 4
    REFP = 21
    REFN = 22
| {
"repo_name": "Rubenknex/SPI-rack",
"path": "spirack/D4_module.py",
"copies": "2",
"size": "13448",
"license": "mit",
"hash": 1818657467135176400,
"line_mean": 34.5767195767,
"line_max": 115,
"alpha_frac": 0.5726502082,
"autogenerated": false,
"ratio": 3.5039082855653985,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5076558493765398,
"avg_score": null,
"num_lines": null
} |
# adc.py
# Part of PyBBIO
# github.com/graycatlabs/PyBBIO
# MIT License
#
# Beaglebone ADC driver for kernels >= 3.8.
#
# Just a wrapper for the sysfs ADC driver for the time being.
import os, glob, bbio
import cape_manager
from config import ADC, ADC_ENABLE_DTS_OVERLAY
# True once the ADC device-tree overlay has been loaded; set lazily by
# analogRead() on first use.
_ADC_INITIALIZED = False
def analog_init():
  """ Initializes the on-board 8ch 12bit ADC. """
  # Nothing to do here: the overlay is loaded lazily by analogRead().
  return None
def analog_cleanup():
  """ No-op placeholder, kept for API symmetry with analog_init(). """
def analogRead(adc_pin):
  """ Returns voltage read on given analog input pin. If passed one of
      PyBBIO's AIN0-AIN5 keywords the voltage will be returned in millivolts.
      May also be passed the path to an AIN file as created by a cape overlay,
      in which case the value will be returned as found in the file. """
  global _ADC_INITIALIZED
  if not _ADC_INITIALIZED:
    # Load the ADC overlay once per process. Keep it loaded on exit,
    # since unloading it can cause a kernel panic.
    cape_manager.load(ADC_ENABLE_DTS_OVERLAY, auto_unload=False)
    bbio.delay(100)
    _ADC_INITIALIZED = True

  if adc_pin in ADC:
    adc_pin = ADC[adc_pin]
    matches = glob.glob(adc_pin[0])
    if not matches:
      # Overlay for this AIN not loaded yet; load it and look again.
      cape_manager.load(adc_pin[1], auto_unload=False)
      matches = glob.glob(adc_pin[0])
  else:
    matches = glob.glob(adc_pin)
  if not matches:
    raise Exception('*Could not load overlay for adc_pin: %s' % adc_pin)
  ain_file = matches[0]

  # Occasionally the kernel will be writing to the file when you try
  # to read it; to avoid IOError try up to 5 times.
  for _ in range(5):
    try:
      with open(ain_file, 'rb') as f:
        return int(f.read())
    except IOError:
      continue
  raise Exception('*Could not open AIN file: %s' % ain_file)
def inVolts(mv):
  """ Millivolts-to-volts conversion helper... you know, to keep the API
      consistent. """
  volts = mv / 1000.0
  return volts
| {
"repo_name": "ims-tyler/PyBBIO",
"path": "bbio/platform/beaglebone/adc.py",
"copies": "3",
"size": "1879",
"license": "mit",
"hash": -646001964217655200,
"line_mean": 28.359375,
"line_max": 78,
"alpha_frac": 0.6620542842,
"autogenerated": false,
"ratio": 3.0853858784893267,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5247440162689326,
"avg_score": null,
"num_lines": null
} |
"""Add 1"/hr over one day storms."""
import sys
import os
from datetime import date
from pyiem.util import get_dbconn
from pyiem.dep import read_cli
from pandas.io.sql import read_sql
from tqdm import tqdm
# HUC12 identifiers to process, one per line in myhucs.txt.
# Use a context manager so the file handle is closed promptly instead of
# leaking until garbage collection.
with open("myhucs.txt") as fh:
    MYHUCS = [line.strip() for line in fh]
def do(origfn, scenario, numstorms):
    """Create a scenario climate file with synthetic storms inserted.

    Copies the scenario-0 climate file at `origfn` to the equivalent path
    for `scenario`, inserting `numstorms` synthetic one-hour 25.4 mm (1 inch)
    storms per year on randomly sampled dry, above-freezing spring days.

    Args:
        origfn (str): path to the baseline (scenario 0) climate file
        scenario (int): scenario number substituted into the output path
        numstorms (int): number of storms to insert per year

    Returns:
        bool: False when `origfn` does not exist, True otherwise.
    """
    if not os.path.isfile(origfn):
        return False
    newfn = origfn.replace("/0/", f"/{scenario}/")
    newdir = os.path.dirname(newfn)
    if not os.path.isdir(newdir):
        os.makedirs(newdir)
    clidf = read_cli(origfn)
    # Candidate days: spring (MAM), no precip breakpoints, above freezing
    df = clidf[
        (
            clidf.index.month.isin([3, 4, 5])
            & (clidf["bpcount"] == 0)
            & (clidf["tmin"] > 0)
        )
    ]
    # For each year, pick `numstorms` candidate days at random
    df2 = df.groupby(df.index.year).apply(lambda d: d.sample(numstorms))
    dates = df2.reset_index()["level_1"].dt.date.values
    # Read the original file with a context manager so the handle is closed
    # (the original code leaked it via a bare open().readlines()).
    with open(origfn) as src:
        lines = src.readlines()
    # Edit the new climate file
    with open(newfn, "w") as fh:
        linenum = 0
        while linenum < len(lines):
            if linenum < 15:
                # the 15-line header is copied verbatim
                fh.write(lines[linenum])
                linenum += 1
                continue
            tokens = lines[linenum].strip().split("\t")
            breakpoints = int(tokens[3])
            valid = date(int(tokens[2]), int(tokens[1]), int(tokens[0]))
            if valid in dates:
                # this day gets the five storm breakpoints written below
                tokens[3] = "5"
                fh.write("\t".join(tokens) + "\n")
                linenum += 1
                # Add the storm: 25.4 mm over one hour starting at noon
                fh.write("12.00 0.00\n")
                fh.write("12.25 6.35\n")
                fh.write("12.50 12.70\n")
                fh.write("12.75 19.05\n")
                fh.write("13.00 25.40\n")
            else:
                fh.write(lines[linenum])
                linenum += 1
            # copy this day's existing breakpoint lines unchanged
            for _i in range(breakpoints):
                fh.write(lines[linenum])
                linenum += 1
    return True
def main(argv):
    """Go Main Go."""
    scenario = int(argv[1])
    numstorms = int(argv[2])
    # Every distinct climate file referenced by scenario-0 flowpaths
    # within the HUC12s listed in myhucs.txt
    files = read_sql(
        "SELECT distinct climate_file from flowpaths where scenario = 0 and "
        "huc_12 in %s",
        get_dbconn("idep"),
        params=(tuple(MYHUCS),),
        index_col=None,
    )
    failed = 0
    progress = tqdm(files["climate_file"].values)
    for fn in progress:
        progress.set_description(fn.split("/")[-1])
        if not do(fn, scenario, numstorms):
            failed += 1
    print(f"{failed}/{len(files.index)} runs failed.")


if __name__ == "__main__":
    main(sys.argv)
| {
"repo_name": "akrherz/idep",
"path": "scripts/climatechange/addstorms_cli.py",
"copies": "2",
"size": "2708",
"license": "mit",
"hash": -5955747885565635000,
"line_mean": 29.7727272727,
"line_max": 77,
"alpha_frac": 0.5284342688,
"autogenerated": false,
"ratio": 3.5123216601815823,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 88
} |
"""Add 3 digit version of all 2 digit IRONMAN identifiers
Revision ID: 265e7dc4c1a5
Revises: 2e1421ac841a
Create Date: 2019-04-23 11:29:22.633160
"""
import re
from portal.database import db
from portal.models.identifier import Identifier
from portal.models.organization import OrganizationIdentifier
from portal.models.user import UserIdentifier
from portal.system_uri import TRUENTH_EXTERNAL_STUDY_SYSTEM
# revision identifiers, used by Alembic.
revision = '265e7dc4c1a5'
down_revision = '2e1421ac841a'

# Matches a 2-digit IRONMAN organization identifier, e.g. '146-42'
org_pattern = re.compile(r'^146-(\d\d)$')
# Matches a study identifier built on a 2-digit org segment, e.g. '170-42-001'
study_pattern = re.compile(r'^170-(\d\d)-(\d\d\d)$')
def upgrade():
    """Backfill 3-digit versions of 2-digit IRONMAN identifiers.

    For every org identifier matching '146-NN', create a sibling '146-0NN'
    identifier on the same organization (skipped when already present, so
    the migration is re-runnable).  Then, for every user study identifier
    '170-NN-MMM' whose org segment was just padded, create a matching
    '170-0NN-MMM' identifier linked to the same user(s).
    """
    # All IRONMAN orgs need a 3 digit version
    IRONMAN_system = 'http://pcctc.org/'
    ironman_org_ids = [(id.id, id._value) for id in Identifier.query.filter(
        Identifier.system == IRONMAN_system).with_entities(
        Identifier.id, Identifier._value)]
    existing_values = [id[1] for id in ironman_org_ids]
    # maps 2-digit org segment -> zero-padded segment, for the study-id pass
    replacements = {}
    for io_id, io_value in ironman_org_ids:
        found = org_pattern.match(io_value)
        if found:
            # avoid probs if run again - don't add if already present
            needed = '146-0{}'.format(found.group(1))
            if needed in existing_values:
                continue
            replacements[found.group(1)] = '0{}'.format(found.group(1))
            # add a 3 digit identifier and link with same org
            oi = OrganizationIdentifier.query.filter(
                OrganizationIdentifier.identifier_id == io_id).one()
            needed_i = Identifier(
                use='secondary', system=IRONMAN_system, _value=needed)
            needed_oi = OrganizationIdentifier(
                organization_id=oi.organization_id, identifier=needed_i)
            db.session.add(needed_oi)
    # All IRONMAN users with a 2 digit ID referencing one of the replaced
    # values needs a 3 digit version
    ironman_study_ids = Identifier.query.filter(
        Identifier.system == TRUENTH_EXTERNAL_STUDY_SYSTEM).filter(
        Identifier._value.like('170-%')).with_entities(
        Identifier.id, Identifier._value)
    for iid, ival in ironman_study_ids:
        found = study_pattern.match(ival)
        if found:
            org_segment = found.group(1)
            patient_segment = found.group(2)
            # only add if also one of the new org ids
            if org_segment not in replacements:
                continue
            needed = '170-{}-{}'.format(
                replacements[org_segment], patient_segment)
            # add a 3 digit identifier and link with same user(s)
            uis = UserIdentifier.query.filter(
                UserIdentifier.identifier_id == iid)
            needed_i = Identifier(
                use='secondary', system=TRUENTH_EXTERNAL_STUDY_SYSTEM,
                _value=needed)
            for ui in uis:
                needed_ui = UserIdentifier(
                    user_id=ui.user_id, identifier=needed_i)
                db.session.add(needed_ui)
    # single commit: all identifier additions land atomically
    db.session.commit()
def downgrade():
    """Intentionally a no-op: the added identifiers are left in place."""
| {
"repo_name": "uwcirg/true_nth_usa_portal",
"path": "portal/migrations/versions/265e7dc4c1a5_.py",
"copies": "1",
"size": "3066",
"license": "bsd-3-clause",
"hash": -3668357799552844000,
"line_mean": 33.4494382022,
"line_max": 76,
"alpha_frac": 0.6242661448,
"autogenerated": false,
"ratio": 3.7573529411764706,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9881619085976471,
"avg_score": 0,
"num_lines": 89
} |
"""Add 3DMark06 Result
Revision ID: ad580d7b4f2a
Revises: 0959737bcdfd
Create Date: 2016-10-23 13:47:14.849821
"""
# revision identifiers, used by Alembic.
revision = 'ad580d7b4f2a'
down_revision = '0959737bcdfd'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Create the futuremark3dmark06results table and its indexes."""
    ### commands auto generated by Alembic ###
    op.create_table('futuremark3dmark06results',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('result_date', sa.DateTime(), nullable=True),
    sa.Column('sm2_score', sa.Integer(), nullable=True),
    sa.Column('cpu_score', sa.Integer(), nullable=True),
    sa.Column('sm3_score', sa.Integer(), nullable=True),
    sa.Column('proxcyon_fps', sa.Numeric(precision=5, scale=2), nullable=True),
    sa.Column('fireflyforest_fps', sa.Numeric(precision=5, scale=2), nullable=True),
    sa.Column('cpu1_fps', sa.Numeric(precision=5, scale=2), nullable=True),
    sa.Column('cpu2_fps', sa.Numeric(precision=5, scale=2), nullable=True),
    sa.Column('canyonflight_fps', sa.Numeric(precision=5, scale=2), nullable=True),
    sa.Column('deepfreeze_fps', sa.Numeric(precision=5, scale=2), nullable=True),
    sa.Column('overall_score', sa.Integer(), nullable=True),
    sa.Column('result_url', sa.String(), nullable=True),
    sa.Column('revision_id', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['revision_id'], ['revisions.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    # One index per score/date column, same order Alembic generated them in
    for col in ('cpu_score', 'overall_score', 'result_date', 'sm2_score', 'sm3_score'):
        op.create_index(op.f('ix_futuremark3dmark06results_' + col),
                        'futuremark3dmark06results', [col], unique=False)
    ### end Alembic commands ###
def downgrade():
    """Drop the futuremark3dmark06results indexes and table."""
    ### commands auto generated by Alembic ###
    # Indexes dropped in reverse creation order
    for col in ('sm3_score', 'sm2_score', 'result_date', 'overall_score', 'cpu_score'):
        op.drop_index(op.f('ix_futuremark3dmark06results_' + col),
                      table_name='futuremark3dmark06results')
    op.drop_table('futuremark3dmark06results')
    ### end Alembic commands ###
| {
"repo_name": "rivalrockets/rivalrockets-api",
"path": "migrations/versions/ad580d7b4f2a_add_3dmark06_result.py",
"copies": "2",
"size": "2782",
"license": "mit",
"hash": -3837390896411258000,
"line_mean": 51.4905660377,
"line_max": 133,
"alpha_frac": 0.7221423436,
"autogenerated": false,
"ratio": 3.053787047200878,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4775929390800879,
"avg_score": null,
"num_lines": null
} |
"""Add about_pages table
Revision ID: 4c236d4eac4
Revises: 55a2c1bad7
Create Date: 2015-04-22 23:45:51.210213
"""
# revision identifiers, used by Alembic.
revision = '4c236d4eac4'
down_revision = '55a2c1bad7'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Create the about_pages table."""
    # The three 255-char string columns share one definition
    text_columns = [
        sa.Column(name, sa.String(length=255), nullable=False)
        for name in ('navbar_section', 'slug', 'title')
    ]
    op.create_table(
        'about_pages',
        sa.Column('id', sa.Integer(), nullable=False),
        *text_columns,
        sa.Column('content', sa.Text(), nullable=False),
        sa.Column('active', sa.Boolean(), nullable=False),
        sa.Column('event_id', sa.Integer(), nullable=False),
        sa.ForeignKeyConstraint(['event_id'], ['events.id'], ),
        sa.PrimaryKeyConstraint('id'),
        sa.UniqueConstraint(
            'navbar_section', 'slug', 'event_id',
            name='ix_about_pages_navbar_section_slug_event_id',
        ),
    )
def downgrade():
    """Drop the about_pages table."""
    op.drop_table('about_pages')
| {
"repo_name": "djds23/pygotham-1",
"path": "migrations/versions/4c236d4eac4_add_about_pages_table.py",
"copies": "3",
"size": "1086",
"license": "bsd-3-clause",
"hash": 4484492885169358300,
"line_mean": 27.5789473684,
"line_max": 75,
"alpha_frac": 0.6279926335,
"autogenerated": false,
"ratio": 3.331288343558282,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5459280977058282,
"avg_score": null,
"num_lines": null
} |
"""Add accepted_by_customer
Revision ID: 3183d344740d
Revises: 4190b0aefe23
Create Date: 2015-06-30 15:00:54.718273
"""
# revision identifiers, used by Alembic.
revision = '3183d344740d'
down_revision = '4190b0aefe23'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# Hail status enum values before this migration.
old_options = ( 'emitted', 'received',
        'sent_to_operator', 'received_by_operator',
        'received_by_taxi',
        'accepted_by_taxi',
        'declined_by_taxi', 'declined_by_customer',
        'incident_customer', 'incident_taxi',
        'timeout_customer', 'timeout_taxi',
        'outdated_customer', 'outdated_taxi', 'failure')
# The new value set, with 'accepted_by_customer' added.
new_options = sorted(old_options + ('accepted_by_customer',))

# SQLAlchemy Enum types for the old/new sets plus a scratch '_status' type.
# NOTE(review): old_type/tmp_type/tcr appear unused by upgrade() below, which
# uses ALTER TYPE ... ADD VALUE directly -- likely leftovers of the usual
# enum-swap recipe.
old_type = sa.Enum(*old_options, name='status')
new_type = sa.Enum(*new_options, name='status')
tmp_type = sa.Enum(*new_options, name='_status')

tcr = sa.sql.table('hail',
                   sa.Column('status', new_type, nullable=False))
def upgrade():
    """Append 'accepted_by_customer' to the hail_status Postgres enum."""
    # End Alembic's implicit transaction first: PostgreSQL (pre-12) does not
    # allow ALTER TYPE ... ADD VALUE inside a transaction block.
    op.execute('COMMIT')
    op.execute("ALTER TYPE hail_status ADD value 'accepted_by_customer' after 'accepted_by_taxi';")
def downgrade():
    """Intentionally a no-op: the added enum value is left in place."""
| {
"repo_name": "openmaraude/APITaxi_models",
"path": "APITaxi_models2/migrations/versions/20150630_15:00:54_3183d344740d_add_accepted_by_customer.py.py",
"copies": "4",
"size": "1109",
"license": "mit",
"hash": -9207266718272020000,
"line_mean": 27.4358974359,
"line_max": 99,
"alpha_frac": 0.6844003607,
"autogenerated": false,
"ratio": 3.0805555555555557,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.010689728093557739,
"num_lines": 39
} |
"""Add Access logs table
Revision ID: 26cfd7b0439e
Revises: 6adeb5d3032e
Create Date: 2019-07-23 16:06:28.766128
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = "26cfd7b0439e"
down_revision = "6adeb5d3032e"
branch_labels = None
depends_on = None
def upgrade():
    """Expand access_log with method/type, geo and user-agent columns.

    Replaces the old 'type' column (and its access_log_date index) with
    access_method/access_type plus geolocation metadata.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    # (column name, nullable) pairs, in the order Alembic generated them
    new_columns = [
        ("access_method", False),
        ("access_type", False),
        ("city", True),
        ("country", True),
        ("country_code", True),
        ("extra_params", True),
        ("ip_lat", True),
        ("ip_long", True),
        ("postal_code", True),
        ("subdivision", True),
        ("user_agent", True),
    ]
    for name, nullable in new_columns:
        op.add_column("access_log", sa.Column(name, sa.String(), nullable=nullable))
    op.create_index("access_type", "access_log", ["access_date"], unique=False)
    op.drop_index("access_log_date", table_name="access_log")
    op.drop_column("access_log", "type")
    # ### end Alembic commands ###
def downgrade():
    """Restore the previous access_log schema (reverse of upgrade())."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column("access_log", sa.Column("type", sa.VARCHAR(), autoincrement=False, nullable=True))
    op.create_index("access_log_date", "access_log", ["access_date"], unique=False)
    op.drop_index("access_type", table_name="access_log")
    # Drop the metadata columns added by upgrade(), last-added first.
    for column_name in (
        "user_agent",
        "subdivision",
        "postal_code",
        "ip_long",
        "ip_lat",
        "extra_params",
        "country_code",
        "country",
        "city",
        "access_type",
        "access_method",
    ):
        op.drop_column("access_log", column_name)
    # ### end Alembic commands ###
| {
"repo_name": "psi4/mongo_qcdb",
"path": "qcfractal/alembic/versions/26cfd7b0439e_add_access_logs_table.py",
"copies": "2",
"size": "2411",
"license": "bsd-3-clause",
"hash": 6227631294846209000,
"line_mean": 43.6481481481,
"line_max": 100,
"alpha_frac": 0.669431771,
"autogenerated": false,
"ratio": 3.155759162303665,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48251909333036647,
"avg_score": null,
"num_lines": null
} |
"""Add access_request table to manage requests to access datastores.
Revision ID: 5e4a03ef0bf0
Revises: 41f6a59a61f2
Create Date: 2016-09-09 17:39:57.846309
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '5e4a03ef0bf0'
down_revision = 'b347b202819b'
def upgrade():
    """Create the access_request table used to track datastore access requests."""
    table_args = [
        sa.Column('created_on', sa.DateTime(), nullable=True),
        sa.Column('changed_on', sa.DateTime(), nullable=True),
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('datasource_type', sa.String(length=200), nullable=True),
        sa.Column('datasource_id', sa.Integer(), nullable=True),
        sa.Column('changed_by_fk', sa.Integer(), nullable=True),
        sa.Column('created_by_fk', sa.Integer(), nullable=True),
        # Both audit columns reference the Flask-AppBuilder user table.
        sa.ForeignKeyConstraint(['changed_by_fk'], ['ab_user.id'], ),
        sa.ForeignKeyConstraint(['created_by_fk'], ['ab_user.id'], ),
        sa.PrimaryKeyConstraint('id'),
    ]
    op.create_table('access_request', *table_args)
def downgrade():
    """Drop the access_request table created by upgrade()."""
    op.drop_table('access_request')
| {
"repo_name": "SingTel-DataCo/incubator-superset",
"path": "superset/migrations/versions/5e4a03ef0bf0_add_request_access_model.py",
"copies": "13",
"size": "1064",
"license": "apache-2.0",
"hash": -1270022737268020700,
"line_mean": 31.2424242424,
"line_max": 75,
"alpha_frac": 0.6588345865,
"autogenerated": false,
"ratio": 3.3146417445482865,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 33
} |
"""add access tier to workspace
Revision ID: fc1eb86aa8f4
Revises: a2d41894a561
Create Date: 2021-06-22 13:26:42.138628
"""
from alembic import op
import sqlalchemy as sa
import rdr_service.model.utils
from rdr_service.participant_enums import WorkbenchWorkspaceAccessTier
# revision identifiers, used by Alembic.
revision = 'fc1eb86aa8f4'
down_revision = 'a2d41894a561'
branch_labels = None
depends_on = None
def upgrade(engine_name):
    """Dispatch to the engine-specific function upgrade_<engine_name>."""
    # Raises KeyError if no matching function exists in this module.
    handler = globals()["upgrade_%s" % engine_name]
    handler()
def downgrade(engine_name):
    """Dispatch to the engine-specific function downgrade_<engine_name>."""
    # Raises KeyError if no matching function exists in this module.
    handler = globals()["downgrade_%s" % engine_name]
    handler()
def upgrade_rdr():
    """Add the nullable access_tier enum column to both workspace tables."""
    # ### commands auto generated by Alembic - please adjust! ###
    for table in ('workbench_workspace_approved', 'workbench_workspace_snapshot'):
        op.add_column(table, sa.Column('access_tier', rdr_service.model.utils.Enum(WorkbenchWorkspaceAccessTier), nullable=True))
    # ### end Alembic commands ###
def downgrade_rdr():
    """Remove the access_tier column from both workspace tables."""
    # ### commands auto generated by Alembic - please adjust! ###
    for table in ('workbench_workspace_snapshot', 'workbench_workspace_approved'):
        op.drop_column(table, 'access_tier')
    # ### end Alembic commands ###
def upgrade_metrics():
    """No schema changes for the metrics database in this revision."""
    # ### commands auto generated by Alembic - please adjust! ###
    # ### end Alembic commands ###
def downgrade_metrics():
    """No schema changes to revert for the metrics database."""
    # ### commands auto generated by Alembic - please adjust! ###
    # ### end Alembic commands ###
| {
"repo_name": "all-of-us/raw-data-repository",
"path": "rdr_service/alembic/versions/fc1eb86aa8f4_add_access_tier_to_workspace.py",
"copies": "1",
"size": "1517",
"license": "bsd-3-clause",
"hash": -8332885029408151000,
"line_mean": 26.5818181818,
"line_max": 150,
"alpha_frac": 0.70468029,
"autogenerated": false,
"ratio": 3.4953917050691246,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9603174121245492,
"avg_score": 0.019379574764726386,
"num_lines": 55
} |
"""add_accounts
Revision ID: 355b644453f9
Revises: e10a938e5801
Create Date: 2017-10-05 10:25:41.180601
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
# revision identifiers, used by Alembic.
revision = '355b644453f9'
down_revision = 'e10a938e5801'
branch_labels = None
depends_on = None
def upgrade():
    """Create the accounts table plus its secondary lookup indexes."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table(
        'accounts',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('instance', sa.String(length=32), nullable=True),
        sa.Column('username', sa.String(length=32), nullable=False),
        sa.Column('password', sa.String(length=32), nullable=False),
        sa.Column('provider', sa.String(length=12), nullable=False),
        sa.Column('level', sa.SmallInteger(), nullable=False),
        sa.Column('model', sa.String(length=20), nullable=True),
        sa.Column('device_version', sa.String(length=20), nullable=True),
        sa.Column('device_id', sa.String(length=64), nullable=True),
        sa.Column('hibernated', sa.Integer(), nullable=True),
        sa.Column('reason', sa.String(length=12), nullable=True),
        sa.Column('captchaed', sa.Integer(), nullable=True),
        sa.Column('created', sa.Integer(), nullable=True),
        sa.Column('updated', sa.Integer(), nullable=True),
        sa.PrimaryKeyConstraint('id'),
        sa.UniqueConstraint('username', name='ix_accounts_username_unique'),
    )
    # Non-unique indexes for the common filter columns.
    for indexed_column in ('hibernated', 'captchaed', 'instance', 'level'):
        op.create_index(op.f('ix_accounts_%s' % indexed_column), 'accounts',
                        [indexed_column], unique=False)
    # ### end Alembic commands ###
def downgrade():
    """Drop the accounts indexes and table (reverse of upgrade())."""
    # ### commands auto generated by Alembic - please adjust! ###
    # Indexes are removed in the reverse order of their creation.
    for index_name in ('ix_accounts_level', 'ix_accounts_instance',
                       'ix_accounts_captchaed', 'ix_accounts_hibernated'):
        op.drop_index(op.f(index_name), table_name='accounts')
    op.drop_table('accounts')
    # ### end Alembic commands ###
| {
"repo_name": "DavisPoGo/Monocle",
"path": "migrations/versions/355b644453f9_add_accounts.py",
"copies": "1",
"size": "2242",
"license": "mit",
"hash": 2806388094154781000,
"line_mean": 41.3018867925,
"line_max": 93,
"alpha_frac": 0.6846565566,
"autogenerated": false,
"ratio": 3.2777777777777777,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9432371921616094,
"avg_score": 0.006012482552336702,
"num_lines": 53
} |
"""add account table
Revision ID: 4cc9c46f9d8b
Revises: 725816dc500
Create Date: 2014-07-23 16:01:47.462597
"""
# revision identifiers, used by Alembic.
revision = '4cc9c46f9d8b'
down_revision = '725816dc500'
import warnings
from alembic import op
import sqlalchemy as sa
from gertty.dbsupport import sqlite_alter_columns, sqlite_drop_columns
def upgrade():
    """Introduce the account table and link message/comment/approval/change
    rows to it via an account_key foreign key.

    Also nulls project sync timestamps and deletes pending approvals so the
    affected data is re-synced with account information attached.
    """
    # The free-form author columns are superseded by the account table.
    sqlite_drop_columns('message', ['name'])
    sqlite_drop_columns('comment', ['name'])
    sqlite_drop_columns('approval', ['name'])
    sqlite_drop_columns('change', ['owner'])
    op.create_table('account',
    sa.Column('key', sa.Integer(), nullable=False),
    sa.Column('id', sa.Integer(), index=True, unique=True, nullable=False),
    sa.Column('name', sa.String(length=255)),
    sa.Column('username', sa.String(length=255)),
    sa.Column('email', sa.String(length=255)),
    sa.PrimaryKeyConstraint('key')
    )
    op.create_index(op.f('ix_account_name'), 'account', ['name'], unique=True)
    # BUG FIX: the username and email indexes were previously both created on
    # the 'name' column; index the columns their names refer to.
    op.create_index(op.f('ix_account_username'), 'account', ['username'], unique=True)
    op.create_index(op.f('ix_account_email'), 'account', ['email'], unique=True)
    # Add the plain account_key columns first (warnings suppressed), then
    # rebuild them with the foreign key via sqlite_alter_columns —
    # presumably to work around SQLite ALTER TABLE limits; confirm.
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        op.add_column('message', sa.Column('account_key', sa.Integer()))
        op.add_column('comment', sa.Column('account_key', sa.Integer()))
        op.add_column('approval', sa.Column('account_key', sa.Integer()))
        op.add_column('change', sa.Column('account_key', sa.Integer()))
    sqlite_alter_columns('message', [
        sa.Column('account_key', sa.Integer(), sa.ForeignKey('account.key'))
    ])
    sqlite_alter_columns('comment', [
        sa.Column('account_key', sa.Integer(), sa.ForeignKey('account.key'))
    ])
    sqlite_alter_columns('approval', [
        sa.Column('account_key', sa.Integer(), sa.ForeignKey('account.key'))
    ])
    sqlite_alter_columns('change', [
        sa.Column('account_key', sa.Integer(), sa.ForeignKey('account.key'))
    ])
    op.create_index(op.f('ix_message_account_key'), 'message', ['account_key'], unique=False)
    op.create_index(op.f('ix_comment_account_key'), 'comment', ['account_key'], unique=False)
    op.create_index(op.f('ix_approval_account_key'), 'approval', ['account_key'], unique=False)
    op.create_index(op.f('ix_change_account_key'), 'change', ['account_key'], unique=False)
    connection = op.get_bind()
    project = sa.sql.table('project', sa.sql.column('updated', sa.DateTime))
    connection.execute(project.update().values({'updated':None}))
    approval = sa.sql.table('approval', sa.sql.column('pending'))
    # '== False' is intentional: it builds a SQL expression, not a bool test.
    connection.execute(approval.delete().where(approval.c.pending==False))
def downgrade():
    """No downgrade path: reversing the account migration is not supported."""
| {
"repo_name": "aspiers/gertty",
"path": "gertty/alembic/versions/4cc9c46f9d8b_add_account_table.py",
"copies": "1",
"size": "2761",
"license": "apache-2.0",
"hash": 2283768518075008000,
"line_mean": 36.8219178082,
"line_max": 95,
"alpha_frac": 0.6468670771,
"autogenerated": false,
"ratio": 3.408641975308642,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4555509052408642,
"avg_score": null,
"num_lines": null
} |
"""Add ACL relationships
Revision ID: be32626451fb
Revises: None
Create Date: 2016-03-21 11:59:44.094720
Auto generated code by flask-migrate and Alembic.
"""
# This code is auto generated. Ignore linter errors.
# pylint: skip-file
# revision identifiers, used by Alembic.
revision = 'be32626451fb'
down_revision = None
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Add a group_id foreign key to each access-control-entry table."""
    ### commands auto generated by Alembic - please adjust! ###
    for table in ('searchindex_accesscontrolentry',
                  'sketch_accesscontrolentry',
                  'view_accesscontrolentry'):
        op.add_column(table, sa.Column('group_id', sa.Integer(), nullable=True))
        op.create_foreign_key(None, table, 'group', ['group_id'], ['id'])
    ### end Alembic commands ###
def downgrade():
    """Drop the group_id foreign keys and columns (reverse of upgrade())."""
    ### commands auto generated by Alembic - please adjust! ###
    for table in ('view_accesscontrolentry',
                  'sketch_accesscontrolentry',
                  'searchindex_accesscontrolentry'):
        op.drop_constraint(None, table, type_='foreignkey')
        op.drop_column(table, 'group_id')
    ### end Alembic commands ###
| {
"repo_name": "google/timesketch",
"path": "timesketch/migrations/versions/be32626451fb_.py",
"copies": "1",
"size": "1600",
"license": "apache-2.0",
"hash": -5024000130520575000,
"line_mean": 40.0256410256,
"line_max": 103,
"alpha_frac": 0.709375,
"autogenerated": false,
"ratio": 3.382663847780127,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9559812590259703,
"avg_score": 0.006445251504084742,
"num_lines": 39
} |
"""Add a column
Revision ID: 37d1722a4621
Revises: None
Create Date: 2015-06-26 10:08:07.026259
"""
# revision identifiers, used by Alembic.
revision = '37d1722a4621'
down_revision = None
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Add the nullable 'redis' DATETIME column to the sites table."""
    redis_column = sa.Column('redis', sa.DateTime)
    op.add_column('sites', redis_column)
def downgrade():
    """Drop the 'redis' column using batch mode (needed for SQLite ALTER)."""
    with op.batch_alter_table("sites") as batch:
        batch.drop_column('redis')
# Output
# ========
# root@vagrant-ubuntu-trusty-64:/vagrant/dbmigrate/easyengine# sqlite3 /var/lib/ee/ee.db
# SQLite version 3.8.2 2013-12-06 14:53:30
# Enter ".help" for instructions
# Enter SQL statements terminated with a ";"
# sqlite> .schema sites
# CREATE TABLE sites (
# id INTEGER NOT NULL,
# sitename VARCHAR,
# site_type VARCHAR,
# cache_type VARCHAR,
# site_path VARCHAR,
# created_on DATETIME,
# is_enabled BOOLEAN NOT NULL,
# is_ssl BOOLEAN,
# storage_fs VARCHAR,
# storage_db VARCHAR,
# db_name VARCHAR,
# db_user VARCHAR,
# db_password VARCHAR,
# db_host VARCHAR,
# is_hhvm BOOLEAN,
# is_pagespeed BOOLEAN,
# PRIMARY KEY (id),
# UNIQUE (sitename),
# CHECK (is_enabled IN (0, 1)),
# CHECK (is_ssl IN (0, 1)),
# CHECK (is_hhvm IN (0, 1)),
# CHECK (is_pagespeed IN (0, 1))
# );
# sqlite>
# root@vagrant-ubuntu-trusty-64:/vagrant/dbmigrate/easyengine# alembic upgrade 37d1722a4621
# INFO [alembic.migration] Context impl SQLiteImpl.
# INFO [alembic.migration] Will assume non-transactional DDL.
# INFO [alembic.migration] Running upgrade -> 37d1722a4621, Add a column
# root@vagrant-ubuntu-trusty-64:/vagrant/dbmigrate/easyengine# sqlite3 /var/lib/ee/ee.db
# SQLite version 3.8.2 2013-12-06 14:53:30
# Enter ".help" for instructions
# Enter SQL statements terminated with a ";"
# sqlite> .schema sites
# CREATE TABLE sites (
# id INTEGER NOT NULL,
# sitename VARCHAR,
# site_type VARCHAR,
# cache_type VARCHAR,
# site_path VARCHAR,
# created_on DATETIME,
# is_enabled BOOLEAN NOT NULL,
# is_ssl BOOLEAN,
# storage_fs VARCHAR,
# storage_db VARCHAR,
# db_name VARCHAR,
# db_user VARCHAR,
# db_password VARCHAR,
# db_host VARCHAR,
# is_hhvm BOOLEAN,
# is_pagespeed BOOLEAN, redis DATETIME,
# PRIMARY KEY (id),
# UNIQUE (sitename),
# CHECK (is_enabled IN (0, 1)),
# CHECK (is_ssl IN (0, 1)),
# CHECK (is_hhvm IN (0, 1)),
# CHECK (is_pagespeed IN (0, 1))
# );
# sqlite>
# root@vagrant-ubuntu-trusty-64:/vagrant/dbmigrate/easyengine# alembic downgrade -1
# INFO [alembic.migration] Context impl SQLiteImpl.
# INFO [alembic.migration] Will assume non-transactional DDL.
# INFO [alembic.migration] Running downgrade 37d1722a4621 -> , Add a column
# root@vagrant-ubuntu-trusty-64:/vagrant/dbmigrate/easyengine# sqlite3 /var/lib/ee/ee.db
# SQLite version 3.8.2 2013-12-06 14:53:30
# Enter ".help" for instructions
# Enter SQL statements terminated with a ";"
# sqlite> .schema sites
# CREATE TABLE sites (
# id INTEGER NOT NULL,
# sitename VARCHAR,
# site_type VARCHAR,
# cache_type VARCHAR,
# site_path VARCHAR,
# created_on DATETIME,
# is_enabled BOOLEAN NOT NULL,
# is_ssl BOOLEAN,
# storage_fs VARCHAR,
# storage_db VARCHAR,
# db_name VARCHAR,
# db_user VARCHAR,
# db_password VARCHAR,
# db_host VARCHAR,
# is_hhvm BOOLEAN,
# is_pagespeed BOOLEAN,
# PRIMARY KEY (id),
# UNIQUE (sitename),
# CHECK (is_enabled IN (0, 1)),
# CHECK (is_ssl IN (0, 1)),
# CHECK (is_hhvm IN (0, 1)),
# CHECK (is_pagespeed IN (0, 1))
# );
# sqlite>
# root@vagrant-ubuntu-trusty-64:/vagrant/dbmigrate/easyengine#
| {
"repo_name": "rjdp/EE-dbmigrate",
"path": "alembic/versions/37d1722a4621_add_a_column.py",
"copies": "1",
"size": "4017",
"license": "mit",
"hash": -2799064995831511600,
"line_mean": 31.136,
"line_max": 91,
"alpha_frac": 0.6019417476,
"autogenerated": false,
"ratio": 3.0067365269461077,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.41086782745461076,
"avg_score": null,
"num_lines": null
} |
"""add action description table
Revision ID: d09a5945e4a0
Revises: d098df6021e2
Create Date: 2017-07-13 20:33:01.473711
"""
from alembic import op
import oslo_db
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'd09a5945e4a0'
down_revision = 'd098df6021e2'
def upgrade():
    """Create the soft-deletable action_descriptions table, unique per action_type."""
    table_args = [
        sa.Column('created_at', sa.DateTime(), nullable=True),
        sa.Column('updated_at', sa.DateTime(), nullable=True),
        sa.Column('deleted_at', sa.DateTime(), nullable=True),
        # oslo.db soft-delete marker column.
        sa.Column('deleted', oslo_db.sqlalchemy.types.SoftDeleteInteger(),
                  nullable=True),
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('action_type', sa.String(length=255), nullable=False),
        sa.Column('description', sa.String(length=255), nullable=False),
        sa.PrimaryKeyConstraint('id'),
        sa.UniqueConstraint('action_type',
                            name='uniq_action_description0action_type'),
    ]
    op.create_table('action_descriptions', *table_args)
def downgrade():
    """Remove the action_descriptions table created by upgrade()."""
    op.drop_table('action_descriptions')
| {
"repo_name": "stackforge/watcher",
"path": "watcher/db/sqlalchemy/alembic/versions/d09a5945e4a0_add_action_description_table.py",
"copies": "2",
"size": "1076",
"license": "apache-2.0",
"hash": -633961439938958800,
"line_mean": 28.8888888889,
"line_max": 74,
"alpha_frac": 0.6542750929,
"autogenerated": false,
"ratio": 3.5163398692810457,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5170614962181045,
"avg_score": null,
"num_lines": null
} |
"""add action field to experiment_actions
Revision ID: ae72c799d321
Revises: da0fe56f616b
Create Date: 2016-07-24 13:00:06.432146
"""
# revision identifiers, used by Alembic.
revision = 'ae72c799d321'
down_revision = 'da0fe56f616b'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade(engine_name):
    """Dispatch to the environment-specific upgrade_<engine_name> function."""
    handler = globals()["upgrade_%s" % engine_name]
    handler()
def downgrade(engine_name):
    """Dispatch to the environment-specific downgrade_<engine_name> function."""
    handler = globals()["downgrade_%s" % engine_name]
    handler()
def _add_action_column():
    """Shared upgrade body: add the indexed 'action' column to experiment_actions."""
    ### commands auto generated by Alembic - please adjust! ###
    op.add_column('experiment_actions', sa.Column('action', sa.String(length=64), nullable=True))
    op.create_index(op.f('ix_experiment_actions_action'), 'experiment_actions', ['action'], unique=False)
    ### end Alembic commands ###
def _drop_action_column():
    """Shared downgrade body: drop the 'action' column and its index."""
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_index(op.f('ix_experiment_actions_action'), table_name='experiment_actions')
    op.drop_column('experiment_actions', 'action')
    ### end Alembic commands ###
# The per-environment entry points below are looked up by name via
# globals() in upgrade()/downgrade(); the identical bodies are deduplicated
# into the two helpers above.
def upgrade_development():
    """Apply the upgrade to the development database."""
    _add_action_column()
def downgrade_development():
    """Revert the upgrade on the development database."""
    _drop_action_column()
def upgrade_test():
    """Apply the upgrade to the test database."""
    _add_action_column()
def downgrade_test():
    """Revert the upgrade on the test database."""
    _drop_action_column()
def upgrade_production():
    """Apply the upgrade to the production database."""
    _add_action_column()
def downgrade_production():
    """Revert the upgrade on the production database."""
    _drop_action_column()
| {
"repo_name": "c4fcm/CivilServant",
"path": "alembic/versions/ae72c799d321_add_action_field_to_experiment_actions.py",
"copies": "1",
"size": "2252",
"license": "mit",
"hash": -4793044192457673000,
"line_mean": 31.1714285714,
"line_max": 105,
"alpha_frac": 0.6896092362,
"autogenerated": false,
"ratio": 3.632258064516129,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4821867300716129,
"avg_score": null,
"num_lines": null
} |
"""add activation and expiration date columns to executive_compensation table
Revision ID: 427320dea2ab
Revises: 551a8e1cc551
Create Date: 2017-07-10 14:00:30.273558
"""
# revision identifiers, used by Alembic.
revision = '427320dea2ab'
down_revision = '551a8e1cc551'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade(engine_name):
    """Dispatch to the engine-specific upgrade_<engine_name> function."""
    handler = globals()["upgrade_%s" % engine_name]
    handler()
def downgrade(engine_name):
    """Dispatch to the engine-specific downgrade_<engine_name> function."""
    handler = globals()["downgrade_%s" % engine_name]
    handler()
def upgrade_data_broker():
    """Add activation_date and expiration_date to executive_compensation."""
    ### commands auto generated by Alembic - please adjust! ###
    for column_name in ('activation_date', 'expiration_date'):
        op.add_column('executive_compensation', sa.Column(column_name, sa.Date(), nullable=True))
    ### end Alembic commands ###
def downgrade_data_broker():
    """Drop the activation/expiration date columns (reverse of upgrade)."""
    ### commands auto generated by Alembic - please adjust! ###
    for column_name in ('expiration_date', 'activation_date'):
        op.drop_column('executive_compensation', column_name)
    ### end Alembic commands ###
| {
"repo_name": "fedspendingtransparency/data-act-broker-backend",
"path": "dataactcore/migrations/versions/427320dea2ab_add_activation_and_expiration_date_.py",
"copies": "1",
"size": "1092",
"license": "cc0-1.0",
"hash": -5411111967507352000,
"line_mean": 25,
"line_max": 99,
"alpha_frac": 0.7087912088,
"autogenerated": false,
"ratio": 3.4231974921630095,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4631988700963009,
"avg_score": null,
"num_lines": null
} |
"""Add Active translation users
Revision ID: 2a68ba66c32b
Revises: 4148c3cb14ad
Create Date: 2015-04-27 13:32:26.521250
"""
# revision identifiers, used by Alembic.
revision = '2a68ba66c32b'
down_revision = '4148c3cb14ad'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Create TranslationCurrentActiveUsers and its last_check index."""
    ### commands auto generated by Alembic - please adjust! ###
    table = 'TranslationCurrentActiveUsers'
    op.create_table(
        table,
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('user_id', sa.Integer(), nullable=True),
        sa.Column('bundle_id', sa.Integer(), nullable=True),
        sa.Column('last_check', sa.DateTime(), nullable=True),
        sa.ForeignKeyConstraint(['bundle_id'], ['TranslationBundles.id'], ),
        sa.ForeignKeyConstraint(['user_id'], ['GoLabOAuthUsers.id'], ),
        sa.PrimaryKeyConstraint('id'),
    )
    op.create_index(u'ix_TranslationCurrentActiveUsers_last_check', table,
                    ['last_check'], unique=False)
    ### end Alembic commands ###
def downgrade():
    """Drop the TranslationCurrentActiveUsers index and table."""
    ### commands auto generated by Alembic - please adjust! ###
    table = 'TranslationCurrentActiveUsers'
    op.drop_index(u'ix_TranslationCurrentActiveUsers_last_check', table_name=table)
    op.drop_table(table)
    ### end Alembic commands ###
| {
"repo_name": "porduna/appcomposer",
"path": "alembic/versions/2a68ba66c32b_add_active_translation_users.py",
"copies": "3",
"size": "1251",
"license": "bsd-2-clause",
"hash": 6824805748001724000,
"line_mean": 33.75,
"line_max": 130,
"alpha_frac": 0.7114308553,
"autogenerated": false,
"ratio": 3.553977272727273,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5765408128027273,
"avg_score": null,
"num_lines": null
} |
"""Add activity tables
Revision ID: 19f590834366
Revises: 43cda5e14cf0
Create Date: 2012-11-14 23:31:56.202053
"""
# revision identifiers, used by Alembic.
revision = '19f590834366'
down_revision = '43cda5e14cf0'
from alembic import op
import sqlalchemy as db
def upgrade():
    """Create the activity, resource and topic tables plus their join tables."""
    op.create_table(
        'activities',
        db.Column('id', db.Integer, primary_key=True),
        db.Column('user_id', db.Integer, db.ForeignKey('users.id')),
        db.Column('title', db.String(255)),
        db.Column('content', db.Text),
        db.Column('slug', db.String(255)),
        db.Column('start_time', db.DateTime),
        db.Column('end_time', db.DateTime),
        db.Column('address', db.String(255)),
        db.Column('longitude', db.Numeric(10, 7)),
        db.Column('latitude', db.Numeric(10, 7)),
        db.Column('created_time', db.DateTime),
        db.Column('modified_time', db.DateTime))
    op.create_table(
        'activity_users',
        db.Column('activity_id', db.Integer, db.ForeignKey('activities.id'), primary_key=True),
        db.Column('user_id', db.Integer, db.ForeignKey('users.id'), primary_key=True))
    op.create_table(
        'resources',
        db.Column('id', db.Integer, primary_key=True),
        # NOTE(review): 'cser_id' looks like a typo for 'user_id', but it is
        # kept verbatim because renaming it would change the schema.
        db.Column('cser_id', db.Integer, db.ForeignKey('users.id')),
        db.Column('filetype', db.String(50)),
        db.Column('url', db.String(255)),
        db.Column('created_time', db.DateTime),
        db.Column('modified_time', db.DateTime))
    op.create_table(
        'activity_resources',
        db.Column('activity_id', db.Integer, db.ForeignKey('activities.id'), primary_key=True),
        db.Column('resource_id', db.Integer, db.ForeignKey('resources.id'), primary_key=True))
    op.create_table(
        'activity_comments',
        db.Column('id', db.Integer, primary_key=True),
        db.Column('author_name', db.String(50)),
        db.Column('author_email', db.String(255)),
        db.Column('author_site', db.String(255)),
        db.Column('content', db.Text, nullable=False),
        db.Column('created_time', db.DateTime),
        db.Column('modified_time', db.DateTime),
        # Self-referencing FK enables threaded comments.
        db.Column('parent_id', db.Integer, db.ForeignKey('activity_comments.id')),
        db.Column('user_id', db.Integer, db.ForeignKey('users.id')))
    op.create_table(
        'topics',
        db.Column('id', db.Integer, primary_key=True),
        db.Column('name', db.String(255)),
        # NOTE(review): 'inro' is likely a typo for 'intro'; kept verbatim.
        db.Column('inro', db.Text),
        db.Column('rate_count', db.Integer, default=0),
        db.Column('user_id', db.Integer, db.ForeignKey('users.id'), nullable=False))
    op.create_table(
        'topic_resources',
        db.Column('topic_id', db.Integer, db.ForeignKey('topics.id'), primary_key=True),
        db.Column('resource_id', db.Integer, db.ForeignKey('resources.id'), primary_key=True))
    op.create_table(
        'topic_users',
        db.Column('topic_id', db.Integer, db.ForeignKey('topics.id'), primary_key=True),
        db.Column('user_id', db.Integer, db.ForeignKey('users.id'), primary_key=True))
def downgrade():
    """Drop all activity-related tables in reverse dependency order."""
    for table_name in ('topic_users', 'topic_resources', 'topics',
                       'activity_comments', 'activity_resources', 'resources',
                       'activity_users', 'activities'):
        op.drop_table(table_name)
| {
"repo_name": "chenmingd/ScriptFan.com",
"path": "website/migrate/versions/19f590834366_add_activity_tables.py",
"copies": "2",
"size": "3285",
"license": "bsd-3-clause",
"hash": -8184891875619499000,
"line_mean": 39.0609756098,
"line_max": 95,
"alpha_frac": 0.6347031963,
"autogenerated": false,
"ratio": 3.3520408163265305,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9964112289910301,
"avg_score": 0.004526344543245928,
"num_lines": 82
} |
"""add additional fields to capture sample family Id and disposed status
Revision ID: 1e944af3ad04
Revises: 041fdb188c55
Create Date: 2018-12-31 11:10:25.824690
"""
import model.utils
import sqlalchemy as sa
from alembic import op
from rdr_service.participant_enums import SampleStatus
# revision identifiers, used by Alembic.
revision = "1e944af3ad04"
down_revision = "041fdb188c55"
branch_labels = None
depends_on = None
def upgrade(engine_name):
    """Dispatch to the engine-specific upgrade_<engine_name> function."""
    handler = globals()["upgrade_%s" % engine_name]
    handler()
def downgrade(engine_name):
    """Dispatch to the engine-specific downgrade_<engine_name> function."""
    handler = globals()["downgrade_%s" % engine_name]
    handler()
def upgrade_rdr():
    """Add disposed/family_id/status tracking columns to biobank_stored_sample."""
    # ### commands auto generated by Alembic - please adjust! ###
    new_columns = (
        sa.Column("disposed", model.utils.UTCDateTime(), nullable=True),
        sa.Column("family_id", sa.String(length=80), nullable=True),
        sa.Column("status", model.utils.Enum(SampleStatus), nullable=True),
    )
    for column in new_columns:
        op.add_column("biobank_stored_sample", column)
    # ### end Alembic commands ###
def downgrade_rdr():
    """Drop the sample-tracking columns added by upgrade_rdr()."""
    # ### commands auto generated by Alembic - please adjust! ###
    for column_name in ("status", "family_id", "disposed"):
        op.drop_column("biobank_stored_sample", column_name)
    # ### end Alembic commands ###
def upgrade_metrics():
    """No schema changes for the metrics database in this revision."""
    # ### commands auto generated by Alembic - please adjust! ###
    # ### end Alembic commands ###
def downgrade_metrics():
    """No schema changes to revert for the metrics database."""
    # ### commands auto generated by Alembic - please adjust! ###
    # ### end Alembic commands ###
| {
"repo_name": "all-of-us/raw-data-repository",
"path": "rdr_service/alembic/versions/1e944af3ad04_da_814_added_additional_fields_to_.py",
"copies": "1",
"size": "1583",
"license": "bsd-3-clause",
"hash": 2558314855798292500,
"line_mean": 28.3148148148,
"line_max": 110,
"alpha_frac": 0.6910928617,
"autogenerated": false,
"ratio": 3.502212389380531,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9690723431369082,
"avg_score": 0.0005163639422898681,
"num_lines": 54
} |
"""Add additional year to the climate files
python add_new_year.py <scenario> <year_to_add>
The previous year is choosen based on if the year to add is a leap year or not,
if it is, then pick a year four years ago, if it isn't, use last year
"""
import glob
import sys
import os
import subprocess
from tqdm import tqdm
from pyiem.util import logger
LOG = logger()
def parse_filename(filename):
    """Return (lon, lat) parsed from a climate filename like ``95.72x41.32.cli``.

    The longitude is negated (files appear to encode west longitudes without
    a sign — TODO confirm against the file naming convention).
    """
    stem = filename.rsplit(".", 1)[0]
    parts = stem.split("x")
    return -float(parts[0]), float(parts[1])
def workflow(filename, newyear, analogyear):
    """Append ``newyear`` to one climate file by cloning ``analogyear``'s block.

    Rewrites the header's simulated-year count, then copies the analog year's
    data section with the year string substituted.  No-ops if the file already
    contains the new year.
    """
    # Figure out what this file's lon/lat values are
    lon, lat = parse_filename(filename)
    # BUG FIX: the file handle was previously never closed.
    with open(filename) as fh:
        lines = fh.readlines()
    # Replace the header information denoting years simulated
    years_simulated = (newyear - 2007) + 1
    lines[4] = (
        " %.2f %.2f 289 %i 2007"
        " %i\n"
    ) % (lat, lon, years_simulated, years_simulated)
    data = "".join(lines)
    if data.find("1\t1\t%s" % (newyear,)) > 0:
        LOG.info("%s already has %s data", filename, newyear)
        return
    pos = data.find("1\t1\t%s" % (analogyear,))
    if pos < 0:
        # Without the analog year present we would clone garbage; skip instead.
        LOG.info("%s lacks %s data, skipping", filename, analogyear)
        return
    pos2 = data.find("1\t1\t%s" % (analogyear + 1,))
    content = data[pos:pos2] if pos2 > 0 else data[pos:]
    with open("/tmp/%s" % (filename,), "w") as fh:
        # careful here to get the line feeds right
        fh.write(data + content.replace(str(analogyear), str(newyear)))
    # List form avoids shell quoting issues with unusual filenames.
    subprocess.call(["mv", "/tmp/%s" % (filename,), filename])
def compute_analog_year(year):
    """Return the analog year whose data is cloned for *year*.

    Treats every fourth year as a leap year: a leap year maps to the leap
    year four back; other years map to the previous year, stepping one
    further back when that previous year is a leap year.
    """
    if year % 4 == 0:
        return year - 4
    previous = year - 1
    if previous % 4 == 0:
        previous -= 1
    return previous
def main(argv):
    """Go Main Go.

    argv: [script, scenario, year_to_add].  Walks every subdirectory of
    /i/<scenario>/cli and runs workflow() on each *.cli file, cloning the
    computed analog year's data for the new year.
    """
    scenario = argv[1]
    year = int(argv[2])
    analogyear = compute_analog_year(year)
    LOG.info("Using analog year %s for new year %s", analogyear, year)
    # Work relative to the scenario's climate-file root.
    os.chdir("/i/%s/cli" % (scenario,))
    for mydir in tqdm(glob.glob("*")):
        os.chdir(mydir)
        for fn in glob.glob("*.cli"):
            workflow(fn, year, analogyear)
        os.chdir("..")
# Entry point when executed as a script: python add_new_year.py <scenario> <year>.
if __name__ == "__main__":
    main(sys.argv)
def test_analog():
    """Regression cases for compute_analog_year()."""
    for year, expected in ((2021, 2019), (2020, 2016)):
        assert compute_analog_year(year) == expected
| {
"repo_name": "akrherz/idep",
"path": "scripts/cligen/add_new_year.py",
"copies": "2",
"size": "2610",
"license": "mit",
"hash": -177412779348833500,
"line_mean": 30.4457831325,
"line_max": 79,
"alpha_frac": 0.6042145594,
"autogenerated": false,
"ratio": 3.2954545454545454,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4899669104854546,
"avg_score": null,
"num_lines": null
} |
"""Add address relation
Revision ID: ff700db83195
Revises: f85ef1ef556c
Create Date: 2019-10-03 19:15:55.716640
"""
from typing import List, Callable
import sqlalchemy as sa
from alembic import op
from sqlalchemy import orm
from sqlalchemy.orm import declarative_base
# revision identifiers, used by Alembic.
revision = 'ff700db83195'
down_revision = 'f85ef1ef556c'
branch_labels = None
depends_on = None
Base = declarative_base()
DEFAULT_CITY = "Dresden"
DEFAULT_COUNTRY = "Germany"
class _Address(Base):
    """A baked version of the Address table for easier data migration"""
    __tablename__ = 'address'
    id = sa.Column(sa.Integer, primary_key=True)
    street = sa.Column(sa.String(), nullable=False)
    number = sa.Column(sa.String(), nullable=False)
    addition = sa.Column(sa.String())
    zip_code = sa.Column(sa.String(), nullable=False, default="01217")
    city = sa.Column(sa.String(), nullable=False, default=DEFAULT_CITY)
    state = sa.Column(sa.String(), nullable=True)
    country = sa.Column(sa.String(), nullable=False, default=DEFAULT_COUNTRY)

    # temporary columns for easier data migration;
    # they record which building/room an address row was generated from
    tmp_building_id = sa.Column(sa.Integer, nullable=True)
    # joined via the temporary column — there is no real FK during migration
    building = orm.relationship(
        '_Building',
        primaryjoin='foreign(_Address.tmp_building_id) == _Building.id'
    )
    tmp_room_id = sa.Column(sa.Integer, nullable=True)
    # joined via the temporary column, same as `building`
    room = orm.relationship(
        '_Room',
        primaryjoin='foreign(_Address.tmp_room_id) == _Room.id'
    )
# These are subsets of tables as they are on the base revision
class _Building(Base):
    """Subset of the ``building`` table as it is on the base revision."""
    __tablename__ = 'building'
    id = sa.Column(sa.Integer, primary_key=True)
    number = sa.Column(sa.String())
    short_name = sa.Column(sa.String())
    street = sa.Column(sa.String())
class _Room(Base):
    """Subset of the ``room`` table as it is on the base revision."""
    __tablename__ = 'room'
    id = sa.Column(sa.Integer, primary_key=True)
    number = sa.Column(sa.String())
    level = sa.Column(sa.Integer)
    inhabitable = sa.Column(sa.Boolean)
    building_id = sa.Column(sa.Integer, sa.ForeignKey(_Building.id))
    building = orm.relationship(_Building)
class _RoomAfter(_Room):
    """``room`` as it looks after this migration added ``address_id``."""
    address_id = sa.Column(sa.Integer, sa.ForeignKey(_Address.id))
class _User(Base):
    """Subset of the ``user`` table as it is on the base revision."""
    __tablename__ = 'user'
    id = sa.Column(sa.Integer, primary_key=True)
    login = sa.Column(sa.String(40), nullable=False, unique=True)
    room_id = sa.Column(sa.Integer, sa.ForeignKey(_Room.id))
    room = orm.relationship(_Room)
class _UserAfter(_User):
    """``user`` as it looks after this migration added ``address_id``."""
    address_id = sa.Column(sa.Integer, sa.ForeignKey(_Address.id))
def upgrade():
    """Create the ``address`` table and wire ``room``/``user`` to it.

    Schema changes and data migrations are deliberately interleaved: the
    data steps rely on the temporary ``tmp_building_id``/``tmp_room_id``
    columns, and on a dummy address used as a server default while the new
    NOT NULL ``address_id`` columns are introduced.  Teardown actions are
    collected in ``cleanups`` and executed last.
    """
    # deferred teardown actions, run after all data migrations completed
    cleanups: List[Callable] = []

    # SCHEMA MIGRATION

    # renaming constraint 'address' → 'building_address'
    op.create_unique_constraint('building_address', 'building', ['street', 'number'])
    op.drop_constraint('address', 'building', type_='unique')

    op.create_table(
        'address',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('street', sa.String(), nullable=False),
        sa.Column('number', sa.String(), nullable=False),
        sa.Column('addition', sa.String(), nullable=True),
        sa.Column('zip_code', sa.String(), nullable=False),
        sa.Column('city', sa.String(), nullable=False, server_default=DEFAULT_CITY),
        sa.Column('state', sa.String(), nullable=True),
        sa.Column('country', sa.String(), nullable=False, server_default=DEFAULT_COUNTRY),
        # Temporary columns (dropped again in the cleanup phase)
        sa.Column('tmp_building_id', sa.Integer, nullable=True),
        sa.Column('tmp_room_id', sa.Integer, nullable=True),
        sa.PrimaryKeyConstraint('id'),
        sa.UniqueConstraint('street', 'number', 'addition', 'zip_code', 'city',
                            'state', 'country')
    )
    cleanups.append(lambda: op.drop_column('address', 'tmp_building_id'))
    cleanups.append(lambda: op.drop_column('address', 'tmp_room_id'))

    bind = op.get_bind()
    session = orm.Session(bind=bind)

    # DATA MIGRATION I: add dummy address
    # Its id serves as the temporary server default for the NOT NULL
    # address_id columns added below.
    dummy_id = add_dummy_address(session)
    dummy_default_cause = sa.schema.DefaultClause(f"{dummy_id}")

    # DATA MIGRATION II: add building addresses
    add_building_addresses(session)
    # DATA MIGRATION III: add room addresses
    add_room_addresses(session)

    # FURTHER SCHEMA MIGRATION…
    op.add_column('room', sa.Column('address_id', sa.Integer(), nullable=False,
                                    server_default=dummy_default_cause))
    cleanups.append(lambda: op.alter_column('room', 'address_id', server_default=None))
    op.create_index(op.f('ix_room_address_id'), 'room', ['address_id'], unique=False)
    op.create_foreign_key(None, 'room', 'address', ['address_id'], ['id'])

    # DATA MIGRATION IV: set `address_id` to building's address for uninhabitable rooms
    set_uninhabitable_room_addresses(session)
    # DATA MIGRATION IV: set `address_id` to room's address for inhabitable rooms
    set_inhabitable_room_addresses(session)

    # FURTHER SCHEMA MIGRATION…
    op.add_column('user', sa.Column('address_id', sa.Integer(), nullable=False,
                                    server_default=dummy_default_cause))
    cleanups.append(lambda: op.alter_column('user', 'address_id', server_default=None))
    op.create_index(op.f('ix_user_address_id'), 'user', ['address_id'], unique=False)
    op.create_foreign_key(None, 'user', 'address', ['address_id'], ['id'])

    # DATA MIGRATION VI: set `user.address` for users with room
    set_user_addresses(session)

    # FURTHER SCHEMA MIGRATION (cleanup)
    for action in cleanups:
        action()
def set_user_addresses(session):
    """Point every user who has a room at that room's address."""
    stmt = (
        sa.update(_User)
        .values(address_id=_RoomAfter.address_id)
        .where(_RoomAfter.id == _User.room_id)  # implies `room_id` is not null
    )
    session.execute(stmt)
def set_inhabitable_room_addresses(session):
    """Give each inhabitable room the address generated from it."""
    stmt = (
        sa.update(_Room)
        .where(_Room.inhabitable)
        .values(address_id=_Address.id)
        .where(_Room.id == _Address.tmp_room_id)
    )
    session.execute(stmt)
def set_uninhabitable_room_addresses(session):
    """Give each uninhabitable room the address of its building."""
    stmt = (
        sa.update(_Room)
        .where(sa.sql.not_(_Room.inhabitable))
        .values(address_id=_Address.id)
        .where(_Room.building_id == _Address.tmp_building_id)
    )
    session.execute(stmt)
def add_room_addresses(session):
    """Create one address row per inhabitable room.

    Street/number come from the room's building; ``addition`` encodes the
    room as "<level>-<number>".  The originating room id is kept in the
    temporary ``tmp_room_id`` column.
    """
    room_select = (
        sa.select(_Room.id, (sa.cast(_Room.level, sa.String)
                             + "-"
                             + _Room.number).label("addition"))
        .select_from(_Room)
        .select_from(_Building)
        .where(_Building.id == _Room.building_id)
        .where(_Room.inhabitable)
        .column(_Building.street)
        .column(_Building.number)
        .alias('inhabitable_room_info')
    )
    session.execute(
        sa.insert(_Address)
        # target column order mirrors the select: room id → tmp_room_id,
        # then addition, street, number
        .from_select([_Address.tmp_room_id, _Address.addition, _Address.street, _Address.number],
                     room_select)
    )
def add_building_addresses(session):
    """Create one address row per building, recording its origin building."""
    source = sa.select(_Building.street, _Building.number, _Building.id)
    targets = [_Address.street, _Address.number, _Address.tmp_building_id]
    stmt = (
        sa.insert(_Address)
        .from_select(targets, source)
        .returning(_Address.id)
    )
    session.execute(stmt)
def add_dummy_address(session):
    """Insert a placeholder address and return its freshly assigned id."""
    # noinspection SpellCheckingInspection
    placeholder = _Address(street="Niemandsstraße", number="42", zip_code="33602",
                           city="Bielefeld", state="Niedersachsen")
    session.add(placeholder)
    session.commit()
    # re-fetch so the database-generated primary key is populated
    session.refresh(placeholder)
    return placeholder.id
def downgrade():
    """Revert the address relation.

    Drops the FK columns/indexes from ``user`` and ``room``, drops the
    ``address`` table, and restores the original ``address`` unique
    constraint name on ``building``.
    """
    op.drop_constraint('user_address_id_fkey', 'user', type_='foreignkey')
    op.drop_index(op.f('ix_user_address_id'), table_name='user')
    op.drop_column('user', 'address_id')
    op.drop_constraint('room_address_id_fkey', 'room', type_='foreignkey')
    op.drop_index(op.f('ix_room_address_id'), table_name='room')
    op.drop_column('room', 'address_id')
    op.drop_table('address')
    # rename constraint 'building_address' back to 'address'
    op.create_unique_constraint('address', 'building', ['street', 'number'])
    op.drop_constraint('building_address', 'building', type_='unique')
| {
"repo_name": "agdsn/pycroft",
"path": "pycroft/model/alembic/versions/ff700db83195_add_address_relation.py",
"copies": "1",
"size": "8146",
"license": "apache-2.0",
"hash": -4867022263849872000,
"line_mean": 33.6340425532,
"line_max": 97,
"alpha_frac": 0.6434451407,
"autogenerated": false,
"ratio": 3.4356268467707896,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9575868455829302,
"avg_score": 0.0006407063282974531,
"num_lines": 235
} |
"""Add Admin role.
Revision ID: 3be0d9d2c9f8
Revises: 3148b80be376
Create Date: 2013-10-05 14:07:22.359908
"""
# revision identifiers, used by Alembic.
revision = '3be0d9d2c9f8'
down_revision = '3148b80be376'
import json
import sqlalchemy as sa
from alembic import op
from datetime import datetime
from sqlalchemy.sql import table, column, and_
# Lightweight table stubs (not full models) so this migration can issue
# plain INSERT/UPDATE/DELETE statements.
roles_table = table('roles',
    column('id', sa.Integer),
    column('name', sa.String),
    column('permissions_json', sa.Text),
    column('description', sa.Text),
    column('modified_by_id', sa.Integer),
    column('created_at', sa.DateTime),
    column('updated_at', sa.DateTime),
    column('context_id', sa.Integer),
    column('scope', sa.String),
    )

contexts_table = table('contexts',
    column('id', sa.Integer),
    column('name', sa.String),
    column('description', sa.Text),
    )
def upgrade():
    """Insert the Admin context (forced to id 0) and the
    System Administrator role."""
    current_datetime = datetime.now()
    # Add an explicit Admin context with id 0
    op.execute(
        contexts_table.insert().values(
            name='System Administration',
            description='Context for super-user permissions.',
            ))
    # Insert with id set seems to fail on mysql due to auto-incremented id.
    # force it after the fact.
    op.execute(
        contexts_table.update().values(id=0).where(
            contexts_table.c.name=='System Administration'))
    # Add the System Administrator role
    op.execute(
        roles_table.insert().values(
            name='System Administrator',
            description='gGRC System Administrator with super-user privileges',
            # special admin marker permission granting everything
            permissions_json=json.dumps({
                '__GGRC_ADMIN__': ['__GGRC_ALL__'],
                }),
            created_at=current_datetime,
            updated_at=current_datetime,
            scope='Admin'
            ))
def downgrade():
    """Remove the System Administrator role and the Admin context (id 0)."""
    op.execute(
        roles_table.delete().where(
            and_(
                roles_table.c.name=='System Administrator',
                roles_table.c.scope=='Admin')
            ))
    op.execute(contexts_table.delete().where(contexts_table.c.id==0))
| {
"repo_name": "vladan-m/ggrc-core",
"path": "src/ggrc_basic_permissions/migrations/versions/20131005140722_3be0d9d2c9f8_add_admin_role.py",
"copies": "2",
"size": "1994",
"license": "apache-2.0",
"hash": -1588333283341607700,
"line_mean": 26.6944444444,
"line_max": 75,
"alpha_frac": 0.646439318,
"autogenerated": false,
"ratio": 3.6320582877959926,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5278497605795993,
"avg_score": null,
"num_lines": null
} |
"""Add a flag to pause and wait for all Travis jobs to complete."""
from __future__ import print_function
import os
import sys
import json
import time
from tox.config import _split_env as split_env
try:
import urllib.request as urllib2
except ImportError:
import urllib2 # Python 2
from .utils import TRAVIS_FACTORS, parse_dict
# Exit code constants. They are purposely undocumented.
# Please do not depend on their values.
NO_GITHUB_TOKEN = 32
INVALID_POLLING_INTERVAL = 33
INCOMPLETE_TRAVIS_ENVIRONMENT = 34
JOBS_FAILED = 35
def travis_after(ini, envlist):
    """Wait for all jobs to finish, then exit successfully.

    Exits the process with one of the module-level status codes when a
    precondition fails or some required job did not pass.
    """
    # after-all disabled for pull requests
    if os.environ.get('TRAVIS_PULL_REQUEST', 'false') != 'false':
        return

    if not after_config_matches(ini, envlist):
        return  # This is not the one that needs to wait

    # A GitHub token is required to authenticate against the Travis API
    github_token = os.environ.get('GITHUB_TOKEN')
    if not github_token:
        print('No GitHub token given.', file=sys.stderr)
        sys.exit(NO_GITHUB_TOKEN)

    api_url = os.environ.get('TRAVIS_API_URL', 'https://api.travis-ci.org')
    build_id = os.environ.get('TRAVIS_BUILD_ID')
    job_number = os.environ.get('TRAVIS_JOB_NUMBER')

    # seconds between polls of the Travis API; defaults to 5
    try:
        polling_interval = int(os.environ.get('TRAVIS_POLLING_INTERVAL', 5))
    except ValueError:
        print('Invalid polling interval given: {0}'.format(
            repr(os.environ.get('TRAVIS_POLLING_INTERVAL'))), file=sys.stderr)
        sys.exit(INVALID_POLLING_INTERVAL)

    if not all([api_url, build_id, job_number]):
        print('Required Travis environment not given.', file=sys.stderr)
        sys.exit(INCOMPLETE_TRAVIS_ENVIRONMENT)

    # This may raise an Exception, and it should be printed
    job_statuses = get_job_statuses(
        github_token, api_url, build_id, polling_interval, job_number)

    if not all(job_statuses):
        print('Some jobs were not successful.')
        sys.exit(JOBS_FAILED)

    print('All required jobs were successful.')
def after_config_matches(ini, envlist):
    """Determine if this job should wait for the others.

    Returns True only when the [travis:after] section exists, every env it
    names is in this job's `envlist`, and all its travis/env requirements
    match the current environment variables.
    """
    section = ini.sections.get('travis:after', {})

    if not section:
        return False  # Never wait if it's not configured

    if 'envlist' in section or 'toxenv' in section:
        if 'toxenv' in section:
            print('The "toxenv" key of the [travis:after] section is '
                  'deprecated in favor of the "envlist" key.', file=sys.stderr)
        toxenv = section.get('toxenv')
        # every env required by the config must be among the envs we run
        required = set(split_env(section.get('envlist', toxenv) or ''))
        actual = set(envlist)
        if required - actual:
            return False

    # Translate travis requirements to env requirements
    env_requirements = [
        (TRAVIS_FACTORS[factor], value) for factor, value
        in parse_dict(section.get('travis', '')).items()
        if factor in TRAVIS_FACTORS
    ] + [
        (name, value) for name, value
        in parse_dict(section.get('env', '')).items()
    ]

    # each required environment variable must match exactly
    return all([
        os.environ.get(name) == value
        for name, value in env_requirements
    ])
def get_job_statuses(github_token, api_url, build_id,
                     polling_interval, job_number):
    """Wait for all the travis jobs to complete.

    Once the other jobs are complete, return a list of booleans,
    indicating whether or not the job was successful. Ignore jobs
    marked "allow_failure".
    """
    # Exchange the GitHub token for a Travis API access token
    auth = get_json('{api_url}/auth/github'.format(api_url=api_url),
                    data={'github_token': github_token})['access_token']

    while True:
        build = get_json('{api_url}/builds/{build_id}'.format(
            api_url=api_url, build_id=build_id), auth=auth)
        # every job except this one and the allowed failures
        jobs = [job for job in build['jobs']
                if job['number'] != job_number and
                not job['allow_failure']]  # Ignore allowed failures
        if all(job['finished_at'] for job in jobs):
            break  # All the jobs have completed
        elif any(job['state'] != 'passed'
                 for job in jobs if job['finished_at']):
            break  # Some required job that finished did not pass
        print('Waiting for jobs to complete: {job_numbers}'.format(
            job_numbers=[job['number'] for job in jobs
                         if not job['finished_at']]))
        time.sleep(polling_interval)

    return [job['state'] == 'passed' for job in jobs]
def get_json(url, auth=None, data=None):
    """Request *url* and return the response parsed as JSON.

    When *data* is given it is JSON-encoded and sent as the request body,
    which makes urllib issue a POST rather than a GET.
    """
    headers = {
        'Accept': 'application/vnd.travis-ci.2+json',
        'User-Agent': 'Travis/Tox-Travis-1.0a',
        # User-Agent must start with "Travis/" in order to work
    }
    if auth:
        headers['Authorization'] = 'token {auth}'.format(auth=auth)

    params = {}
    if data:
        headers['Content-Type'] = 'application/json'
        params['data'] = json.dumps(data).encode('utf-8')

    request = urllib2.Request(url, headers=headers, **params)
    response = urllib2.urlopen(request).read()
    return json.loads(response.decode('utf-8'))
| {
"repo_name": "tox-dev/tox-travis",
"path": "src/tox_travis/after.py",
"copies": "2",
"size": "5104",
"license": "mit",
"hash": -8898074917901155000,
"line_mean": 33.7210884354,
"line_max": 79,
"alpha_frac": 0.6291144201,
"autogenerated": false,
"ratio": 3.6798846431146357,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 147
} |
"""Add a fun 'group 8 ball' command."""
import random
import re
from plumeria.command import commands, channel_only, CommandError
from plumeria.core.activity import tracker
from plumeria.transport.status import ONLINE
# maximum number of users if we don't have a list of recent chatters
MAX_USERS = 10
def map_choice(choice):
    """Format a ``(name, probability)`` pair as ``**name** (NN%)``.

    Probabilities of 90% or more get a clapping-hands prefix.
    """
    name, probability = choice
    prefix = "\N{CLAPPING HANDS SIGN} " if probability >= 0.9 else ""
    return "{0}**{1}** ({2:.0f}%)".format(prefix, name, probability * 100)
@commands.create("group prob", "gp", category="Fun")
@channel_only
async def group_prob(message):
    """
    A fun command that picks random people as "most likely" and "least likely"
    from the channel for a question.

    Example::

        /gp find love in a bathroom

    Raises CommandError when no question is given or too few users are
    available; otherwise returns the formatted response string.
    """
    query = message.content.strip()
    if not len(query):
        raise CommandError("A statement or question must be posed!")
    users = await tracker.get_recent_users(message.channel)

    # if we don't have a lot of users, get the member list of the channel
    if len(users) < 2:
        users = []
        for member in message.channel.members:
            # don't include ourselves
            if member == message.channel.transport.user:
                continue
            if member.status == ONLINE:
                users.append(member)
            # bail out on very large channels — picking from everyone
            # would be meaningless (and users=[] triggers the error below)
            if len(users) > MAX_USERS:
                users = []
                break
    if len(users) < 2:
        raise CommandError("Not enough people have said anything recently to do this command.")

    # assign every user a random probability, highest first
    choices = list(map(lambda user: (user.display_name, random.random()), users))
    choices.sort(key=lambda c: -c[1])

    # BUG FIX: the regex is now a raw string — "\s" in a plain string is an
    # invalid escape sequence (SyntaxWarning on modern Python).
    return "The {statement} **{question}** is {most} " \
           "and the *least* likely is **{least}** ({least_pct:.0f}%)".format(
        statement='probability of' if re.search(r"^[A-Za-z-]+ing\s", query, re.IGNORECASE) else 'most likely to',
        question=query,
        most=", ".join(map(map_choice, choices[:-1][:3])),
        least=choices[-1][0],
        least_pct=choices[-1][1] * 100,
    )
def setup():
    """Plugin entry point: register the command with the bot."""
    commands.add(group_prob)
| {
"repo_name": "sk89q/Plumeria",
"path": "orchard/group_prob.py",
"copies": "1",
"size": "2156",
"license": "mit",
"hash": -5929352779811134000,
"line_mean": 29.8,
"line_max": 112,
"alpha_frac": 0.603432282,
"autogenerated": false,
"ratio": 3.7495652173913046,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9821291257140989,
"avg_score": 0.006341248450062978,
"num_lines": 70
} |
"""Add age and disabled columns to category table
Revision ID: 2eb054267c19
Revises: 1145cc4ac43b
Create Date: 2015-02-01 20:37:41.995995
"""
# revision identifiers, used by Alembic.
revision = '2eb054267c19'
down_revision = '1145cc4ac43b'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.sql import table
def upgrade():
    """Add disabled/min_age/max_age columns and backfill initial values."""
    op.add_column('shopping_category',
                  sa.Column('disabled', sa.Boolean))
    op.add_column('shopping_category',
                  sa.Column('min_age', sa.Integer))
    op.add_column('shopping_category',
                  sa.Column('max_age', sa.Integer))

    # Table stub used only to build the UPDATE statements below
    shopping_categories = table(
        'shopping_category',
        sa.Column('id', sa.Integer, primary_key=True),
        sa.Column('name', sa.Unicode(75), nullable=False),
        sa.Column('daily_limit', sa.Integer, nullable=True),
        sa.Column('monthly_limit', sa.Integer, nullable=False),
        sa.Column('family_wide', sa.Boolean, nullable=False),
        sa.Column('order', sa.Integer, nullable=False),
        sa.Column('min_age', sa.Integer, nullable=True),
        sa.Column('max_age', sa.Integer, nullable=True),
        sa.Column('disabled', sa.Boolean, nullable=False)
    )

    # every category starts out enabled
    op.execute(
        shopping_categories.update().
        values({'disabled': op.inline_literal(False)})
    )
    # Age ranges for specific categories.
    # NOTE(review): the age values are passed as string literals ('0', '2', …)
    # into Integer columns — presumably relying on database coercion; confirm.
    op.execute(
        shopping_categories.update().
        where(shopping_categories.c.name == op.inline_literal('Baby')).
        values({'min_age': op.inline_literal('0'),
                'max_age': op.inline_literal('2')})
    )
    op.execute(
        shopping_categories.update().
        where(shopping_categories.c.name == op.inline_literal('Books')).
        values({'min_age': op.inline_literal('0'),
                'max_age': op.inline_literal('16')})
    )
    op.execute(
        shopping_categories.update().
        where(shopping_categories.c.name == op.inline_literal('Toys')).
        values({'min_age': op.inline_literal('0'),
                'max_age': op.inline_literal('16')})
    )
def downgrade():
    """Remove the columns added by this revision."""
    for column_name in ('disabled', 'min_age', 'max_age'):
        op.drop_column('shopping_category', column_name)
| {
"repo_name": "jlutz777/FreeStore",
"path": "alembic/versions/2eb054267c19_add_age_and_disabled_columns_to_.py",
"copies": "1",
"size": "2229",
"license": "mit",
"hash": 6362944066433224000,
"line_mean": 31.3043478261,
"line_max": 72,
"alpha_frac": 0.6114849708,
"autogenerated": false,
"ratio": 3.6009693053311795,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9704088482084856,
"avg_score": 0.0016731588092646693,
"num_lines": 69
} |
"""Add agency name and sub-tier agency name to published award financial assistance
Revision ID: 5664b0e3e179
Revises: ce1087583081
Create Date: 2017-05-19 02:47:18.081619
"""
# revision identifiers, used by Alembic.
revision = '5664b0e3e179'
down_revision = 'ce1087583081'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade(engine_name):
    """Dispatch to the engine-specific upgrade function by name."""
    migration = globals()["upgrade_%s" % engine_name]
    migration()
def downgrade(engine_name):
    """Dispatch to the engine-specific downgrade function by name."""
    migration = globals()["downgrade_%s" % engine_name]
    migration()
def upgrade_data_broker():
    """Add awarding/funding agency name columns to the published FABS table."""
    added_columns = (
        'awarding_agency_name',
        'awarding_sub_tier_agency_n',
        'funding_agency_name',
        'funding_sub_tier_agency_na',
    )
    for column_name in added_columns:
        op.add_column('published_award_financial_assistance',
                      sa.Column(column_name, sa.Text(), nullable=True))
    ### end Alembic commands ###
def downgrade_data_broker():
    """Drop the agency name columns added by this revision (reverse order)."""
    dropped_columns = (
        'funding_sub_tier_agency_na',
        'funding_agency_name',
        'awarding_sub_tier_agency_n',
        'awarding_agency_name',
    )
    for column_name in dropped_columns:
        op.drop_column('published_award_financial_assistance', column_name)
    ### end Alembic commands ###
| {
"repo_name": "fedspendingtransparency/data-act-broker-backend",
"path": "dataactcore/migrations/versions/5664b0e3e179_add_agency_name_and_sub_tier_agency_.py",
"copies": "1",
"size": "1469",
"license": "cc0-1.0",
"hash": -6512015536320690000,
"line_mean": 34.8292682927,
"line_max": 124,
"alpha_frac": 0.7304288632,
"autogenerated": false,
"ratio": 3.041407867494824,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4271836730694824,
"avg_score": null,
"num_lines": null
} |
"""Add agency_type to SQS and FileRequest tables
Revision ID: a9b778fd5181
Revises: c7da6e4c9b8a
Create Date: 2018-08-22 11:07:57.731050
"""
# revision identifiers, used by Alembic.
revision = 'a9b778fd5181'
down_revision = 'c7da6e4c9b8a'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade(engine_name):
    """Dispatch to the engine-specific upgrade function by name."""
    migration = globals()["upgrade_%s" % engine_name]
    migration()
def downgrade(engine_name):
    """Dispatch to the engine-specific downgrade function by name."""
    migration = globals()["downgrade_%s" % engine_name]
    migration()
def upgrade_data_broker():
    """Add the ``agency_type`` enum column to file_request and sqs."""
    # ### commands auto generated by Alembic - please adjust! ###
    # new enum type shared by both columns added below
    op.execute("CREATE TYPE agency_types AS ENUM ('awarding', 'funding')")
    op.add_column('file_request', sa.Column('agency_type', sa.Enum('awarding', 'funding', name='agency_types'), server_default='awarding', nullable=True))
    # backfill existing rows before tightening the column to NOT NULL
    op.execute("""
        UPDATE file_request
        SET agency_type = 'awarding'
        WHERE agency_type IS NULL
    """)
    op.alter_column('file_request', 'agency_type',
                    existing_type=sa.Enum('awarding', 'funding', name='agency_types'),
                    nullable=False)
    op.create_index(op.f('ix_file_request_agency_type'), 'file_request', ['agency_type'], unique=False)
    op.add_column('sqs', sa.Column('agency_type', sa.Enum('awarding', 'funding', name='agency_types'), nullable=True))
    # ### end Alembic commands ###
def downgrade_data_broker():
    """Remove the ``agency_type`` columns and the enum type."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('sqs', 'agency_type')
    op.drop_index(op.f('ix_file_request_agency_type'), table_name='file_request')
    op.drop_column('file_request', 'agency_type')
    # the enum type can only be dropped once no column uses it
    op.execute("DROP TYPE agency_types")
    # ### end Alembic commands ###
| {
"repo_name": "fedspendingtransparency/data-act-broker-backend",
"path": "dataactcore/migrations/versions/a9b778fd5181_add_agency_type_to_sqs_and_filerequest_.py",
"copies": "1",
"size": "1761",
"license": "cc0-1.0",
"hash": 7032322507280407000,
"line_mean": 30.4464285714,
"line_max": 154,
"alpha_frac": 0.6394094265,
"autogenerated": false,
"ratio": 3.2793296089385473,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.931424424095094,
"avg_score": 0.020898958897521424,
"num_lines": 56
} |
"""Add a genome to an existing (genomeless) panel report.
"""
import os
import json
import requests
from requests.auth import HTTPBasicAuth
import sys
import argparse
# Load environment variables for request authentication parameters.
# Fail fast if credentials are missing.
if "FABRIC_API_PASSWORD" not in os.environ:
    sys.exit("FABRIC_API_PASSWORD environment variable missing")

if "FABRIC_API_LOGIN" not in os.environ:
    sys.exit("FABRIC_API_LOGIN environment variable missing")

FABRIC_API_LOGIN = os.environ['FABRIC_API_LOGIN']
FABRIC_API_PASSWORD = os.environ['FABRIC_API_PASSWORD']
# default to the production API host unless overridden
FABRIC_API_URL = os.environ.get('FABRIC_API_URL', 'https://api.fabricgenomics.com')
# HTTP basic auth used by every request in this script
auth = HTTPBasicAuth(FABRIC_API_LOGIN, FABRIC_API_PASSWORD)
def add_genome_to_clinical_report(clinical_report_id,
                                  proband_genome_id=None,
                                  proband_sex=None):
    """Attach a proband genome (and its sex) to an existing clinical report.

    Issues a PUT against the Fabric API and returns the parsed JSON
    response.
    """
    payload = json.dumps({
        'proband_genome_id': proband_genome_id,
        'proband_sex': proband_sex,
    })
    endpoint = "{}/reports/{}".format(FABRIC_API_URL, clinical_report_id)

    sys.stdout.write("Adding genome(s) to report...")
    sys.stdout.write("\n\n")
    sys.stdout.flush()

    result = requests.put(endpoint, auth=auth, data=payload)
    return result.json()
def upload_genome_to_project(project_id, label, sex, file_format, file_name, external_id=""):
    """Use the Omicia API to add a genome, in vcf format, to a project.
    Returns the newly uploaded genome's id.
    """
    # Construct request.
    # NOTE(review): the backslash continuation is *inside* the string
    # literal, so any leading whitespace on the second line becomes part
    # of the URL — verify the continuation line starts at column 0.
    url = "{}/projects/{}/genomes?genome_label={}&genome_sex={}&external_id={}\
&assembly_version=hg19&format={}"
    url = url.format(FABRIC_API_URL, project_id, label, sex, external_id, file_format)

    sys.stdout.write("Uploading genome...\n")
    with open(file_name, 'rb') as file_handle:
        # PUT the raw file body; the response JSON carries the new genome's id
        result = requests.put(url, auth=auth, data=file_handle)
        return result.json()
def main():
    """Main function. Add genomes and metadata to an existing clinical report.

    Parses CLI arguments, uploads the genome file to the project, then
    attaches the resulting genome id (and sex code) to the clinical report
    and prints the report summary.
    """
    parser = argparse.ArgumentParser(description='Add genome ids or vaast report ids to existing clinical reports.')
    parser.add_argument('clinical_report_id', metavar='clinical_report_id', type=int)
    parser.add_argument('project_id', metavar='project_id', type=int)
    parser.add_argument('genome_label', metavar='genome_label', type=str)
    parser.add_argument('sex', metavar='sex', type=str, choices=['female', 'male', 'unspecified'])
    parser.add_argument('file_format', metavar='file_format', type=str, choices=['vcf', 'vcf.gz', 'vcf.bz2'])
    parser.add_argument('file_name', metavar='file_name', type=str)
    parser.add_argument('--genome_external_id', metavar='genome_external_id', type=str)
    args = parser.parse_args()

    cr_id = args.clinical_report_id
    project_id = args.project_id
    genome_label = args.genome_label
    sex = args.sex
    file_format = args.file_format
    file_name = args.file_name
    genome_external_id = args.genome_external_id

    # Upload the genome file first; the response carries the new genome id.
    genome_json = upload_genome_to_project(project_id,
                                           genome_label,
                                           sex,
                                           file_format,
                                           file_name,
                                           genome_external_id)
    try:
        genome_id = genome_json["genome_id"]
        sys.stdout.write("genome_id: {}\n".format(genome_id))
    except KeyError:
        # .get avoids a second KeyError when no description is returned
        if genome_json.get('description'):
            sys.stdout.write('Error: {}\n'.format(genome_json['description']))
        else:
            sys.stdout.write('Something went wrong...')
        sys.exit("Exiting...")

    # Map the CLI sex choice onto the single-letter code.
    if sex == 'male':
        proband_sex = 'm'
    elif sex == 'female':
        proband_sex = 'f'
    else:
        proband_sex = 'u'

    # BUG FIX: previously the full word `sex` was passed here, leaving the
    # computed single-letter `proband_sex` code unused.
    json_response = add_genome_to_clinical_report(cr_id,
                                                  proband_genome_id=genome_id,
                                                  proband_sex=proband_sex,
                                                  )
    if "clinical_report" not in json_response.keys():
        # BUG FIX: sys.stderr is a stream, not a callable — write to it
        sys.stderr.write("{}\n".format(json_response))
        sys.exit("Failed to launch. Check report parameters for correctness.")
    clinical_report = json_response['clinical_report']

    # Print out the report summary; .get() keeps missing fields non-fatal.
    sys.stdout.write('Clinical Report Info:\n'
                     'id: {}\n'
                     'test_type: {}\n'
                     'accession_id: {}\n'
                     'created_on: {}\n'
                     'created_by: {}\n'
                     'status: {}\n'
                     'filter_id: {}\n'
                     'panel_id: {}\n'
                     'filter_name: {}\n'
                     'workspace_id: {}\n'
                     'sample_collected_date: {}\n'
                     'sample_received_date: {}\n'
                     'include_cosmic: {}\n'
                     'vaast_report_id: {}\n'
                     'mother_genome_id: {}\n'
                     'father_genome_id: {}\n'
                     'genome_id: {}\n'
                     'version: {}\n'
                     .format(clinical_report.get('id', 'Missing'),
                             clinical_report.get('test_type','Missing'),
                             clinical_report.get('accession_id','Missing'),
                             clinical_report.get('created_on','Missing'),
                             clinical_report.get('created_by','Missing'),
                             clinical_report.get('status', 'Missing'),
                             clinical_report.get('filter_id','Missing'),
                             clinical_report.get('panel_id','Missing'),
                             clinical_report.get('filter_name', 'Missing'),
                             clinical_report.get('workspace_id','Missing'),
                             clinical_report.get('sample_collected_date','Missing'),
                             clinical_report.get('sample_received_date','Missing'),
                             clinical_report.get('include_cosmic','Missing'),
                             clinical_report.get('vaast_report_id', 'Missing'),
                             clinical_report.get('mother_genome_id', 'Missing'),
                             clinical_report.get('father_genome_id', 'Missing'),
                             clinical_report.get('genome_id', 'Missing'),
                             clinical_report.get('version', 'Missing')))
# script entry point
if __name__ == "__main__":
    main()
| {
"repo_name": "Omicia/omicia_api_examples",
"path": "python/ClinicalReportLaunchers/upload_genome_to_panel_report.py",
"copies": "1",
"size": "6649",
"license": "mit",
"hash": -8974043081047927000,
"line_mean": 42.7434210526,
"line_max": 116,
"alpha_frac": 0.5391788239,
"autogenerated": false,
"ratio": 3.901995305164319,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9929293987737262,
"avg_score": 0.0023760282654114047,
"num_lines": 152
} |
"Add a label to all publications having other label(s). Both must exist."
from publications import utils
from publications.publication import PublicationSaver
qualifier_lookup = {'Collaborative': 3,
'Technology development': 2,
'Service': 1}
def add_label(db, new_label, existing_labels):
if not new_label:
raise ValueError('no new label given')
if not existing_labels:
raise ValueError('no existing labels given')
view = db.view('label/value', key=new_label, reduce=False)
if len(view) == 0:
raise ValueError("label %s does not exist" % new_label)
for existing_label in existing_labels:
view = db.view('label/value', key=existing_label, reduce=False)
if len(view) == 0:
raise ValueError("label %s does not exist" % existing_label)
view = db.view('publication/modified', include_docs=True)
for item in view:
qualifier = None
found = False
for existing_label in existing_labels:
if existing_label in item.doc['labels']:
found = True
if qualifier is None:
qualifier = item.doc['labels'][existing_label]
else:
qualifier = max(qualifier,
item.doc['labels'][existing_label])
if found:
for key, value in qualifier_lookup.items():
if value == qualifier:
qualifier = key
break
with PublicationSaver(doc=item.doc, db=db) as saver:
labels = item.doc['labels'].copy()
labels[new_label] = qualifier # May be None
saver['labels'] = labels
print(item.doc['_id'], item.doc['labels'], qualifier)
if __name__ == '__main__':
parser = utils.get_command_line_parser(
'Add a label to all publications having other label(s).')
parser.add_argument('--new', action='store', dest='new',
default=None, help='new label to add')
parser.add_argument('--existing', action='append', dest='existing',
default=None, help='existing label')
args = parser.parse_args()
utils.load_settings(filepath=args.settings)
db = utils.get_db()
add_label(db, args.new, args.existing)
| {
"repo_name": "pekrau/Publications",
"path": "publications/scripts/add_label.py",
"copies": "1",
"size": "2359",
"license": "mit",
"hash": -2739431958648613000,
"line_mean": 39.6724137931,
"line_max": 73,
"alpha_frac": 0.5731242052,
"autogenerated": false,
"ratio": 4.2428057553956835,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5315929960595683,
"avg_score": null,
"num_lines": null
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.