| text (string, lengths 4 to 1.02M) | meta (dict) |
|---|---|
from math import *
import numpy as np
from pyspheregl.utils.transformations import quaternion_matrix
def rotate_cartesian(q, v):
"""Rotate a point in Cartesian coordinates by the given quaternion,
returning a new point in Cartesian space."""
return v + 2.0 * np.cross(q[0:3], np.cross(q[0:3], v) + q[3]*v)
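# A minimal sanity check (hedged; assumes the [x, y, z, w] quaternion layout
# implied by q[0:3]/q[3] above): rotating the x unit vector 90 degrees about
# the z-axis should give the y unit vector.
#
#   q = np.array([0.0, 0.0, np.sin(np.pi/4), np.cos(np.pi/4)])
#   rotate_cartesian(q, np.array([1.0, 0.0, 0.0]))  # -> approx. [0., 1., 0.]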
def tuio_to_display(tuio_x, tuio_y, resolution=1200):
"""tuio_to_polar takes an x/y coordinate given in TUIO format (values 0 to 1)
where x measures rotation around the equator and y represents to angle between
the north and south poles
Returns Cartesian co-ordinates (i.e. ready to draw onscreen). resolution
specifies the pixel resolution of the display (must be square).
"""
lon, lat = tuio_to_polar(tuio_x, tuio_y)
display_x, display_y = polar_to_display(lon, lat, resolution)
return display_x, display_y
def polar_to_tuio(lon, lat):
"""polar_to_tuio takes a long/lat pair, where long is a value between 0 and 2pi
(rotation around the equator) and lat as a value between -pi/2(south pole) and pi/2 (north pole)
Returns corresponding tuio x,y co-ordinates
"""
lon = (lon - np.pi) % (2*np.pi)
if lon<0:
lon += 2*np.pi
x = lon / (2*np.pi) + 0.5
y = 1-((lat + (np.pi/2)) / np.pi)
return x, y
def tuio_to_polar(tuio_x, tuio_y):
"""tuio_to_polar takes an x/y coordinate given in TUIO format (values 0 to 1)
where x measures rotation around the equator and y represents to angle between
the north and south poles.
The returns these values as a long/lat pair, where long is a value between 0 and 2pi
(rotation around the equator) and lat as a value between -pi/2(south pole) and pi/2 (north pole)"""
lon = ((tuio_x-0.5)*2*pi + np.pi) % (2*np.pi)
lat = pi * (1-tuio_y) - (pi/2)
if lon>np.pi:
lon -= 2*np.pi
return lon, lat
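# Round-trip sanity check (hedged; relies on the wrap applied in
# polar_to_tuio above):
#
#   lon, lat = tuio_to_polar(0.25, 0.5)  # -> (pi/2, 0.0)
#   polar_to_tuio(lon, lat)              # -> approx. (0.25, 0.5)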
def az_to_polar(x, y):
"""Convert azimuthal x,y to polar co-ordinates"""
lat = -np.sqrt((x**2)+(y**2)) * np.pi + np.pi/2
lon = np.arctan2(-y,x)
return lon, lat
def polar_to_az(lon, lat):
"""Convert polar to azimuthal x,y co-ordinates """
r = (np.pi/2-lat)/np.pi
x,y = r * np.cos(lon), -r*np.sin(lon)
return x,y
def rawaz_to_polar(theta, r):
"""Convert azimuthal x,y to polar co-ordinates"""
lat = -r * np.pi + np.pi/2
lon = theta
return lon, lat
def polar_to_rawaz(lon, lat):
"""Convert polar to azimuthal x,y co-ordinates """
r = (np.pi/2-lat)/np.pi
return lon, r
def spiral_layout(n, C=3.6):
"""Return the spherical co-ordinates [phi, theta] for a uniform spiral layout
on the sphere, with n points.
From Nishio et. al. "Spherical SOM With Arbitrary Number of Neurons and Measure of Suitability"
WSOM 2005 pp. 323-330"""
phis = []
thetas = []
for k in range(n):
h = (2*k)/float(n-1) - 1
phi = np.arccos(h)
if k==0 or k==n-1:
theta = 0
else:
theta = thetas[-1] + (C/np.sqrt(n*(1-h**2)))
phis.append(phi-np.pi/2)
thetas.append(theta)
return list(zip(thetas, phis))
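# Example (a sketch; assumes the returned (theta, phi) pairs follow this
# module's (lon, lat) convention): lay out 100 roughly uniformly spaced
# points and convert them to Cartesian for rendering.
#
#   pts = spiral_layout(100)
#   xyz = [spherical_to_cartesian(p) for p in pts]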
def lat_ring(lat, n):
"""Return a ring of n points at constant latitude lat (input in degrees), equally spaced in longitude."""
inc = 360 / float(n)
rtn_list = []
i = 0
while i < 360:
rtn_list.append([np.radians(i), np.radians(lat)])
i += inc
return rtn_list
def lon_ring(lon, n):
"""Return a ring of n points along the meridian at constant longitude lon (input in degrees), equally spaced around the full circle."""
inc = 360 / float(n)
rtn_list = []
i = 0
while i < 360:
rtn_list.append([np.radians(lon), np.radians(i)])
i += inc
return rtn_list
def polar_to_display(lon, lat, resolution=1200):
"""polar_to_display takes a lon,lat pair and returns an onscreen x,y co-ordinates
in pixels.
Returns Cartesian co-ordinates (i.e. ready to draw onscreen). resolution
specifies the pixel resolution of the display (must be square).
"""
r = (pi/2-lat)/pi
w = resolution/2
x,y = w + r * w * cos(lon), w - r*w*sin(lon)
return x,y
def normalize(x):
"""Return x scaled to unit length."""
return x / np.sqrt(np.sum(x*x))
def cart_to_az(x,y,z):
"""Convert a Cartesian normal vector form directly to azimuthal coordinates"""
l = np.sqrt(x*x+y*y+z*z)
l2 = np.sqrt(x*x+y*y)
r = np.arccos(-z/l) / np.pi
return r*x/l2, -r*y/l2
def spherical_distance_cartesian(a, b):
"""Returns the spherical distance between two unit normal vectors a, b"""
return np.arctan(np.linalg.norm(np.cross(a,b)) / np.dot(a,b))
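# Quick check: two orthogonal unit vectors lie a quarter great-circle apart.
#
#   spherical_distance_cartesian(np.array([1.0, 0.0, 0.0]), np.array([0.0, 1.0, 0.0]))
#   # -> pi/2 (approx. 1.5708)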
def polar_adjust_scale(lon, lat, s=1):
"""Rescale lon, lat by contracting or expanding from the north pole.
This is necessary to compensate for the not quite complete coverage of the projection
For the test PufferSphere, s=0.833 is a good compensation
s sets the scaling factor. """
r = (np.pi/2-lat)/np.pi
x,y = r * np.cos(lon)*s, r*np.sin(lon)*s
lat = -np.sqrt((x**2)+(y**2)) * np.pi + np.pi/2
lon = np.arctan2(y,x)
return lon, lat
def spherical_distance(p1, p2):
"""Given two points p1, p2 (in radians), return
the great circle distance between the two points."""
lat1, lon1 = p1
lat2, lon2 = p2
dlat = lat2-lat1
dlon = lon2-lon1
a = sin(dlat/2)**2 + cos(lat1)*cos(lat2)*sin(dlon/2)**2
c = 2*atan2(sqrt(a), sqrt(1-a))
return c
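# Example: the north pole to a point on the equator is a quarter great
# circle (inputs are (lat, lon) pairs in radians, as unpacked above).
#
#   spherical_distance((pi/2, 0.0), (0.0, 0.0))  # -> pi/2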
# return initial heading between two points
def spherical_course(p1, p2):
"""Return the initial heading from point p1 (in radians) to point p2 (in radians)."""
lat1, lon1 = p1
lat2, lon2 = p2
if cos(lat1)<1e-10:
if lat1>0:
return pi
else:
return -pi
tc1=atan2(sin(lon1-lon2)*cos(lat2),
cos(lat1)*sin(lat2)-sin(lat1)*cos(lat2)*cos(lon1-lon2))
return tc1
def spherical_radial(p1, distance, radial):
"""Return a point distance units away from p1 (in radians) along the given
radial (in radians)"""
lon1, lat1 = p1
d = distance
tc = radial
lat =asin(sin(lat1)*cos(d)+cos(lat1)*sin(d)*cos(tc))
dlon=atan2(sin(tc)*sin(d)*cos(lat1),cos(d)-sin(lat1)*sin(lat))
lon=((lon1-dlon +pi)%(2*pi) - pi)
return lon, lat
def spherical_line(p1, p2, n=20):
"""Given two points p1, p2 (in radians), return a series of points
equispaced along the great circle connecting them. n specifies
the number of points to use"""
pts = []
p1 = (-(p1[1]-pi), p1[0])
p2 = (-(p2[1]-pi), p2[0])
d = spherical_distance(p1, p2)
# print d
lat1, lon1 = p1
lat2, lon2 = p2
if d<=0:
return []
for i in range(n):
f = i/float(n-1)
A=sin((1-f)*d)/sin(d)
B=sin(f*d)/sin(d)
x = A*cos(lat1)*cos(lon1) + B*cos(lat2)*cos(lon2)
y = A*cos(lat1)*sin(lon1) + B*cos(lat2)*sin(lon2)
z = A*sin(lat1) + B*sin(lat2)
lat=atan2(z,sqrt(x**2+y**2))
lon=atan2(y,x)
# print lat, lon
pts.append((lon, -(lat-pi)))
return pts
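# Example usage (a sketch; the endpoints are illustrative): sample 20 points
# along the great circle between two (lon, lat) pairs and project them for
# drawing with polar_to_display().
#
#   path = spherical_line((0.0, 0.2), (1.0, 0.4))
#   pixels = [polar_to_display(lon, lat) for lon, lat in path]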
# import transformations
# def spherical_flat_circle(pt, rad, n=20):
# """Given a point p1 (in radians), return a series of points
# equispaced along a circle around that point, tangent to a unit
# sphere surface. The points are returned in Cartesian space,
# along with a set of normals which point outwards along
# the sphere centre vector.
# rad specifies the radius.
# n specifies the number of points to use. """
# centre = spherical_to_cartesian(pt)
# rotate = transformations.rotation_matrix(2*np.pi/n,centre)[:3,:3]
# pt = np.cross(np.array(centre), np.array([0,0,1]))
# pt = pt/np.linalg.norm(pt)
# pt = (pt-centre)*rad + centre
# pts = []
# norms = []
# for i in range(n):
# pt = np.dot(pt, rotate)
# pts.append(pt)
# norms.append(np.array(centre))
# return pts, norms
def spherical_circle(p1, rad, n=20):
"""Given a point p1 (in radians), return a series of points
equispaced along a circle around that point. n specifies the number of
points to use. """
pts = []
for i in range(n):
f = i/float(n)
lon,lat = p1
lon, lat = spherical_radial((lon,lat), rad, f*2*pi)
pts.append((lon,lat))
return pts
def spherical_arc(p1, radius, arc_1, arc_2, n=20):
"""Given a point p1 (in radians), return a series of points
equispaced along an arc around that point. n specifies the number of
points to use. """
pts = []
# Determine where n number of points will fall
start=arc_1
end = arc_2
if arc_1 > arc_2:
start,end = end,start
angle = end - start
increment = angle/float(n)
while start < end:
lon,lat = p1
lon, lat = spherical_radial((lon,lat), radius, start)
pts.append((lon,lat))
start += increment
return pts
def spherical_midpoint(p1, p2):
"""Return the midpoint of p1, p2, in lot, lat format"""
p1 = (-(p1[1]-pi), p1[0])
p2 = (-(p2[1]-pi), p2[0])
d = spherical_distance(p1, p2)
lat1, lon1 = p1
lat2, lon2 = p2
if d<=0:
return None
f = 0.5
A=sin((1-f)*d)/sin(d)
B=sin(f*d)/sin(d)
x = A*cos(lat1)*cos(lon1) + B*cos(lat2)*cos(lon2)
y = A*cos(lat1)*sin(lon1) + B*cos(lat2)*sin(lon2)
z = A*sin(lat1) + B*sin(lat2)
lat=atan2(z,sqrt(x**2+y**2))
lon=atan2(y,x)
return (lon, -(lat-pi))
def subdivide_spherical_triangles(vertices, faces, uv=None):
"""Subdivide each spherical triangle face into four, optionally carrying UV co-ordinates through."""
newfaces = []
vertices = list(vertices)
def midpoint(p1, p2):
return ((p1[0]+p2[0])/2,(p1[1]+p2[1])/2)
for face in faces:
v1 = spherical_midpoint(vertices[face[0]],vertices[face[1]])
v2 = spherical_midpoint(vertices[face[1]],vertices[face[2]])
v3 = spherical_midpoint(vertices[face[2]],vertices[face[0]])
if uv:
uv1 = midpoint(uv[face[0]], uv[face[1]])
uv2 = midpoint(uv[face[1]], uv[face[2]])
uv3 = midpoint(uv[face[2]], uv[face[0]])
uv += [uv1,uv2,uv3]
vindex1 = len(vertices)
vindex2 = len(vertices)+1
vindex3 = len(vertices)+2
vertices += [v1,v2,v3]
# new face
newfaces.append((vindex3, vindex1, vindex2))
newfaces.append((vindex3, face[0], vindex1))
newfaces.append((vindex2, vindex1, face[1]))
newfaces.append((face[2],vindex3, vindex2))
return vertices, newfaces, uv
def subdivide_spherical_quads(vertices, faces, uv=None):
"""Subdivide each spherical quad face into four, optionally carrying UV co-ordinates through."""
newfaces = []
vertices = list(vertices)
def midpoint(p1, p2):
return ((p1[0]+p2[0])/2,(p1[1]+p2[1])/2)
for face in faces:
v1 = spherical_midpoint(vertices[face[0]],vertices[face[1]])
v2 = spherical_midpoint(vertices[face[1]],vertices[face[2]])
v3 = spherical_midpoint(vertices[face[2]],vertices[face[3]])
v4 = spherical_midpoint(vertices[face[3]],vertices[face[0]])
v7 = spherical_midpoint(v2,v4)
if uv:
uv1 = midpoint(uv[face[0]], uv[face[1]])
uv2 = midpoint(uv[face[1]], uv[face[2]])
uv3 = midpoint(uv[face[2]], uv[face[3]])
uv4 = midpoint(uv[face[3]], uv[face[0]])
uv5 = midpoint(uv4, uv2)
uv += [uv1,uv2,uv3, uv4, uv5]
vindex1 = len(vertices)
vindex2 = len(vertices)+1
vindex3 = len(vertices)+2
vindex4 = len(vertices)+3
vindex5 = len(vertices)+4
vertices += [v1,v2,v3,v4,v7]
# new face
newfaces.append((face[0], vindex1, vindex5, vindex4))
newfaces.append((vindex1, face[1], vindex2, vindex5))
newfaces.append((vindex5, vindex2, face[2], vindex3))
newfaces.append((vindex4, vindex5, vindex3, face[3]))
return vertices, newfaces, uv
def spherical_triangle(pts,uv=None,iter=2):
"""Return a triangle mesh for the triangle given by pts (in (lon,lat) pair form).
Triangle is subdivied iter times; don't use more than 3 or 4!
"""
vertices = pts
faces = [[0,1,2]]
for i in range(iter):
vertices, faces,uv = subdivide_spherical_triangles(vertices, faces, uv)
return vertices, faces, uv
def spherical_quad(pts, iter=2, uv=None, **kwargs):
"""Return a quad mesh for the quadrilateral given by pts (in (lon,lat) pair form).
Triangle is subdivied iter times; don't use more than 3 or 4!
"""
vertices = pts
faces = [[0,1,2,3]]
for i in range(iter):
vertices, faces, uv = subdivide_spherical_quads(vertices, faces, uv)
return vertices, faces, uv
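# Example (hedged; the corner values are illustrative): each pass splits
# every quad into four, so one starting quad subdivided iter=2 times yields
# 16 faces.
#
#   pts = [(0.0, 0.4), (0.5, 0.4), (0.5, -0.4), (0.0, -0.4)]
#   vertices, faces, uv = spherical_quad(pts, iter=2)
#   len(faces)  # -> 16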
def spherical_to_cartesian(pt):
"""Convert a lon, lat co-ordinate to an a Cartesian x,y,z point on the unit sphere."""
lon, lat = pt
lat += np.pi/2
st = np.sin(lat)
x = np.cos(lon) * st
y = np.sin(lon) * st
z = -np.cos(lat) # to match shader
return x,y,z
def polar_to_cart(lon, lat):
"""Convert lon, lat (radians) to a Cartesian x,y,z point on the unit sphere."""
lat += np.pi/2
st = np.sin(lat)
x = np.cos(lon) * st
y = np.sin(lon) * st
z = np.cos(lat)
return x,y,z
def polar_to_cart2(lon, lat):
"""As polar_to_cart, but treats the input angle as a colatitude (no pi/2 shift)."""
st = np.sin(lat)
x = np.cos(lon) * st
y = np.sin(lon) * st
z = np.cos(lat)
return x,y,z
def cartesian_to_spherical(pt):
"""Convert a Cartesian 3D point to lon, lat co-ordinates of the projection
onto the unit sphere."""
pt = np.array(pt)
n = np.sqrt(pt.dot(pt))
pt = pt / n
lat = np.arccos(pt[2]) - np.pi/2
lon = np.arctan2(pt[1], pt[0])
return lon, lat
def cart_to_polar(x,y,z):
return cartesian_to_spherical([x,y,z])
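# Round-trip check: polar_to_cart() (z = +cos) inverts cleanly through
# cart_to_polar(); note that spherical_to_cartesian() flips z to match the
# shader and is therefore not the inverse of cartesian_to_spherical().
#
#   cart_to_polar(*polar_to_cart(0.3, 0.2))  # -> approx. (0.3, 0.2)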
def tangent_coord_system(origin, up_point):
"""Given a pair of points in Cartesian co-ordinates on a unit sphere,
return three vectors representing an orthogonal co-ordinate system,
which touches the sphere at origin, and has an up vector pointing towards
the projection of up_point out from the sphere onto the tangent plane
which touches origin. """
normal = origin / np.sqrt(origin.dot(origin))
d = np.dot(up_point, origin)
proj = up_point - d*normal
# form the co-ordinate system via the cross product
up = proj / np.sqrt(proj.dot(proj))
forward = normal
right = np.cross(up, forward)
return up, right, forward
def spherical_rectangle(centre, width, height, up, x_ratio=1, y_ratio=1, **kwargs):
"""
Return a spherical rectangle given by rect and up vector.
lat, lon give the centre of the rectangle
w, h give the width and height of the rectangle in cartesian units
up_lat, up_lon, give the direction of the up vector of the rectangle
Drawing procedes as follows:
Position is converted to Cartesian co-ordinates on the sphere's surface
Up vector is converted to Cartesian co-ordinates on the sphere's surface
Up vector is projected onto tangent plane by shooting a ray from the up point to the tangent plane
Right vector is produced from the cross product of the up vector and the normal vector
Rectangle is drawn on tangent plane using up and right vector
Rectangle is projected onto sphere by normalising the co-ordinates
Cartesian rectangle co-ordinates are converted back to spherical co-ordinates
All other arguments are passed directly onto spherical_quad, which
forms the great circle sections to make up the square patch.
"""
# convert to cartesian
orig = np.array(spherical_to_cartesian(centre))
upv = np.array(spherical_to_cartesian(up))
# form co-ordinate system
up, right, forward = tangent_coord_system(orig, upv)
# create rectangle
p1 = orig - right*width - up*height
p2 = orig + right*width - up*height
p3 = orig + right*width + up*height
p4 = orig - right*width + up*height
# project onto sphere and convert to spherical
pts = [cartesian_to_spherical(p) for p in [p1,p2,p3,p4]]
uv = [[0.0,0.0], [x_ratio,0.0], [x_ratio,y_ratio], [0.0,y_ratio]]
return spherical_quad(pts, uv=uv, **kwargs)
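# Example usage (a sketch; values are illustrative): a small rectangle
# centred on the equator with its up vector towards the north pole,
# subdivided twice.
#
#   vertices, faces, uv = spherical_rectangle(
#       centre=(0.0, 0.0), width=0.1, height=0.2, up=(0.0, 1.0), iter=2)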
|
{
"content_hash": "f5cd93aefdc7a684a0d5380ac2562ab5",
"timestamp": "",
"source": "github",
"line_count": 498,
"max_line_length": 103,
"avg_line_length": 33.467871485943775,
"alnum_prop": 0.5890682186356273,
"repo_name": "johnhw/pypuffersphere",
"id": "1618339c45d4a7164b2988bc2d6fb54ce71d9dfd",
"size": "16667",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyspheregl/sphere/sphere.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "176821"
}
],
"symlink_target": ""
}
|
"""
WSGI config for CharlieChat project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
from whitenoise.django import DjangoWhiteNoise
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "CharlieChat.settings")
application = get_wsgi_application()
application = DjangoWhiteNoise(application)
|
{
"content_hash": "16a13cfe7828c2d72abde8de27d8dfb0",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 78,
"avg_line_length": 27.27777777777778,
"alnum_prop": 0.7942973523421588,
"repo_name": "signalw/charliechat",
"id": "47d2b3f3a901aee8b7bf49a930e461ecbf3258fd",
"size": "491",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "CharlieChat/wsgi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "12395"
},
{
"name": "HTML",
"bytes": "9118"
},
{
"name": "JavaScript",
"bytes": "2510"
},
{
"name": "Python",
"bytes": "97132"
}
],
"symlink_target": ""
}
|
from django.db import models
"""
Representa las opciones de estados que tiene cada título nacional
"""
class EstadoPostituloNacional(models.Model):
NO_VIGENTE = u'No vigente'
VIGENTE = u'Vigente'
nombre = models.CharField(max_length=50, unique=True)
class Meta:
app_label = 'postitulos'
ordering = ['nombre']
db_table = 'postitulos_estado_postitulo_nacional'
def __unicode__(self):
return self.nombre
|
{
"content_hash": "027f97f3fee88be3cdbb6c2a53bdab1a",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 65,
"avg_line_length": 24.105263157894736,
"alnum_prop": 0.6681222707423581,
"repo_name": "MERegistro/meregistro",
"id": "aa96d1e215106903c90d107401a61546403bed52",
"size": "483",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "meregistro/apps/postitulos/models/EstadoPostituloNacional.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "79500"
},
{
"name": "HTML",
"bytes": "782188"
},
{
"name": "JavaScript",
"bytes": "106755"
},
{
"name": "PLpgSQL",
"bytes": "515442"
},
{
"name": "Python",
"bytes": "7190737"
},
{
"name": "Shell",
"bytes": "804"
}
],
"symlink_target": ""
}
|
from nltk import metrics
from functools import reduce  # needed on Python 3, where reduce is no longer a builtin
class LevenshteinReduce(object):
def __init__(self, phrase, tracks):
"""
:param phrase: (str) phrase or ngram
:param tracks: (list) tracks to perform best string matching with
:return: Returns the track from the list of tracks best matching the given phrase
"""
self.phrases = phrase
self.tracks = tracks
def get_most_similar_track(self):
"""
Determines the Levenshtein distance between each track and the phrase
:return: track (object) the track with the smallest Levenshtein distance to the phrase
"""
if self.tracks is None:
return
levenshteins = [
{
'levenshtein': metrics.edit_distance(self.phrases, track['name']),
'url': track['url'],
'name': track['name'],
'artist': track['artist'],
'image': track['image'],
'phrase': self.phrases,
}
for track in self.tracks
]
minimum_distance = None
if levenshteins:
minimum_distance = reduce(
lambda d1, d2: d1 if d1['levenshtein'] < d2['levenshtein'] else d2,
levenshteins
)
return minimum_distance
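# Example usage (a sketch; the track dicts are illustrative and assume the
# keys accessed above):
#
#   tracks = [
#       {'name': 'Hey Jude', 'url': 'u1', 'artist': 'a', 'image': 'i'},
#       {'name': 'Hey Joe', 'url': 'u2', 'artist': 'a', 'image': 'i'},
#   ]
#   best = LevenshteinReduce('hey jude', tracks).get_most_similar_track()
#   best['name']  # -> 'Hey Jude'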
|
{
"content_hash": "6abe26d5d0ee55d495d0d036d79a41f8",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 89,
"avg_line_length": 31.902439024390244,
"alnum_prop": 0.5412844036697247,
"repo_name": "husman/WoTrack",
"id": "e5359c7a899e33237e91249d8aac31e4c15f1026",
"size": "1308",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apps/wordtrack/levenshtein_reduce.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "549"
},
{
"name": "HTML",
"bytes": "2910"
},
{
"name": "Python",
"bytes": "16380"
}
],
"symlink_target": ""
}
|
"""
Template tags used in admin or board
"""
from django import template
register = template.Library()
@register.filter(name='calcul_indent')
def calcul_indent(value, coef=20):
return (value+1)*coef
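# Example template usage (a sketch; `item.level` is a hypothetical context
# variable, and the library is loaded under this module's file name):
#
#   {% load documents_admin %}
#   <li style="margin-left: {{ item.level|calcul_indent }}px">...</li>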
|
{
"content_hash": "e9acf5f89dc84b9f7e810c8fec0292a6",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 38,
"avg_line_length": 20.5,
"alnum_prop": 0.7268292682926829,
"repo_name": "sveetch/sveedocuments",
"id": "2eae1fbdb98c0cf9ca2d33af7d3de3db2724dc60",
"size": "229",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sveedocuments/templatetags/documents_admin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "394407"
},
{
"name": "HTML",
"bytes": "27149"
},
{
"name": "JavaScript",
"bytes": "105924"
},
{
"name": "Python",
"bytes": "145236"
},
{
"name": "Ruby",
"bytes": "1005"
}
],
"symlink_target": ""
}
|
"""The users module."""
from . import views
|
{
"content_hash": "1d826f7c5da5fd1444fbb43a5d65d90a",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 23,
"avg_line_length": 14.666666666666666,
"alnum_prop": 0.6590909090909091,
"repo_name": "dtnewman/zappa_boilerplate",
"id": "83e231a187b94b68b53de33c94adc0d8d2a8d189",
"size": "68",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "zappa_boilerplate/user/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "10095"
},
{
"name": "Python",
"bytes": "22324"
}
],
"symlink_target": ""
}
|
import gzip
import io
from ..file import _File
from .base import NonstandardExtHDU
from .hdulist import HDUList
from ..header import Header, _pad_length
from ..util import fileobj_name
from ....utils import lazyproperty
class FitsHDU(NonstandardExtHDU):
"""
A non-standard extension HDU for encapsulating entire FITS files within a
single HDU of a container FITS file. These HDUs have an extension (that is
an XTENSION keyword) of FITS.
The FITS file contained in the HDU's data can be accessed by the `hdulist`
attribute which returns the contained FITS file as an `HDUList` object.
"""
_extension = 'FITS'
@lazyproperty
def hdulist(self):
self._file.seek(self._data_offset)
fileobj = io.BytesIO()
# Read the data into a BytesIO--reading directly from the file
# won't work (at least for gzipped files) due to problems deep
# within the gzip module that make it difficult to read gzip files
# embedded in another file
fileobj.write(self._file.read(self.size))
fileobj.seek(0)
if self._header['COMPRESS']:
fileobj = gzip.GzipFile(fileobj=fileobj)
return HDUList.fromfile(fileobj, mode='readonly')
@classmethod
def fromfile(cls, filename, compress=False):
"""
Like `FitsHDU.fromhdulist()`, but creates a FitsHDU from a file on
disk.
Parameters
----------
filename : str
The path to the file to read into a FitsHDU
compress : bool, optional
Gzip compress the FITS file
"""
return cls.fromhdulist(HDUList.fromfile(filename), compress=compress)
@classmethod
def fromhdulist(cls, hdulist, compress=False):
"""
Creates a new FitsHDU from a given HDUList object.
Parameters
----------
hdulist : HDUList
A valid Headerlet object.
compress : bool, optional
Gzip compress the FITS file
"""
fileobj = bs = io.BytesIO()
if compress:
if hasattr(hdulist, '_file'):
name = fileobj_name(hdulist._file)
else:
name = None
fileobj = gzip.GzipFile(name, mode='wb', fileobj=bs)
hdulist.writeto(fileobj)
if compress:
fileobj.close()
# A proper HDUList should still be padded out to a multiple of 2880
# technically speaking
padding = (_pad_length(bs.tell()) * cls._padding_byte).encode('ascii')
bs.write(padding)
bs.seek(0)
cards = [
('XTENSION', cls._extension, 'FITS extension'),
('BITPIX', 8, 'array data type'),
('NAXIS', 1, 'number of array dimensions'),
('NAXIS1', len(bs.getvalue()), 'Axis length'),
('PCOUNT', 0, 'number of parameters'),
('GCOUNT', 1, 'number of groups'),
]
# Add the XINDn keywords proposed by Perry, though nothing is done with
# these at the moment
if len(hdulist) > 1:
for idx, hdu in enumerate(hdulist[1:]):
cards.append(('XIND' + str(idx + 1), hdu._header_offset,
'byte offset of extension {}'.format(idx + 1)))
cards.append(('COMPRESS', compress, 'Uses gzip compression'))
header = Header(cards)
return cls._readfrom_internal(_File(bs), header=header)
@classmethod
def match_header(cls, header):
card = header.cards[0]
if card.keyword != 'XTENSION':
return False
xtension = card.value
if isinstance(xtension, str):
xtension = xtension.rstrip()
return xtension == cls._extension
# TODO: Add header verification
def _summary(self):
# TODO: Perhaps make this more descriptive...
return (self.name, self.ver, self.__class__.__name__, len(self._header))
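# Example usage (a sketch; assumes FitsHDU is re-exported at the package
# level, as in recent astropy releases):
#
#   from astropy.io import fits
#   inner = fits.HDUList([fits.PrimaryHDU()])
#   hdu = fits.FitsHDU.fromhdulist(inner, compress=True)
#   fits.HDUList([fits.PrimaryHDU(), hdu]).writeto('container.fits')
#   fits.open('container.fits')[1].hdulist  # -> the embedded HDUList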
|
{
"content_hash": "68bbe43e1b9135bc44461e1a54c3c221",
"timestamp": "",
"source": "github",
"line_count": 122,
"max_line_length": 80,
"avg_line_length": 32.368852459016395,
"alnum_prop": 0.5907824765763484,
"repo_name": "DougBurke/astropy",
"id": "c2c811aad6b3729ae6b1928c212e7821d71797e2",
"size": "4013",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "astropy/io/fits/hdu/nonstandard.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "367279"
},
{
"name": "C++",
"bytes": "1057"
},
{
"name": "HTML",
"bytes": "1172"
},
{
"name": "Python",
"bytes": "8390850"
},
{
"name": "TeX",
"bytes": "805"
}
],
"symlink_target": ""
}
|
"""
oar.rest_api.views.resource
~~~~~~~~~~~~~~~~~~~~~~~~~~~
Define resources api interaction
"""
from __future__ import division
from flask import url_for, g
from oar.lib import db
from oar.lib.models import Resource
from . import Blueprint
from ..utils import Arg
app = Blueprint('resources', __name__, url_prefix="/resources")
@app.route('/', methods=['GET'])
@app.route('/<any(details, full):detailed>', methods=['GET'])
@app.route('/nodes/<string:network_address>', methods=['GET'])
@app.args({'offset': Arg(int, default=0),
'limit': Arg(int)})
def index(offset, limit, network_address=None, detailed=False):
"""Replie a comment to the post.
:param offset: post's unique id
:type offset: int
:form email: author email address
:form body: comment body
:reqheader Accept: the response content type depends on
:mailheader:`Accept` header
:status 302: and then redirects to :http:get:`/resources/(int:resource_id)`
:status 400: when form parameters are missing
"""
query = db.queries.get_resources(network_address, detailed)
page = query.paginate(offset, limit)
g.data['total'] = page.total
g.data['links'] = page.links
g.data['offset'] = offset
g.data['items'] = []
for item in page:
attach_links(item)
g.data['items'].append(item)
@app.route('/<int:resource_id>', methods=['GET'])
def show(resource_id):
resource = Resource.query.get_or_404(resource_id)
g.data.update(resource.asdict())
attach_links(g.data)
@app.route('/<int:resource_id>/jobs', methods=['GET'])
def jobs(resource_id):
g.data.update(Resource.query.get_or_404(resource_id).asdict())
def attach_links(resource):
rel_map = (
("node", "member", "index"),
("show", "self", "show"),
("jobs", "collection", "jobs"),
)
links = []
for title, rel, endpoint in rel_map:
if title == "node" and "network_address" in resource:
url = url_for('%s.%s' % (app.name, endpoint),
network_address=resource['network_address'])
links.append({'rel': rel, 'href': url, 'title': title})
elif title != "node" and "id" in resource:
url = url_for('%s.%s' % (app.name, endpoint),
resource_id=resource['id'])
links.append({'rel': rel, 'href': url, 'title': title})
resource['links'] = links
|
{
"content_hash": "08547dd68120119bbc6cfa1624e71d72",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 79,
"avg_line_length": 31.192307692307693,
"alnum_prop": 0.6017262638717632,
"repo_name": "fr0uty/oartm",
"id": "3d2cc725d91eb390b3efbd3b22edc24899b68581",
"size": "2481",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "oar/rest_api/views/resource.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "2765"
},
{
"name": "Perl",
"bytes": "1000"
},
{
"name": "Python",
"bytes": "601158"
},
{
"name": "Shell",
"bytes": "5491"
}
],
"symlink_target": ""
}
|
import json
data = dict()
with open("config/config.json") as config_data:
data['config'] = json.load(config_data)
with open("config/creds.json") as creds_data:
data['creds'] = json.load(creds_data)
|
{
"content_hash": "50b54b761541e48469bf4a211f06964f",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 47,
"avg_line_length": 22.555555555555557,
"alnum_prop": 0.6995073891625616,
"repo_name": "competitiveoverwatch/RankVerification",
"id": "184f3876dfb5519867065aca89b780e6c8c67afc",
"size": "203",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "config/__init__.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "15658"
},
{
"name": "HTML",
"bytes": "13166"
},
{
"name": "Python",
"bytes": "29540"
}
],
"symlink_target": ""
}
|
import logging
from box import CredentialsV2, BoxClient
from box.client import BoxClientException
from modularodm import fields
import requests
from framework.auth import Auth
from framework.exceptions import HTTPError
from website.addons.base import exceptions
from website.addons.base import AddonOAuthUserSettingsBase, AddonOAuthNodeSettingsBase
from website.addons.base import StorageAddonBase
from website.addons.box import settings
from website.addons.box.serializer import BoxSerializer
from website.oauth.models import ExternalProvider
logger = logging.getLogger(__name__)
class Box(ExternalProvider):
name = 'Box'
short_name = 'box'
client_id = settings.BOX_KEY
client_secret = settings.BOX_SECRET
auth_url_base = settings.BOX_OAUTH_AUTH_ENDPOINT
callback_url = settings.BOX_OAUTH_TOKEN_ENDPOINT
auto_refresh_url = callback_url
refresh_time = settings.REFRESH_TIME
expiry_time = settings.EXPIRY_TIME
default_scopes = ['root_readwrite']
def handle_callback(self, response):
"""View called when the Oauth flow is completed. Adds a new BoxUserSettings
record to the user and saves the user's access token and account info.
"""
client = BoxClient(CredentialsV2(
response['access_token'],
response['refresh_token'],
settings.BOX_KEY,
settings.BOX_SECRET,
))
about = client.get_user_info()
return {
'provider_id': about['id'],
'display_name': about['name'],
'profile_url': 'https://app.box.com/profile/{0}'.format(about['id'])
}
class BoxUserSettings(AddonOAuthUserSettingsBase):
"""Stores user-specific box information
"""
oauth_provider = Box
serializer = BoxSerializer
def revoke_remote_oauth_access(self, external_account):
try:
# TODO: write client for box, stop using third-party lib
requests.request(
'POST',
settings.BOX_OAUTH_REVOKE_ENDPOINT,
params={
'client_id': settings.BOX_KEY,
'client_secret': settings.BOX_SECRET,
'token': external_account.oauth_key,
}
)
except requests.HTTPError:
pass
class BoxNodeSettings(StorageAddonBase, AddonOAuthNodeSettingsBase):
oauth_provider = Box
serializer = BoxSerializer
folder_id = fields.StringField(default=None)
folder_name = fields.StringField()
folder_path = fields.StringField()
_folder_data = None
_api = None
@property
def api(self):
"""authenticated ExternalProvider instance"""
if self._api is None:
self._api = Box(self.external_account)
return self._api
@property
def display_name(self):
return '{0}: {1}'.format(self.config.full_name, self.folder_id)
def fetch_folder_name(self):
self._update_folder_data()
return getattr(self, 'folder_name', '').replace('All Files', '/ (Full Box)')
def fetch_full_folder_path(self):
self._update_folder_data()
return self.folder_path
def _update_folder_data(self):
if self.folder_id is None:
return None
if not self._folder_data:
try:
Box(self.external_account).refresh_oauth_key()
client = BoxClient(self.external_account.oauth_key)
self._folder_data = client.get_folder(self.folder_id)
except BoxClientException:
return
self.folder_name = self._folder_data['name']
self.folder_path = '/'.join(
[x['name'] for x in self._folder_data['path_collection']['entries']]
+ [self._folder_data['name']]
)
self.save()
def set_folder(self, folder_id, auth):
self.folder_id = str(folder_id)
self._update_folder_data()
self.save()
self.nodelogger.log(action='folder_selected', save=True)
def clear_settings(self):
self.folder_id = None
self.folder_name = None
self.folder_path = None
def deauthorize(self, auth=None, add_log=True):
"""Remove user authorization from this node and log the event."""
folder_id = self.folder_id
self.clear_settings()
if add_log:
extra = {'folder_id': folder_id}
self.nodelogger.log(action='node_deauthorized', extra=extra, save=True)
self._update_folder_data()
self.clear_auth()
def serialize_waterbutler_credentials(self):
if not self.has_auth:
raise exceptions.AddonError('Addon is not authorized')
try:
Box(self.external_account).refresh_oauth_key()
return {'token': self.external_account.oauth_key}
except BoxClientException as error:
raise HTTPError(error.status_code, data={'message_long': error.message})
def serialize_waterbutler_settings(self):
if self.folder_id is None:
raise exceptions.AddonError('Folder is not configured')
return {'folder': self.folder_id}
def create_waterbutler_log(self, auth, action, metadata):
self.owner.add_log(
'box_{0}'.format(action),
auth=auth,
params={
'path': metadata['materialized'],
'project': self.owner.parent_id,
'node': self.owner._id,
'folder': self.folder_id,
'urls': {
'view': self.owner.web_url_for('addon_view_or_download_file', provider='box', action='view', path=metadata['path']),
'download': self.owner.web_url_for('addon_view_or_download_file', provider='box', action='download', path=metadata['path']),
},
},
)
##### Callback overrides #####
def after_delete(self, node=None, user=None):
self.deauthorize(Auth(user=user), add_log=True)
self.save()
def on_delete(self):
self.deauthorize(add_log=False)
self.clear_auth()
self.save()
|
{
"content_hash": "1938c5656fd381d8d712f1463458de53",
"timestamp": "",
"source": "github",
"line_count": 191,
"max_line_length": 144,
"avg_line_length": 32.36649214659686,
"alnum_prop": 0.6041734066645099,
"repo_name": "abought/osf.io",
"id": "d9e030f231bc40d4b7ba753afa9044150319ea74",
"size": "6206",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "website/addons/box/model.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "157412"
},
{
"name": "HTML",
"bytes": "110361"
},
{
"name": "JavaScript",
"bytes": "1634802"
},
{
"name": "Mako",
"bytes": "666400"
},
{
"name": "Perl",
"bytes": "13885"
},
{
"name": "Python",
"bytes": "5569606"
}
],
"symlink_target": ""
}
|
import sys
import numpy as np
from spinalcordtoolbox.image import Image, empty_like, add_suffix
from spinalcordtoolbox.utils import SCTArgumentParser, Metavar, parse_num_list, init_sct, printv, set_loglevel
# PARAMETERS
class Param(object):
# The constructor
def __init__(self):
self.almost_zero = np.finfo(float).eps
def get_parser():
parser = SCTArgumentParser(
description='Compute SNR using methods described in [Dietrich et al., Measurement of'
' signal-to-noise ratios in MR images: Influence of multichannel coils, parallel '
'imaging, and reconstruction filters. J Magn Reson Imaging 2007; 26(2): 375-385].'
)
mandatoryArguments = parser.add_argument_group("\nMANDATORY ARGUMENTS")
mandatoryArguments.add_argument(
'-i',
required=True,
help="R|Image to compute the SNR on. (Example: b0s.nii.gz)\n"
"- For '-method diff' and '-method mult', the image must be 4D, as SNR will be computed "
"along the 4th dimension.\n"
"- For '-method single', the image can either be 3D or 4D. If a 4D image is passed, a specific "
"3D volume should be specified using the '-vol' argument.",
metavar=Metavar.file)
optional = parser.add_argument_group("\nOPTIONAL ARGUMENTS")
optional.add_argument(
"-h",
"--help",
action="help",
help="Show this help message and exit")
optional.add_argument(
'-m',
help='Binary (or weighted) mask within which SNR will be averaged. Example: dwi_moco_mean_seg.nii.gz',
metavar=Metavar.file,
default='')
optional.add_argument(
'-m-noise',
help="Binary (or weighted) mask within which noise will be calculated. Only valid for '-method single'.",
metavar=Metavar.file,
default='')
optional.add_argument(
'-method',
help='R|Method to use to compute the SNR (default: diff):\n'
" - diff: Substract two volumes (defined by -vol) and estimate noise variance within the ROI "
"(flag '-m' is required). Requires a 4D volume.\n"
" - mult: Estimate noise variance over time across volumes specified with '-vol'. Requires a 4D volume.\n"
" - single: Compute the mean signal in the mask specified by '-m' and estimate the noise variance in a "
"mask specified by '-m-noise'. If the noise mask is in the background (air), the noise variance needs to "
"be corrected for Rayleigh distribution (set '-rayleigh 1'). If the noise mask is located in a "
"region with high signal (eg: tissue), noise distribution can be assumed Gaussian and there is no need to "
"correct for Rayleigh distribution (use '-rayleigh 0'). This implementation corresponds to the SNRstdv "
"in the Dietrich et al. article. Uses a 3D or a 4D volume. If a 4D volume is input, the volume to "
"compute SNR on is specified by '-vol'.",
choices=('diff', 'mult', 'single'),
default='diff')
optional.add_argument(
'-vol',
help="R|Volumes to compute SNR from. Separate with ',' (Example: '-vol 0,1'), or select range "
"using ':' (Example: '-vol 2:50'). If this argument is not passed:\n"
" - For '-method mult', all volumes will be used.\n"
" - For '-method diff', the first two volumes will be used.\n"
" - For '-method single', the first volume will be used.",
metavar=Metavar.str,
default='')
optional.add_argument(
'-rayleigh',
type=int,
help="Correct for Rayleigh distribution. It is recommended to always use this correction for the 'diff' method "
"and to use it with the 'single' method in case the noise mask is taken in a region with low SNR (e.g., "
"the air). ",
default=1,
choices=(0, 1))
optional.add_argument(
'-r',
type=int,
help='Remove temporary files.',
default=1,
choices=(0, 1))
optional.add_argument(
'-v',
metavar=Metavar.int,
type=int,
choices=[0, 1, 2],
default=1,
# Values [0, 1, 2] map to logging levels [WARNING, INFO, DEBUG], but are also used as "if verbose == #" in API
help="Verbosity. 0: Display only errors/warnings, 1: Errors/warnings + info messages, 2: Debug mode")
optional.add_argument(
'-o',
metavar=Metavar.str,
type=str,
default=None,
help="File name to write the computed SNR to."
)
return parser
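# Example invocations (hedged; file names are illustrative, flags as defined
# by the parser above):
#
#   sct_compute_snr -i b0s.nii.gz -m mask.nii.gz -method diff -vol 0,1
#   sct_compute_snr -i data.nii.gz -m sig.nii.gz -m-noise air.nii.gz -method single -rayleigh 1 -o snr.txt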
def weighted_std(values, weights):
"""
Return the weighted average and standard deviation.
values, weights -- Numpy ndarrays with the same shape.
Source: https://stackoverflow.com/questions/2413522/weighted-standard-deviation-in-numpy
"""
average = np.average(values, weights=weights)
# Fast and numerically precise:
variance = np.average((values - average) ** 2, weights=weights)
return np.sqrt(variance)
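# Quick check: with equal weights, weighted_std() reduces to the population
# standard deviation (ddof=0):
#
#   weighted_std(np.array([1.0, 2.0, 3.0]), np.ones(3))  # -> approx. 0.8165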
def main(argv=None):
parser = get_parser()
arguments = parser.parse_args(argv)
verbose = arguments.v
set_loglevel(verbose=verbose)
# Default params
param = Param()
# Get parser info
fname_data = arguments.i
fname_mask = arguments.m
fname_mask_noise = arguments.m_noise
method = arguments.method
file_name = arguments.o
rayleigh_correction = arguments.rayleigh
# Check parameters
if method in ['diff', 'single']:
if not fname_mask:
raise parser.error(f"Argument '-m' must be specified when using '-method {method}'.")
# Load data
im_data = Image(fname_data)
data = im_data.data
dim = len(data.shape)
nz = data.shape[2]
if fname_mask:
mask = Image(fname_mask).data
# Check dimensionality
if method in ['diff', 'mult']:
if dim != 4:
raise ValueError(f"Input data dimension: {dim}. Input dimension for this method should be 4.")
if method in ['single']:
if dim not in [3, 4]:
raise ValueError(f"Input data dimension: {dim}. Input dimension for this method should be 3 or 4.")
# Check dimensionality of mask
if fname_mask:
if len(mask.shape) != 3:
raise ValueError(f"Mask should be a 3D image, but the input mask has shape '{mask.shape}'.")
# Retrieve selected volumes
index_vol = parse_num_list(arguments.vol)
if not index_vol:
if method == 'mult':
index_vol = range(data.shape[3])
elif method == 'diff':
index_vol = [0, 1]
elif method == 'single':
index_vol = [0]
# Compute SNR
# NB: "time" is assumed to be the 4th dimension of the variable "data"
if method == 'mult':
# Compute mean and STD across time
data_mean = np.mean(data[:, :, :, index_vol], axis=3)
data_std = np.std(data[:, :, :, index_vol], axis=3, ddof=1)
# Generate mask where std is different from 0
mask_std_nonzero = np.where(data_std > param.almost_zero)
snr_map = np.zeros_like(data_mean)
snr_map[mask_std_nonzero] = data_mean[mask_std_nonzero] / data_std[mask_std_nonzero]
# Output SNR map
fname_snr = add_suffix(fname_data, '_SNR-' + method)
im_snr = empty_like(im_data)
im_snr.data = snr_map
im_snr.save(fname_snr, dtype=np.float32)
# Output non-zero mask
fname_stdnonzero = add_suffix(fname_data, '_mask-STD-nonzero' + method)
im_stdnonzero = empty_like(im_data)
data_stdnonzero = np.zeros_like(data_mean)
data_stdnonzero[mask_std_nonzero] = 1
im_stdnonzero.data = data_stdnonzero
im_stdnonzero.save(fname_stdnonzero, dtype=np.float32)
# Compute SNR in ROI
if fname_mask:
snr_roi = np.average(snr_map[mask_std_nonzero], weights=mask[mask_std_nonzero])
elif method == 'diff':
# Check user selected exactly 2 volumes for this method.
if not len(index_vol) == 2:
raise ValueError(f"Number of selected volumes: {len(index_vol)}. The method 'diff' should be used with "
f"exactly 2 volumes. You can specify the number of volumes with the flag '-vol'.")
data_2vol = np.take(data, index_vol, axis=3)
# Compute mean across the two volumes
data_mean = np.mean(data_2vol, axis=3)
# Compute mean in ROI for each z-slice, if the slice in the mask is not null
mean_in_roi = [np.average(data_mean[..., iz], weights=mask[..., iz])
for iz in range(nz) if np.any(mask[..., iz])]
data_sub = np.subtract(data_2vol[:, :, :, 1], data_2vol[:, :, :, 0])
# Compute STD in the ROI for each z-slice. The "np.sqrt(2)" results from the variance of the subtraction of two
# distributions: var(A-B) = var(A) + var(B).
# More context in: https://github.com/spinalcordtoolbox/spinalcordtoolbox/issues/3481
std_in_roi = [weighted_std(data_sub[..., iz] / np.sqrt(2), weights=mask[..., iz])
for iz in range(nz) if np.any(mask[..., iz])]
# Compute SNR
snr_roi_slicewise = [m/s for m, s in zip(mean_in_roi, std_in_roi)]
snr_roi = sum(snr_roi_slicewise) / len(snr_roi_slicewise)
elif method == 'single':
# Check that the input volume is 3D, or if it is 4D, that the user selected exactly 1 volume for this method.
if dim == 3:
data3d = data
elif dim == 4:
if not len(index_vol) == 1:
raise ValueError(f"Selected volumes: {index_vol}. The method 'single' should be used with "
f"exactly 1 volume. You can specify the index of the volume with the flag '-vol'.")
data3d = np.squeeze(data[..., index_vol])
# Check that input noise mask is provided
if fname_mask_noise:
mask_noise = Image(fname_mask_noise).data
else:
raise parser.error("A noise mask is mandatory with '-method single'.")
# Check dimensionality of the noise mask
if len(mask_noise.shape) != 3:
raise ValueError(f"Input noise mask dimension: {dim}. Input dimension for the noise mask should be 3.")
# Check that non-null slices are consistent between mask and mask_noise.
for iz in range(nz):
if not np.any(mask[..., iz]) == np.any(mask_noise[..., iz]):
raise ValueError(f"Slice {iz} is empty in either mask or mask_noise. Non-null slices should be "
f"consistent between mask and mask_noise.")
# Compute mean in ROI for each z-slice, if the slice in the mask is not null
mean_in_roi = [np.average(data3d[..., iz], weights=mask[..., iz])
for iz in range(nz) if np.any(mask[..., iz])]
std_in_roi = [weighted_std(data3d[..., iz], weights=mask_noise[..., iz])
for iz in range(nz) if np.any(mask_noise[..., iz])]
# Compute SNR
snr_roi_slicewise = [m/s for m, s in zip(mean_in_roi, std_in_roi)]
snr_roi = sum(snr_roi_slicewise) / len(snr_roi_slicewise)
if rayleigh_correction:
# Correcting for Rayleigh noise (see eq. A12 in Dietrich et al.)
snr_roi *= np.sqrt((4 - np.pi) / 2)
# Display result
if fname_mask:
printv('\nSNR_' + method + ' = ' + str(snr_roi) + '\n', type='info')
# Added function for text file
if file_name is not None:
with open(file_name, "w") as f:
f.write(str(snr_roi))
printv('\nFile saved to ' + file_name)
if __name__ == "__main__":
init_sct()
main(sys.argv[1:])
|
{
"content_hash": "15f5c3f1e97c8ded4dd990e5bd911763",
"timestamp": "",
"source": "github",
"line_count": 263,
"max_line_length": 120,
"avg_line_length": 45.01901140684411,
"alnum_prop": 0.597972972972973,
"repo_name": "neuropoly/spinalcordtoolbox",
"id": "d78b441d3bbe7ca8b6123b61befa536c5e5904a9",
"size": "12621",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "spinalcordtoolbox/scripts/sct_compute_snr.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "5931"
},
{
"name": "C++",
"bytes": "629016"
},
{
"name": "CMake",
"bytes": "7000"
},
{
"name": "CSS",
"bytes": "1237"
},
{
"name": "Dockerfile",
"bytes": "293"
},
{
"name": "HTML",
"bytes": "11480"
},
{
"name": "JavaScript",
"bytes": "3171"
},
{
"name": "MATLAB",
"bytes": "120557"
},
{
"name": "Python",
"bytes": "2052822"
},
{
"name": "Rich Text Format",
"bytes": "1619"
},
{
"name": "Shell",
"bytes": "61227"
}
],
"symlink_target": ""
}
|
from django.http import HttpResponseRedirect
from django.shortcuts import render
from django.contrib.auth.mixins import LoginRequiredMixin
from django.urls import reverse_lazy
from django.views.generic import TemplateView, ListView, FormView, DetailView
from django.core.exceptions import PermissionDenied
from net.models import Scripts, Job, Equipment, Subnets, EquipmentConfig
from net.forms import TaskForm, ArchiveTasksForm, SubnetForm, NEListForm, ConfigSearchForm, CMDRunnerForm
from django.contrib import messages
from net.equipment.generic import GenericEquipment
from net.lib import celery_job_starter, scan_nets_with_fping, discover_vendor, cmd_to_celery
from argus.models import Client, ASTU
import re
# Create your views here.
class Demo(LoginRequiredMixin, TemplateView):
template_name = 'net/demo.html'
def get(self, request, *args, **kwargs):
eq_device = Equipment.objects.get(ne_ip='10.205.18.247') # equipment object
eq = GenericEquipment(eq_device)
eq.set_io_timeout(1)
eq.suggest_login(resuggest=False)
eq.do_login()
eq.discover_vendor()
return render(request, self.template_name, *args, **kwargs)
class PickNE(LoginRequiredMixin, TemplateView):
template_name = 'net/pick_ne.html'
def get_context_data(self, **kwargs):
context = super(PickNE, self).get_context_data(**kwargs)
possible_scripts = Scripts.objects.all().exclude(is_hidden=True)
context['possible_scripts'] = possible_scripts
return context
class DoTask(LoginRequiredMixin, TemplateView):
template_name = 'net/do_task.html'
def get(self, *args, **kwargs):
raise PermissionDenied
def post(self, request):
"""
Нужно запустить стартер, который получит на вход список ID назначений, имя скрипта для выполнения, и возможно,
какие-то дополнительные аргументы.
:param request:
:return:
"""
destinations_ids = request.POST.getlist('destinations')
script_id = request.POST['script_select']
celery_job_starter(destinations_ids, script_id)
args = dict()
return render(request, self.template_name, args)
class ActiveTasks(LoginRequiredMixin, ListView, FormView):
model = Job
template_name = 'net/active_tasks.html'
form_class = TaskForm
paginate_by = 9
success_url = '/net/active_tasks'
def post(self, request, *args, **kwargs):
return self.get(request, *args, **kwargs)
def get_queryset(self):
if self.request.method == 'POST':
form = TaskForm(self.request.POST)
if form.is_valid():
task_status = form.cleaned_data['task_status']
if task_status != '':
return Job.objects.filter(status=task_status)
return Job.objects.all().exclude(status='ARCHIVED').exclude(status='TERMINATED')
if self.request.method == 'GET':
if self.request.GET.get('task_status') and (self.request.GET.get('task_status') != 'None'):
return Job.objects.filter(status=self.request.GET.get('task_status'))
return Job.objects.all().exclude(status='ARCHIVED').exclude(status='TERMINATED')
def get_context_data(self, **kwargs):
context = super(ActiveTasks, self).get_context_data(**kwargs)
task_status = None
if self.request.method == 'POST':
form = TaskForm(self.request.POST)
if form.is_valid():
task_status = form.cleaned_data['task_status']
if self.request.method == 'GET':
task_status = self.request.GET.get('task_status')
context['task_status'] = task_status
return context
class ArchiveTasks(LoginRequiredMixin, FormView):
template_name = 'net/archive_tasks.html'
form_class = ArchiveTasksForm
def post(self, request, *args, **kwargs):
return self.get(request, *args, **kwargs)
def get_context_data(self, **kwargs):
context = super(ArchiveTasks, self).get_context_data(**kwargs)
if self.request.method == 'POST':
Job.objects.filter(status='SUCCESS').update(status='ARCHIVED')
messages.add_message(self.request, messages.INFO, 'Архивация выполена')
return context
class TaskDetail(LoginRequiredMixin, TemplateView):
template_name = 'net/task_detail.html'
class DiscoverSubnets(LoginRequiredMixin, FormView):
template_name = 'net/discover_subnets.html'
form_class = SubnetForm
def post(self, request, *args, **kwargs):
return self.get(request, *args, **kwargs)
def get_context_data(self, **kwargs):
context = super(DiscoverSubnets, self).get_context_data(**kwargs)
context['new'] = False
context['found'] = False
if self.request.method == 'POST':
form = SubnetForm(self.request.POST)
if form.is_valid():
subnets = form.cleaned_data['subnets'].split("\r\n") # lists with subnet
cast_to_celery = form.cleaned_data['cast_to_celery'] # "Send discovery task to Celery" checkbox
discover_task = form.cleaned_data['discover_task'] # Task type
context['cast_to_celery'] = cast_to_celery
if discover_task == 'fping':
if not cast_to_celery:
found, new = scan_nets_with_fping(subnets)
context['found'] = found
context['new'] = new
else:
celery_job_starter(subnets, '999') # 999 will be send task to celery for subnets scan
if discover_task == 'vendor':
if not cast_to_celery:
discover_vendor(subnets)
else:
celery_job_starter(subnets, '1000')
pass
if discover_task == 'config':
if not cast_to_celery:
# discover_config(subnets)
pass
else:
celery_job_starter(subnets, '1001')
pass
if discover_task == 'put_syslocation':
if not cast_to_celery:
# only in celery
pass
else:
celery_job_starter(subnets, '1002')
return context
class ClientsCount(LoginRequiredMixin, TemplateView):
template_name = 'net/clients_count.html'
def get_context_data(self, **kwargs):
result_dict = dict()
clients = Client.objects.all()
for client in clients:
hostname = client.hostname
hostname_parts = hostname.split('-')
try:
node_name = hostname_parts[0] + '-' + hostname_parts[1] + '-' + hostname_parts[2]
if node_name in result_dict:
result_dict[node_name] += 1
else:
result_dict[node_name] = 1
except IndexError:
# skip
# print(hostname)
pass
result_str = ''
for node in result_dict:
try:
astu_objects = ASTU.objects.filter(hostname__contains=node).filter(status='эксплуатация')
astu_first_object = astu_objects[0]
address = astu_first_object.address
except IndexError:
address = 'Unknown'
# print(node + ';' + str(result_dict[node]) + ';"' + address + '"')
result_str += node + ';' + str(result_dict[node]) + ';"' + address + '"' + "\n"
context = super(ClientsCount, self).get_context_data(**kwargs)
context['result_str'] = result_str
return context
class NEList(LoginRequiredMixin, ListView, FormView):
template_name = 'net/ne_list.html'
form_class = NEListForm
model = Equipment
success_url = 'net/ne_list'
paginate_by = 20
context_object_name = 'ne_list'
def post(self, request, *args, **kwargs):
return self.get(request, *args, **kwargs)
def get_queryset(self):
ne_list = Equipment.objects.all()
# defaults
is_login_discovered = 'any' # any value
is_vendor_discovered = 'any'
ip_or_subnet = ''
if self.request.method == 'POST':
form = NEListForm(self.request.POST)
if form.is_valid():
is_login_discovered = form.cleaned_data['is_login_discovered']
is_vendor_discovered = form.cleaned_data['is_vendor_discovered']
ip_or_subnet = form.cleaned_data['ip_or_subnet']
if self.request.method == 'GET':
is_login_discovered = self.request.GET.get('is_login_discovered')
is_vendor_discovered = self.request.GET.get('is_login_discovered')
ip_or_subnet = self.request.GET.get('ip_or_subnet')
# Filter login_discovered
if is_login_discovered == 'yes':
ne_list = ne_list.filter(credentials_id__isnull=False)
elif is_login_discovered == 'no':
ne_list = ne_list.filter(credentials_id__isnull=True)
else: # 'any'
pass
# Filter vendor discovered
if is_vendor_discovered == 'yes':
ne_list = ne_list.filter(vendor__isnull=False)
elif is_vendor_discovered == 'no':
ne_list = ne_list.filter(vendor__isnull=True)
else: # any
pass
ip_re = r'^([0-9]+\.){3}[0-9]+$'
mask_re = r'^([0-9]+\.){3}[0-9]+\/\d{1,2}$'
# IP / hostname / subnet filtering
if ip_or_subnet and (ip_or_subnet is not None) and (ip_or_subnet != 'None'):
if re.match(ip_re, ip_or_subnet): # IP-address only
ne_list = ne_list.filter(ne_ip=ip_or_subnet)
elif re.match(mask_re, ip_or_subnet): # Subnet
try:
ne_list = ne_list.filter(ne_ip__net_contained=ip_or_subnet)
except ValueError as err:
messages.add_message(self.request, messages.ERROR, 'Subnet search error. ' + str(err))
else: # filtering by hostname
ne_list = ne_list.filter(hostname__icontains=ip_or_subnet)
# return result
return ne_list
def get_context_data(self, **kwargs):
context = super(NEList, self).get_context_data(**kwargs)
context['row_count'] = self.get_queryset().count()
if self.request.method == 'GET':
context['is_login_discovered'] = self.request.GET.get('is_login_discovered')
context['is_vendor_discovered'] = self.request.GET.get('is_vendor_discovered')
context['ip_or_subnet'] = self.request.GET.get('ip_or_subnet')
if self.request.method == 'POST':
form = NEListForm(self.request.POST)
if form.is_valid():
context['is_login_discovered'] = form.cleaned_data['is_login_discovered']
context['is_vendor_discovered'] = form.cleaned_data['is_vendor_discovered']
context['ip_or_subnet'] = form.cleaned_data['ip_or_subnet']
return context
class NEDetail(LoginRequiredMixin, DetailView):
template_name = 'net/ne_detail.html'
model = Equipment
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
ip = str(context['object'].ne_ip).replace('/32', '') # removing /32 from IPv4 addr
try:
astu_object = ASTU.objects.get(ne_ip=ip) # check if NE with this IP exists in ASTU table
address = astu_object.address # getting address
except ASTU.DoesNotExist:
address = 'Not found'
context['address'] = address # return it to the context
config_archives = EquipmentConfig.objects.filter(equipment_id=context['object'].id)
context['config_archives'] = config_archives[:20] # Last 20 configurations
return context
class SubnetsList(LoginRequiredMixin, ListView):
template_name = 'net/subnets_list.html'
model = Subnets
def get_queryset(self):
subnets_list = Subnets.objects.all()
return subnets_list
def get_context_data(self, **kwargs):
context = super(SubnetsList, self).get_context_data(**kwargs)
context['row_count'] = self.get_queryset().count()
return context
class ConfigSearch(LoginRequiredMixin, ListView, FormView):
template_name = 'net/config_search.html'
form_class = ConfigSearchForm
model = Equipment
success_url = 'net/config_search'
paginate_by = 20
context_object_name = 'ne_list'
def post(self, request, *args, **kwargs):
return self.get(request, *args, **kwargs)
def get_search_term(self):
"""
Returns search term from form if method is post, otherwise returns None
:return: search term or None
"""
if self.request.method == 'POST':
form = ConfigSearchForm(self.request.POST)
if form.is_valid():
search = form.cleaned_data['search'] or ''
return search
if self.request.method == 'GET':
return self.request.GET.get('search') or ''
def get_queryset(self):
ne_list = Equipment.objects.all() # all NE's
search = self.get_search_term()
if search:
ne_list = ne_list.filter(current_config__icontains=search)
return ne_list
return Equipment.objects.none() # otherwise return empty queryset / list
def get_context_data(self, **kwargs):
context = super(ConfigSearch, self).get_context_data(**kwargs)
context['row_count'] = self.get_queryset().count()
context['search'] = self.get_search_term()
return context
class CMDRunner(LoginRequiredMixin, FormView):
template_name = 'net/cmd_runner.html'
form_class = CMDRunnerForm
def post(self, request, *args, **kwargs):
return self.get(request, *args, **kwargs)
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
if self.request.method == 'POST':
form = CMDRunnerForm(self.request.POST)
if form.is_valid():
ips = form.cleaned_data['ips_textfield']
cmds = form.cleaned_data['commands_list']
vendor = form.cleaned_data['vendor_choices']
cmd_to_celery(vendor, ips, cmds)
return context
|
{
"content_hash": "d8f6d98473ef95922b84c9aefb48e2f2",
"timestamp": "",
"source": "github",
"line_count": 374,
"max_line_length": 118,
"avg_line_length": 38.9572192513369,
"alnum_prop": 0.5922443376801647,
"repo_name": "dehu4ka/lna",
"id": "4ebfbc4fee1e04634408737c452de9e17245d0c3",
"size": "14718",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "net/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "7385"
},
{
"name": "HTML",
"bytes": "75367"
},
{
"name": "JavaScript",
"bytes": "106914"
},
{
"name": "Python",
"bytes": "391076"
},
{
"name": "Shell",
"bytes": "4196"
}
],
"symlink_target": ""
}
|
"""
MSDP (Mud Server Data Protocol)
This implements the MSDP protocol as per
http://tintin.sourceforge.net/msdp/. MSDP manages out-of-band
communication between the client and server, for updating health bars
etc.
"""
import re
from src.utils.utils import to_str
# MSDP-relevant telnet cmd/opt-codes
MSDP = chr(69)
MSDP_VAR = chr(1)
MSDP_VAL = chr(2)
MSDP_TABLE_OPEN = chr(3)
MSDP_TABLE_CLOSE = chr(4)
MSDP_ARRAY_OPEN = chr(5)
MSDP_ARRAY_CLOSE = chr(6)
IAC = chr(255)
SB = chr(250)
SE = chr(240)
force_str = lambda inp: to_str(inp, force_string=True)
# pre-compiled regexes
# returns 2-tuple
regex_array = re.compile(r"%s(.*?)%s%s(.*?)%s" % (MSDP_VAR, MSDP_VAL,
MSDP_ARRAY_OPEN,
MSDP_ARRAY_CLOSE))
# returns 2-tuple (may be nested)
regex_table = re.compile(r"%s(.*?)%s%s(.*?)%s" % (MSDP_VAR, MSDP_VAL,
MSDP_TABLE_OPEN,
MSDP_TABLE_CLOSE))
regex_var = re.compile(MSDP_VAR)
regex_val = re.compile(MSDP_VAL)
# Msdp object handler
class Msdp(object):
"""
Implements the MSDP protocol.
"""
def __init__(self, protocol):
"""
Initiates by storing the protocol
on itself and trying to determine
if the client supports MSDP.
"""
self.protocol = protocol
self.protocol.protocol_flags['MSDP'] = False
self.protocol.negotiationMap[MSDP] = self.msdp_to_evennia
self.protocol.will(MSDP).addCallbacks(self.do_msdp, self.no_msdp)
self.msdp_reported = {}
def no_msdp(self, option):
"No msdp supported or wanted"
self.protocol.handshake_done()
def do_msdp(self, option):
"""
Called when client confirms that it can do MSDP.
"""
self.protocol.protocol_flags['MSDP'] = True
self.protocol.handshake_done()
def evennia_to_msdp(self, cmdname, *args, **kwargs):
"""
handle return data from cmdname by converting it to
a proper msdp structure. data can either be a single value (will be
converted to a string), a list (will be converted to an MSDP_ARRAY),
or a dictionary (will be converted to MSDP_TABLE).
OBS - there is no actual use of arrays and tables in the MSDP
        specification or default commands -- all returns are implemented
as simple lists or named lists (our name for them here, these
un-bounded structures are not named in the specification). So for
now, this routine will not explicitly create arrays nor tables,
although there are helper methods ready should it be needed in
the future.
"""
def make_table(name, **kwargs):
"build a table that may be nested with other tables or arrays."
string = MSDP_VAR + force_str(name) + MSDP_VAL + MSDP_TABLE_OPEN
for key, val in kwargs.items():
if isinstance(val, dict):
                    string += make_table(key, **val)
elif hasattr(val, '__iter__'):
                    string += make_array(key, *val)
else:
string += MSDP_VAR + force_str(key) + MSDP_VAL + force_str(val)
string += MSDP_TABLE_CLOSE
return string
def make_array(name, *args):
"build a array. Arrays may not nest tables by definition."
string = MSDP_VAR + force_str(name) + MSDP_ARRAY_OPEN
string += MSDP_VAL.join(force_str(arg) for arg in args)
string += MSDP_ARRAY_CLOSE
return string
def make_list(name, *args):
"build a simple list - an array without start/end markers"
string = MSDP_VAR + force_str(name)
string += MSDP_VAL.join(force_str(arg) for arg in args)
return string
def make_named_list(name, **kwargs):
"build a named list - a table without start/end markers"
string = MSDP_VAR + force_str(name)
for key, val in kwargs.items():
string += MSDP_VAR + force_str(key) + MSDP_VAL + force_str(val)
return string
# Default MSDP commands
print "MSDP outgoing:", cmdname, args, kwargs
cupper = cmdname.upper()
if cupper == "LIST":
if args:
args = list(args)
mode = args.pop(0).upper()
self.data_out(make_array(mode, *args))
elif cupper == "REPORT":
self.data_out(make_list("REPORT", *args))
elif cupper == "UNREPORT":
self.data_out(make_list("UNREPORT", *args))
elif cupper == "RESET":
self.data_out(make_list("RESET", *args))
elif cupper == "SEND":
self.data_out(make_named_list("SEND", **kwargs))
else:
# return list or named lists.
msdp_string = ""
if args:
msdp_string += make_list(cupper, *args)
if kwargs:
msdp_string += make_named_list(cupper, **kwargs)
self.data_out(msdp_string)
def msdp_to_evennia(self, data):
"""
Handle a client's requested negotiation, converting
it into a function mapping - either one of the MSDP
default functions (LIST, SEND etc) or a custom one
in OOB_FUNCS dictionary. command names are case-insensitive.
varname, var --> mapped to function varname(var)
arrayname, array --> mapped to function arrayname(*array)
tablename, table --> mapped to function tablename(**table)
        Note: Combinations of args/kwargs to one function are not supported
        in this implementation (it complicates the code for limited
        gain - arrayname(*array) is usually as complex as anyone should
        ever need to go anyway, I hope!).
"""
tables = {}
arrays = {}
variables = {}
if hasattr(data, "__iter__"):
data = "".join(data)
#logger.log_infomsg("MSDP SUBNEGOTIATION: %s" % data)
for key, table in regex_table.findall(data):
tables[key] = {}
for varval in regex_var.split(table):
parts = regex_val.split(varval)
                tables[key].update({parts[0]: tuple(parts[1:]) if len(parts) > 1 else ("",)})
for key, array in regex_array.findall(data):
arrays[key] = []
for val in regex_val.split(array):
arrays[key].append(val)
arrays[key] = tuple(arrays[key])
for varval in regex_var.split(regex_array.sub("", regex_table.sub("", data))):
# get remaining varvals after cleaning away tables/arrays
parts = regex_val.split(varval)
variables[parts[0].upper()] = tuple(parts[1:]) if len(parts) > 1 else ("", )
#print "MSDP: table, array, variables:", tables, arrays, variables
# all variables sent through msdp to Evennia are considered commands
# with arguments. There are three forms of commands possible
# through msdp:
#
# VARNAME VAR -> varname(var)
# ARRAYNAME VAR VAL VAR VAL VAR VAL ENDARRAY -> arrayname(val,val,val)
# TABLENAME TABLE VARNAME VAL VARNAME VAL ENDTABLE ->
# tablename(varname=val, varname=val)
#
# default MSDP functions
if "LIST" in variables:
self.data_in("list", *variables.pop("LIST"))
if "REPORT" in variables:
self.data_in("report", *variables.pop("REPORT"))
if "REPORT" in arrays:
self.data_in("report", *(arrays.pop("REPORT")))
if "UNREPORT" in variables:
self.data_in("unreport", *(arrays.pop("UNREPORT")))
if "RESET" in variables:
self.data_in("reset", *variables.pop("RESET"))
if "RESET" in arrays:
self.data_in("reset", *(arrays.pop("RESET")))
if "SEND" in variables:
self.data_in("send", *variables.pop("SEND"))
if "SEND" in arrays:
self.data_in("send", *(arrays.pop("SEND")))
        # if there is anything left, consider it a call to a custom function
for varname, var in variables.items():
            # a simple function with its argument(s); var is already a tuple
            self.data_in(varname, *var)
for arrayname, array in arrays.items():
            # we assume the array holds multiple arguments to the function
self.data_in(arrayname, *array)
for tablename, table in tables.items():
# we assume tables are keyword arguments to the function
self.data_in(tablename, **table)
def data_out(self, msdp_string):
"""
Return a msdp-valid subnegotiation across the protocol.
"""
#print "msdp data_out (without IAC SE):", msdp_string
        self.protocol._write(IAC + SB + MSDP + force_str(msdp_string) + IAC + SE)
def data_in(self, funcname, *args, **kwargs):
"""
Send oob data to Evennia
"""
#print "msdp data_in:", funcname, args, kwargs
self.protocol.data_in(text=None, oob=(funcname, args, kwargs))
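# Illustrative sketch (not part of the original module): the wire format for
# a simple "REPORT HEALTH" exchange, using the constants defined above.
# data_out() wraps the payload in the IAC SB MSDP ... IAC SE envelope:
#
#   payload = MSDP_VAR + "REPORT" + MSDP_VAL + "HEALTH"
#   on the wire: IAC + SB + MSDP + payload + IAC + SE
#
# Feeding the same payload to msdp_to_evennia() parses the variable
# REPORT -> ("HEALTH",), which is dispatched as data_in("report", "HEALTH").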
|
{
"content_hash": "b052279ca32ef80b2276b012cc14e8dc",
"timestamp": "",
"source": "github",
"line_count": 243,
"max_line_length": 93,
"avg_line_length": 38.13991769547325,
"alnum_prop": 0.5700258955545965,
"repo_name": "Pathel/deuterium",
"id": "aa36bd65d828b5cb2b00732f8d2c311234765413",
"size": "9268",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/server/portal/msdp.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "19127"
},
{
"name": "Emacs Lisp",
"bytes": "2734"
},
{
"name": "JavaScript",
"bytes": "22126"
},
{
"name": "Python",
"bytes": "2117297"
}
],
"symlink_target": ""
}
|
from application.views import app
import unittest
import os
class TestRoutes(unittest.TestCase):
def setUp(self):
app.config.from_object(os.environ.get('SETTINGS'))
self.app = app.test_client()
def test_health(self):
self.assertEqual((self.app.get('/health')).status, '200 OK')
|
{
"content_hash": "a69ba3c15c38364bc2dcd2c022f5452b",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 68,
"avg_line_length": 26.083333333333332,
"alnum_prop": 0.6773162939297125,
"repo_name": "Skablam/flask-spawn",
"id": "0667c6934a17efde55ba0d8a6f64d5a460b2a18e",
"size": "313",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "flaskspawn/cookiecutters/small/{{cookiecutter.repo_name}}/tests/test_app.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "683"
},
{
"name": "Python",
"bytes": "16175"
},
{
"name": "Shell",
"bytes": "256"
}
],
"symlink_target": ""
}
|
__author__ = 'ray'
__date__ = '6/12/14'
"""
georest.storage.exceptions
~~~~~~~~~~~~~~~~~~~~~~~~~~
Geo Feature Storage Exceptions.
"""
from ..geo.exceptions import *
class StorageError(GeoException):
pass
class StorageInternalError(StorageError):
HTTP_STATUS_CODE = 500
class UnknownStoragePrototype(StorageError):
HTTP_STATUS_CODE = 400
class DuplicatedBucket(StorageError):
HTTP_STATUS_CODE = 409
class BucketNotFound(StorageError):
HTTP_STATUS_CODE = 404
class FeatureNotFound(StorageError):
HTTP_STATUS_CODE = 404
class ParentRevisionNotFound(StorageError):
HTTP_STATUS_CODE = 409
class NotHeadRevision(StorageError):
HTTP_STATUS_CODE = 409
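# Illustrative sketch (an assumption, not part of this module): the
# HTTP_STATUS_CODE attribute is presumably mapped onto the HTTP response by
# an error handler in the web layer, e.g. something Flask-shaped like:
#
#   @app.errorhandler(StorageError)
#   def on_storage_error(e):
#       return str(e), getattr(e, 'HTTP_STATUS_CODE', 500)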
|
{
"content_hash": "c1e5a6665e0c41d00781a7b454e4b29f",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 44,
"avg_line_length": 16.80952380952381,
"alnum_prop": 0.6912181303116147,
"repo_name": "Kotaimen/georest",
"id": "cab65989b9f0bfe3210741925cafd171942f3e68",
"size": "733",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "georest/storage/exceptions.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Lua",
"bytes": "3316"
},
{
"name": "Python",
"bytes": "203216"
}
],
"symlink_target": ""
}
|
"""set the sip version, cause pandas-qt uses version 2 by default"""
import sip
sip.setapi('QString', 2)
sip.setapi('QVariant', 2)
from PyQt4 import QtGui
import pandas
import pandasqt
import numpy
"""setup a new empty model"""
model = pandasqt.DataFrameModel()
"""setup an application and create a table view widget"""
app = QtGui.QApplication([])
widget = QtGui.QTableView()
widget.resize(800, 600)
widget.show()
"""asign the created model"""
widget.setModel(model)
"""create some test data"""
data = pandas.DataFrame([10], columns=['A'])
"""convert the column to the numpy.int8 datatype to test limitation in the table
int8 is limited to -128-127
"""
data['A'] = data['A'].astype(numpy.int8)
"""fill the model with data"""
model.setDataFrame(data)
"""assign new delegates, only useful for big int or float values"""
pandasqt.setDelegatesFromDtype(widget)
"""start the app"""
app.exec_()
|
{
"content_hash": "dc44da8ed649dd2881b9208312cf7f6f",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 80,
"avg_line_length": 25.571428571428573,
"alnum_prop": 0.7251396648044692,
"repo_name": "szaiser/pandas-qt",
"id": "79803f43be8da2047bcecc43e836ee507b8ea0da",
"size": "895",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "examples/BasicExample.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Prolog",
"bytes": "47855"
},
{
"name": "Python",
"bytes": "302423"
}
],
"symlink_target": ""
}
|
from bbfetchext import *
|
{
"content_hash": "bb3a7b017d4262f09a4f18757d33f3cc",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 24,
"avg_line_length": 24,
"alnum_prop": 0.8333333333333334,
"repo_name": "Equitable/trump",
"id": "47c32bfd8993ff566f0c332ae4bf39b5bf5edf8e",
"size": "24",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "trump/extensions/source/tx-bbfetch/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "249279"
}
],
"symlink_target": ""
}
|
import proto # type: ignore
from google.ads.googleads.v11.enums.types import (
response_content_type as gage_response_content_type,
)
from google.ads.googleads.v11.resources.types import (
campaign_budget as gagr_campaign_budget,
)
from google.protobuf import field_mask_pb2 # type: ignore
from google.rpc import status_pb2 # type: ignore
__protobuf__ = proto.module(
package="google.ads.googleads.v11.services",
marshal="google.ads.googleads.v11",
manifest={
"MutateCampaignBudgetsRequest",
"CampaignBudgetOperation",
"MutateCampaignBudgetsResponse",
"MutateCampaignBudgetResult",
},
)
class MutateCampaignBudgetsRequest(proto.Message):
r"""Request message for
[CampaignBudgetService.MutateCampaignBudgets][google.ads.googleads.v11.services.CampaignBudgetService.MutateCampaignBudgets].
Attributes:
customer_id (str):
Required. The ID of the customer whose
campaign budgets are being modified.
operations (Sequence[google.ads.googleads.v11.services.types.CampaignBudgetOperation]):
Required. The list of operations to perform
on individual campaign budgets.
partial_failure (bool):
If true, successful operations will be
carried out and invalid operations will return
errors. If false, all operations will be carried
out in one transaction if and only if they are
all valid. Default is false.
validate_only (bool):
If true, the request is validated but not
executed. Only errors are returned, not results.
response_content_type (google.ads.googleads.v11.enums.types.ResponseContentTypeEnum.ResponseContentType):
The response content type setting. Determines
whether the mutable resource or just the
resource name should be returned post mutation.
"""
customer_id = proto.Field(proto.STRING, number=1,)
operations = proto.RepeatedField(
proto.MESSAGE, number=2, message="CampaignBudgetOperation",
)
partial_failure = proto.Field(proto.BOOL, number=3,)
validate_only = proto.Field(proto.BOOL, number=4,)
response_content_type = proto.Field(
proto.ENUM,
number=5,
enum=gage_response_content_type.ResponseContentTypeEnum.ResponseContentType,
)
class CampaignBudgetOperation(proto.Message):
r"""A single operation (create, update, remove) on a campaign
budget.
This message has `oneof`_ fields (mutually exclusive fields).
For each oneof, at most one member field can be set at the same time.
Setting any member of the oneof automatically clears all other
members.
.. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
Attributes:
update_mask (google.protobuf.field_mask_pb2.FieldMask):
FieldMask that determines which resource
fields are modified in an update.
create (google.ads.googleads.v11.resources.types.CampaignBudget):
Create operation: No resource name is
expected for the new budget.
This field is a member of `oneof`_ ``operation``.
update (google.ads.googleads.v11.resources.types.CampaignBudget):
Update operation: The campaign budget is
expected to have a valid resource name.
This field is a member of `oneof`_ ``operation``.
remove (str):
Remove operation: A resource name for the removed budget is
expected, in this format:
``customers/{customer_id}/campaignBudgets/{budget_id}``
This field is a member of `oneof`_ ``operation``.
"""
update_mask = proto.Field(
proto.MESSAGE, number=4, message=field_mask_pb2.FieldMask,
)
create = proto.Field(
proto.MESSAGE,
number=1,
oneof="operation",
message=gagr_campaign_budget.CampaignBudget,
)
update = proto.Field(
proto.MESSAGE,
number=2,
oneof="operation",
message=gagr_campaign_budget.CampaignBudget,
)
remove = proto.Field(proto.STRING, number=3, oneof="operation",)
class MutateCampaignBudgetsResponse(proto.Message):
r"""Response message for campaign budget mutate.
Attributes:
partial_failure_error (google.rpc.status_pb2.Status):
Errors that pertain to operation failures in the partial
failure mode. Returned only when partial_failure = true and
all errors occur inside the operations. If any errors occur
outside the operations (for example, auth errors), we return
an RPC level error.
results (Sequence[google.ads.googleads.v11.services.types.MutateCampaignBudgetResult]):
All results for the mutate.
"""
partial_failure_error = proto.Field(
proto.MESSAGE, number=3, message=status_pb2.Status,
)
results = proto.RepeatedField(
proto.MESSAGE, number=2, message="MutateCampaignBudgetResult",
)
class MutateCampaignBudgetResult(proto.Message):
r"""The result for the campaign budget mutate.
Attributes:
resource_name (str):
Returned for successful operations.
campaign_budget (google.ads.googleads.v11.resources.types.CampaignBudget):
The mutated campaign budget with only mutable fields after
mutate. The field will only be returned when
response_content_type is set to "MUTABLE_RESOURCE".
"""
resource_name = proto.Field(proto.STRING, number=1,)
campaign_budget = proto.Field(
proto.MESSAGE, number=2, message=gagr_campaign_budget.CampaignBudget,
)
__all__ = tuple(sorted(__protobuf__.manifest))
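# Illustrative sketch (not part of the generated module): proto-plus messages
# accept keyword arguments, so a minimal create-budget request could look
# like the following ("name" and "amount_micros" are CampaignBudget resource
# fields assumed here for illustration):
#
#   operation = CampaignBudgetOperation(
#       create=gagr_campaign_budget.CampaignBudget(
#           name="Example budget",
#           amount_micros=500000,
#       ),
#   )
#   request = MutateCampaignBudgetsRequest(
#       customer_id="1234567890",
#       operations=[operation],
#   )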
|
{
"content_hash": "0df45c32b4486a446786bc78d358a3fb",
"timestamp": "",
"source": "github",
"line_count": 156,
"max_line_length": 129,
"avg_line_length": 37.33974358974359,
"alnum_prop": 0.6727896995708155,
"repo_name": "googleads/google-ads-python",
"id": "8a998c2fde70adc7e3b782fb5e6376706451bfd5",
"size": "6425",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "google/ads/googleads/v11/services/types/campaign_budget_service.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "23399881"
}
],
"symlink_target": ""
}
|
import os
from kubernetes import client
from oslo_utils import uuidutils
from unittest import mock
from tacker import context
from tacker.sol_refactored.common import exceptions as sol_ex
from tacker.sol_refactored.common import vnfd_utils
from tacker.sol_refactored.infra_drivers.kubernetes import kubernetes
from tacker.sol_refactored.nfvo import nfvo_client
from tacker.sol_refactored import objects
from tacker.tests.unit import base
from tacker.tests.unit.sol_refactored.infra_drivers.kubernetes import fakes
CNF_SAMPLE_VNFD_ID = "b1bb0ce7-ebca-4fa7-95ed-4840d70a1177"
class TestKubernetes(base.TestCase):
def setUp(self):
super(TestKubernetes, self).setUp()
objects.register_all()
self.driver = kubernetes.Kubernetes()
self.context = context.get_admin_context()
cur_dir = os.path.dirname(__file__)
sample_dir = os.path.join(cur_dir, "../..", "samples")
self.vnfd_1 = vnfd_utils.Vnfd(CNF_SAMPLE_VNFD_ID)
self.vnfd_1.init_from_csar_dir(os.path.join(sample_dir, "sample2"))
def test_setup_k8s_reses_fail_diffs(self):
not_exist = 'Files/kubernetes/not_exist.yaml'
expected_ex = sol_ex.CnfDefinitionNotFound(
diff_files=not_exist)
target_k8s_files = [not_exist]
ex = self.assertRaises(sol_ex.CnfDefinitionNotFound,
self.driver._setup_k8s_reses, self.vnfd_1,
target_k8s_files, mock.Mock(), mock.Mock())
self.assertEqual(expected_ex.detail, ex.detail)
def test_wait_k8s_reses_ready(self):
res1 = mock.Mock()
res1.is_ready = mock.MagicMock(side_effect=[True, True])
res2 = mock.Mock()
res2.is_ready = mock.MagicMock(side_effect=[False, True])
k8s_reses = [res1, res2]
kubernetes.CHECK_INTERVAL = 1
self.driver._wait_k8s_reses_ready(k8s_reses)
self.assertEqual(1, res1.is_ready.call_count)
self.assertEqual(2, res2.is_ready.call_count)
def test_wait_k8s_reses_deleted(self):
res1 = mock.Mock()
res1.is_exists = mock.MagicMock(side_effect=[True, False])
res2 = mock.Mock()
res2.is_exists = mock.MagicMock(side_effect=[True, False])
k8s_reses = [res1, res2]
kubernetes.CHECK_INTERVAL = 1
self.driver._wait_k8s_reses_deleted(k8s_reses)
self.assertEqual(2, res1.is_exists.call_count)
self.assertEqual(2, res2.is_exists.call_count)
@mock.patch('tacker.sol_refactored.infra_drivers.kubernetes.'
'kubernetes_utils.list_namespaced_pods')
def test_wait_k8s_reses_updated(self, mock_list_namespaced_pods):
mock_list_namespaced_pods.return_value = []
res1 = mock.Mock()
res1.is_update = mock.MagicMock(side_effect=[False, True])
res2 = mock.Mock()
res2.is_update = mock.MagicMock(side_effect=[True, True])
k8s_reses = [res1, res2]
kubernetes.CHECK_INTERVAL = 1
self.driver._wait_k8s_reses_updated(k8s_reses, mock.Mock(),
mock.Mock(), mock.Mock())
self.assertEqual(2, res1.is_update.call_count)
self.assertEqual(1, res2.is_update.call_count)
def test_check_status_timeout(self):
res1 = mock.Mock()
res1.is_ready = mock.MagicMock(return_value=False)
k8s_reses = [res1]
self.config_fixture.config(group='v2_vnfm',
kubernetes_vim_rsc_wait_timeout=2)
kubernetes.CHECK_INTERVAL = 1
self.assertRaises(sol_ex.K8sOperaitionTimeout,
self.driver._wait_k8s_reses_ready, k8s_reses)
        # usually 3, but possibly 2
self.assertTrue(res1.is_ready.call_count >= 2)
@mock.patch.object(nfvo_client.NfvoClient, 'get_vnfd')
@mock.patch.object(client.CoreV1Api, 'list_namespaced_pod')
def test_sync_db(
self, mock_list_namespaced_pod, mock_get_vnfd):
vnf_instance_obj = fakes.fake_vnf_instance()
vnfc_rsc_info_obj1, vnfc_info_obj1 = fakes.fake_vnfc_resource_info(
vdu_id='VDU1', rsc_kind='Deployment',
pod_name="vdu1-1234567890-abcd", rsc_name="vdu1")
vnf_instance_obj.instantiatedVnfInfo.vnfcResourceInfo = [
vnfc_rsc_info_obj1
]
vim_connection_object = fakes.fake_vim_connection_info()
vnf_instance_obj.vimConnectionInfo['vim1'] = vim_connection_object
mock_list_namespaced_pod.return_value = client.V1PodList(
items=[
fakes.get_fake_pod_info(
kind='Deployment', pod_name="vdu1-1234567890-abcd1"),
fakes.get_fake_pod_info(
kind='Deployment', pod_name="vdu1-1234567890-abcd2")])
mock_get_vnfd.return_value = self.vnfd_1
vnf_instance_obj.vnfdId = uuidutils.generate_uuid()
vnf_instance_obj.instantiatedVnfInfo.scaleStatus = [
fakes.fake_scale_status(vnfd_id=vnf_instance_obj.vnfdId)
]
self.driver.sync_db(
context=self.context, vnf_instance=vnf_instance_obj,
vim_info=vim_connection_object)
self.assertEqual(
2, vnf_instance_obj.instantiatedVnfInfo.metadata[
'vdu_reses']['VDU1']['spec']['replicas'])
@mock.patch.object(nfvo_client.NfvoClient, 'get_vnfd')
@mock.patch.object(client.CoreV1Api, 'list_namespaced_pod')
def test_sync_db_no_diff(
self, mock_list_namespaced_pod, mock_get_vnfd):
vnf_instance_obj = fakes.fake_vnf_instance()
vnfc_rsc_info_obj1, vnfc_info_obj1 = fakes.fake_vnfc_resource_info(
vdu_id='VDU1', rsc_kind='Deployment',
pod_name="vdu1-1234567890-abcd1", rsc_name="vdu1")
vnf_instance_obj.instantiatedVnfInfo.vnfcResourceInfo = [
vnfc_rsc_info_obj1
]
vim_connection_object = fakes.fake_vim_connection_info()
vnf_instance_obj.vimConnectionInfo['vim1'] = vim_connection_object
mock_list_namespaced_pod.return_value = client.V1PodList(
items=[
fakes.get_fake_pod_info(
kind='Deployment', pod_name="vdu1-1234567890-abcd1")])
mock_get_vnfd.return_value = self.vnfd_1
vnf_instance_obj.vnfdId = uuidutils.generate_uuid()
vnf_instance_obj.instantiatedVnfInfo.scaleStatus = [
fakes.fake_scale_status(vnfd_id=vnf_instance_obj.vnfdId)
]
ex = self.assertRaises(
sol_ex.DbSyncNoDiff, self.driver.sync_db,
self.context, vnf_instance_obj, vim_connection_object)
self.assertEqual(
"There are no differences in Vnfc resources.", ex.args[0])
@mock.patch.object(vnfd_utils.Vnfd, 'get_scale_vdu_and_num')
@mock.patch.object(nfvo_client.NfvoClient, 'get_vnfd')
@mock.patch.object(client.CoreV1Api, 'list_namespaced_pod')
def test_sync_db_scale_level(
self, mock_list_namespaced_pod, mock_get_vnfd,
mock_scale_vdu_and_num):
vnf_instance_obj = fakes.fake_vnf_instance()
vnfc_rsc_info_obj1, vnfc_info_obj1 = fakes.fake_vnfc_resource_info(
vdu_id='VDU1', rsc_kind='Deployment',
pod_name="vdu1-1234567890-abcd1", rsc_name="vdu1")
vnf_instance_obj.instantiatedVnfInfo.vnfcResourceInfo = [
vnfc_rsc_info_obj1
]
vim_connection_object = fakes.fake_vim_connection_info()
vnf_instance_obj.vimConnectionInfo['vim1'] = vim_connection_object
mock_list_namespaced_pod.return_value = client.V1PodList(
items=[
fakes.get_fake_pod_info(
kind='Deployment', pod_name="vdu1-1234567890-abcd1"),
fakes.get_fake_pod_info(
kind='Deployment', pod_name="vdu1-1234567890-abcd2"),
fakes.get_fake_pod_info(
kind='Deployment', pod_name="vdu1-1234567890-abcd3"),
fakes.get_fake_pod_info(
kind='Deployment', pod_name="vdu1-1234567890-abcd4")
])
mock_get_vnfd.return_value = self.vnfd_1
vnf_instance_obj.vnfdId = uuidutils.generate_uuid()
vnf_instance_obj.instantiatedVnfInfo.scaleStatus = [
fakes.fake_scale_status(vnfd_id=vnf_instance_obj.vnfdId)
]
delta = 2
mock_scale_vdu_and_num.return_value = {'VDU1': delta}
current_pod_num = 4
vdu_id = "VDU1"
ex = self.assertRaises(
sol_ex.DbSyncFailed, self.driver.sync_db,
self.context, vnf_instance_obj, vim_connection_object)
self.assertEqual(
"Error computing 'scale_level'. current Pod num: "
f"{current_pod_num} delta: {delta}. vnf: {vnf_instance_obj.id} "
f"vdu: {vdu_id}", ex.args[0])
@mock.patch.object(vnfd_utils.Vnfd, 'get_scale_vdu_and_num')
@mock.patch.object(nfvo_client.NfvoClient, 'get_vnfd')
@mock.patch.object(client.CoreV1Api, 'list_namespaced_pod')
def test_sync_db_pod_range(
self, mock_list_namespaced_pod, mock_get_vnfd,
mock_scale_vdu_and_num):
vnf_instance_obj = fakes.fake_vnf_instance()
vnfc_rsc_info_obj1, vnfc_info_obj1 = fakes.fake_vnfc_resource_info(
vdu_id='VDU1', rsc_kind='Deployment',
pod_name="vdu1-1234567890-abcd1", rsc_name="vdu1")
vnf_instance_obj.instantiatedVnfInfo.vnfcResourceInfo = [
vnfc_rsc_info_obj1
]
vim_connection_object = fakes.fake_vim_connection_info()
vnf_instance_obj.vimConnectionInfo['vim1'] = vim_connection_object
mock_list_namespaced_pod.return_value = client.V1PodList(
items=[
fakes.get_fake_pod_info(
kind='Deployment', pod_name="vdu1-1234567890-abcd1"),
fakes.get_fake_pod_info(
kind='Deployment', pod_name="vdu1-1234567890-abcd2"),
fakes.get_fake_pod_info(
kind='Deployment', pod_name="vdu1-1234567890-abcd3"),
fakes.get_fake_pod_info(
kind='Deployment', pod_name="vdu1-1234567890-abcd4"),
fakes.get_fake_pod_info(
kind='Deployment', pod_name="vdu1-1234567890-abcd5")
])
mock_get_vnfd.return_value = self.vnfd_1
vnf_instance_obj.vnfdId = uuidutils.generate_uuid()
vnf_instance_obj.instantiatedVnfInfo.scaleStatus = [
fakes.fake_scale_status(vnfd_id=vnf_instance_obj.vnfdId)
]
delta = 2
mock_scale_vdu_and_num.return_value = {'VDU1': delta}
current_pod_num = 5
vdu_id = "VDU1"
ex = self.assertRaises(
sol_ex.DbSyncFailed, self.driver.sync_db,
self.context, vnf_instance_obj, vim_connection_object)
self.assertEqual(
f"Failed to update database vnf {vnf_instance_obj.id} "
f"vdu: {vdu_id}. Pod num is out of range. "
f"pod_num: {current_pod_num}", ex.args[0])
|
{
"content_hash": "6f6d4d784e1a2f3577469280ef553954",
"timestamp": "",
"source": "github",
"line_count": 261,
"max_line_length": 76,
"avg_line_length": 42.16475095785441,
"alnum_prop": 0.616356201726488,
"repo_name": "openstack/tacker",
"id": "e35d202168445b2ffa177490da99891ff1003492",
"size": "11632",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tacker/tests/unit/sol_refactored/infra_drivers/kubernetes/test_kubernetes.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jinja",
"bytes": "10809"
},
{
"name": "Mako",
"bytes": "1046"
},
{
"name": "Python",
"bytes": "7648075"
},
{
"name": "Ruby",
"bytes": "2841"
},
{
"name": "Shell",
"bytes": "61750"
},
{
"name": "Smarty",
"bytes": "3624"
}
],
"symlink_target": ""
}
|
from html import HTML
import json
from .data import Data
from .grid import Grid
from .axes import Axes
from .legend import Legend
from .tooltip import Tooltip
from .regions import Regions
from .point import Point
from .size import Size
from .padding import Padding
class Chart(object):
"""
Create and modify a chart.
Parameters
----------
name : str
The name of the chart. This will be the id of the div that holds the chart. Therefore no two charts in the
same document should have the same name.
local_jquery : str
Path to a local version of jquery. If not provided, one hosted on a CDN is used.
**Default:** None
local_requirejs : str
Path to a local version of requirejs. If not provided, one hosted on a CDN is used.
**Default:** None
local_d3_js : str
Path to a local version of d3js. If not provided, one hosted on a CDN is used.
**Default:** None
local_c3_js : str
Path to a local version of c3js. If not provided, one hosted on a CDN is used.
**Default:** None
local_c3_css : str
Path to a local version of c3's css. If not provided, one hosted on a CDN is used.
**Default:** None
Attributes
----------
axes : c3py.axes.Axes
data : c3py.data.Data
grid : c3py.grid.Grid
legend : c3py.legend.Legend
tooltip : c3py.tooltip.Tooltip
regions : c3py.regions.Regions
point : c3py.point.Point
size : c3py.size.Size
padding : c3py.padding.Padding
"""
def __init__(self, name, local_jquery=None, local_requirejs=None, local_d3_js=None, local_c3_js=None,
local_c3_css=None):
super(Chart, self).__init__()
self.chart_html = HTML()
self.name = name
self.c3_css_path = local_c3_css or 'https://cdnjs.cloudflare.com/ajax/libs/c3/0.4.10/c3.min.css'
self.jquery_path = local_jquery or 'http://ajax.googleapis.com/ajax/libs/jquery/1.11.2/jquery.min.js'
self.requirejs_path = local_requirejs or \
'https://cdnjs.cloudflare.com/ajax/libs/require.js/2.1.17/require.min.js'
self.d3_js_path = local_d3_js or 'http://d3js.org/d3.v3.min'
self.c3_js_path = local_c3_js or 'https://cdnjs.cloudflare.com/ajax/libs/c3/0.4.10/c3.min'
self.chart_html.div('', id=self.name)
self.chart_html.link('', href=self.c3_css_path, rel='stylesheet', type='text/css')
self.chart_html.script('', src=self.jquery_path)
self.chart_html.script('', src=self.requirejs_path)
self.requirejs_config = {
'paths': {
'c3': self.c3_js_path,
'd3': self.d3_js_path,
}
}
self.axes = Axes()
self.data = Data(self.axes)
self.grid = Grid(self.axes)
self.legend = Legend()
self.tooltip = Tooltip()
self.regions = Regions(self.axes)
self.point = Point()
self.size = Size()
self.padding = Padding()
self.chart_dict = {
'bindto': "'#" + self.name + "'",
'data': self.data.config,
'axis': self.axes.config,
'grid': self.grid.config,
'legend': self.legend.config,
'tooltip': self.tooltip.config,
'regions': self.regions.config,
'point': self.point.config,
'size': self.size.config,
'padding': self.padding.config,
}
def __get_main_styles_string__(self):
style_string = ''
for style in self.regions.styles:
style_string += '''
.c3-region.{name} {{
fill: {fill};
}}
'''.format(
name=style['name'],
fill=style['fill'],
)
return style_string
def __get_main_script_string__(self):
chart_dict_string = json.dumps(self.chart_dict).replace('"', '')
script = '''
require.config({requirejs_config});
require(["d3", "c3"], function(d3, c3) {{
var chart = c3.generate({chart_dict});
}});
'''.format(
requirejs_config=json.dumps(self.requirejs_config),
chart_dict=chart_dict_string,
)
return script
def get_html_string(self):
"""
Return the HTML string which will draw the chart.
In a Jupyter notebook, this would usually be used as the argument for IPython.display.HTML(),
for the interactive chart to be displayed in the notebook.
That is, IPython.display.HTML(chart.get_html_string()).
Returns
-------
str
"""
styles = self.__get_main_styles_string__()
script = self.__get_main_script_string__()
self.chart_html.style(styles)
self.chart_html.script(script, type='text/javascript')
return unicode(self.chart_html)
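# Minimal usage sketch (illustrative, not part of the module): render a chart
# inside a Jupyter notebook. How the data series are filled in depends on the
# c3py.data.Data interface, which is elided here:
#
#   from IPython.display import HTML
#
#   chart = Chart('my_chart')
#   ...  # configure chart.data / chart.axes here
#   HTML(chart.get_html_string())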
|
{
"content_hash": "28870464d1241a127a65262a85745b23",
"timestamp": "",
"source": "github",
"line_count": 181,
"max_line_length": 114,
"avg_line_length": 27.480662983425415,
"alnum_prop": 0.5679533574587857,
"repo_name": "h0s/c3py",
"id": "e68a1553246cc09ddbd474cd116423919326591f",
"size": "4974",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "c3py/chart.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "101"
},
{
"name": "Python",
"bytes": "35689"
}
],
"symlink_target": ""
}
|
import numpy
import pyamgcl_ext
from pyamgcl_ext import coarsening, relaxation, solver_type
from scipy.sparse.linalg import LinearOperator
class make_solver:
"""
Iterative solver preconditioned by algebraic multigrid
The class builds algebraic multigrid hierarchy for the given matrix and
uses the hierarchy as a preconditioner for the specified iterative solver.
"""
def __init__(self,
A,
coarsening=pyamgcl_ext.coarsening.smoothed_aggregation,
relaxation=pyamgcl_ext.relaxation.spai0,
solver=pyamgcl_ext.solver_type.bicgstabl,
prm={}
):
"""
Class constructor.
Creates algebraic multigrid hierarchy.
Parameters
----------
A : the system matrix in scipy.sparse format
coarsening : {ruge_stuben, aggregation, *smoothed_aggregation*, smoothed_aggr_emin}
The coarsening type to use for construction of the multigrid
hierarchy.
relaxation : {damped_jacobi, gauss_seidel, chebyshev, *spai0*, ilu0}
The relaxation scheme to use for multigrid cycles.
solver : {cg, bicgstab, *bicgstabl*, gmres}
The iterative solver to use.
prm : dictionary with amgcl parameters
"""
Acsr = A.tocsr()
self.S = pyamgcl_ext.make_solver(
coarsening, relaxation, solver, prm,
Acsr.indptr.astype(numpy.int32),
Acsr.indices.astype(numpy.int32),
Acsr.data.astype(numpy.float64)
)
def __repr__(self):
"""
Provides information about the multigrid hierarchy.
"""
return self.S.__repr__()
def __call__(self, *args):
"""
Solves the system for the given system matrix and the right-hand side.
        If a single argument is given, it is considered to be the right-hand
        side. The matrix given at construction time is used for the solution.
        If two arguments are given, the first one should be a new system
        matrix, and the second is the right-hand side. In this case the
multigrid hierarchy initially built at construction is still used as a
preconditioner. This may be of use for solution of non-steady-state
PDEs, where the discretized system matrix slightly changes on each time
step, but multigrid hierarchy built for one of previous time steps is
still able to work as a decent preconditioner. Thus time needed for
hierarchy reconstruction is saved.
Parameters
----------
A : the new system matrix (optional)
rhs : the right-hand side
"""
if len(args) == 1:
return self.S( args[0].astype(numpy.float64) )
elif len(args) == 2:
Acsr = args[0].tocsr()
return self.S(
Acsr.indptr.astype(numpy.int32),
Acsr.indices.astype(numpy.int32),
Acsr.data.astype(numpy.float64),
args[1].astype(numpy.float64)
)
else:
raise "Wrong number of arguments"
def iterations(self):
"""
Returns iterations made during last solve
"""
return self.S.iterations()
def residual(self):
"""
Returns relative error achieved during last solve
"""
return self.S.residual()
class make_preconditioner(LinearOperator):
"""
Algebraic multigrid hierarchy that may be used as a preconditioner with
scipy iterative solvers.
"""
def __init__(self,
A,
coarsening=pyamgcl_ext.coarsening.smoothed_aggregation,
relaxation=pyamgcl_ext.relaxation.spai0,
prm={}
):
"""
Class constructor.
Creates algebraic multigrid hierarchy.
Parameters
----------
A : the system matrix in scipy.sparse format
coarsening : {ruge_stuben, aggregation, *smoothed_aggregation*, smoothed_aggr_emin}
The coarsening type to use for construction of the multigrid
hierarchy.
relaxation : {damped_jacobi, gauss_seidel, chebyshev, *spai0*, ilu0}
The relaxation scheme to use for multigrid cycles.
prm : dictionary with amgcl parameters
"""
Acsr = A.tocsr()
self.P = pyamgcl_ext.make_preconditioner(
coarsening, relaxation, prm,
Acsr.indptr.astype(numpy.int32),
Acsr.indices.astype(numpy.int32),
Acsr.data.astype(numpy.float64)
)
LinearOperator.__init__(self, A.shape, self.P)
def __repr__(self):
"""
Provides information about the multigrid hierarchy.
"""
return self.P.__repr__()
def __call__(self, x):
"""
Preconditions the given vector.
"""
return self.P(x.astype(numpy.float64))
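# Minimal usage sketch (illustrative; kept as a comment so that importing the
# package stays side-effect free). Assumes scipy is installed and pyamgcl_ext
# is built; solves a 1D Poisson problem with the default AMG settings:
#
#   import numpy
#   from scipy.sparse import diags
#   from pyamgcl import make_solver
#
#   n = 1000
#   A = diags([-1.0, 2.0, -1.0], [-1, 0, 1], shape=(n, n), format='csr')
#   solve = make_solver(A)
#   x = solve(numpy.ones(n))
#   print(solve.iterations(), solve.residual())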
|
{
"content_hash": "65db17242ccd08c316ad5c2d829fb7f5",
"timestamp": "",
"source": "github",
"line_count": 148,
"max_line_length": 91,
"avg_line_length": 33.851351351351354,
"alnum_prop": 0.5904191616766467,
"repo_name": "huahbo/amgcl",
"id": "e80ea144b025b128275d0f78957002a810945d2f",
"size": "5010",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyamgcl/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "6433"
},
{
"name": "C++",
"bytes": "476186"
},
{
"name": "CMake",
"bytes": "10753"
},
{
"name": "Pascal",
"bytes": "8852"
},
{
"name": "Python",
"bytes": "11286"
}
],
"symlink_target": ""
}
|
from unittest import TestCase
import numpy as np
import pandas as pd
from rosie.chamber_of_deputies.classifiers.monthly_subquota_limit_classifier import MonthlySubquotaLimitClassifier
class TestMonthlySubquotaLimitClassifier(TestCase):
'''Testing Monthly Subquota Limit Classifier.
To include new test cases edit `MONTHLY_SUBQUOTA_LIMIT_FIXTURE_FILE`.
Each test case must have the following fields (see existing test cases as examples):
applicant_id:
A personal identifier code for every person making expenses.
Use the same number to group a test case that requires more than one
expense request.
subquota_number:
A number to classify a category of expenses.
Allowed values:
3 -- Fuels and lubricants
8 -- Security service provided by specialized company
120 -- Automotive vehicle renting or charter
122 -- Taxi, toll and parking
137 -- Participation in course, talk or similar event
issue_date:
Date when the expense was made.
year:
The quota year matching the expense request.
month:
The quota month matching the expense request.
net_value:
The value of the expense.
expected_prediction:
True or False indicating if this test case must be classified as suspicious or not.
test_case_description:
Description of what is being tested in this test case (also showed when test fails)
'''
MONTHLY_SUBQUOTA_LIMIT_FIXTURE_FILE = 'rosie/chamber_of_deputies/tests/fixtures/monthly_subquota_limit_classifier.csv'
def setUp(self):
self.full_dataset = pd.read_csv(
self.MONTHLY_SUBQUOTA_LIMIT_FIXTURE_FILE, dtype={'subquota_number': np.str})
self.dataset = self.full_dataset[
['applicant_id', 'subquota_number', 'issue_date', 'year', 'month', 'net_value']]
self.test_result_dataset = self.full_dataset[['expected_prediction', 'test_case_description']]
self.subject = MonthlySubquotaLimitClassifier()
self.subject.fit_transform(self.dataset)
self.prediction = self.subject.predict(self.dataset)
def test_predictions(self):
for index, row in self.test_result_dataset.iterrows():
self.assertEqual(
self.prediction[index],
row['expected_prediction'],
msg='Line {0}: {1}'.format(row, row['test_case_description']))
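# Illustrative fixture row (hypothetical values, not taken from the real CSV;
# the actual cases live in MONTHLY_SUBQUOTA_LIMIT_FIXTURE_FILE and follow the
# fields documented in the class docstring above):
#
#   applicant_id,subquota_number,issue_date,year,month,net_value,expected_prediction,test_case_description
#   1,120,2016-05-10,2016,5,11000,True,Expense assumed to exceed the monthly subquota limit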
|
{
"content_hash": "2261e94d2bc125bb7344d7f879eda0df",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 122,
"avg_line_length": 36.029411764705884,
"alnum_prop": 0.6787755102040817,
"repo_name": "marcusrehm/serenata-de-amor",
"id": "76fa23db904e0294389d7d1c321bd2383f296187",
"size": "2450",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "rosie/rosie/chamber_of_deputies/tests/test_monthly_subquota_limit_classifier.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "301"
},
{
"name": "Elm",
"bytes": "131019"
},
{
"name": "HTML",
"bytes": "4527"
},
{
"name": "JavaScript",
"bytes": "1468"
},
{
"name": "Python",
"bytes": "425718"
},
{
"name": "Shell",
"bytes": "145"
}
],
"symlink_target": ""
}
|
"""
WSGI config for librosmasvendidos project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "librosmasvendidos.settings")
application = get_wsgi_application()
|
{
"content_hash": "4d63b2e8c60d6ebf8cdbf3c4ade687e4",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 78,
"avg_line_length": 25.6875,
"alnum_prop": 0.781021897810219,
"repo_name": "sn1k/listadelibros",
"id": "53f1c7924f5603db7bbcc4ba62d7a1cb7baf4c60",
"size": "411",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "librosmasvendidos/wsgi.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "1245"
},
{
"name": "Python",
"bytes": "7537"
}
],
"symlink_target": ""
}
|
import sys
sys.path.insert(1,"../../../")
import h2o
from tests import pyunit_utils
def anyfactor():
iris = h2o.import_file(path=pyunit_utils.locate("smalldata/iris/iris.csv"))
# frame (positive example)
assert iris.anyfactor(), "Expected true, but got false. Column 5 is a factor."
# frame (negative example)
assert not iris[:,:4].anyfactor(), "Expected false, but got true. Columns 1-4 are numeric."
# vec (positive example)
assert iris[4].anyfactor(), "Expected true, but got false. Column 5 is a factor."
# vec (negative example)
    assert not iris[0].anyfactor(), "Expected false, but got true. Column 1 is numeric."
if __name__ == "__main__":
pyunit_utils.standalone_test(anyfactor)
else:
anyfactor()
|
{
"content_hash": "67c0805a39fe05cab4d228533a515ec1",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 95,
"avg_line_length": 24.09375,
"alnum_prop": 0.6562905317769131,
"repo_name": "h2oai/h2o-dev",
"id": "f92795e81030754707ad48fa623fa2a84a626465",
"size": "771",
"binary": false,
"copies": "10",
"ref": "refs/heads/master",
"path": "h2o-py/tests/testdir_munging/unop/pyunit_anyfactor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "5090"
},
{
"name": "CSS",
"bytes": "162399"
},
{
"name": "CoffeeScript",
"bytes": "267048"
},
{
"name": "Emacs Lisp",
"bytes": "6465"
},
{
"name": "HTML",
"bytes": "140849"
},
{
"name": "Java",
"bytes": "6216622"
},
{
"name": "JavaScript",
"bytes": "38932"
},
{
"name": "Jupyter Notebook",
"bytes": "5585408"
},
{
"name": "Makefile",
"bytes": "34105"
},
{
"name": "Python",
"bytes": "2644394"
},
{
"name": "R",
"bytes": "1848754"
},
{
"name": "Rebol",
"bytes": "7059"
},
{
"name": "Ruby",
"bytes": "3506"
},
{
"name": "Scala",
"bytes": "22830"
},
{
"name": "Shell",
"bytes": "47513"
},
{
"name": "TeX",
"bytes": "579960"
}
],
"symlink_target": ""
}
|
from django.http import HttpResponse
import json
from zipfile import ZipFile
from metashare import settings
from django.db.models import Q
from django.shortcuts import get_object_or_404
from metashare.storage.models import StorageObject, MASTER, PROXY, INTERNAL, \
REMOTE
def inventory(request):
if settings.SYNC_NEEDS_AUTHENTICATION and not request.user.has_perm('storage.can_sync'):
return HttpResponse("Forbidden: only synchronization users can access this page.", status=403)
# check for compatible sync protocol version
sync_protocol = None
if 'sync_protocol' in request.GET:
for _prot in request.GET.getlist('sync_protocol'):
if _prot in settings.SYNC_PROTOCOLS:
sync_protocol = _prot
break
if not sync_protocol:
        # either no sync protocol parameter was sent (which means the client is
# pre 3.0 and that synchronization protocol is no longer supported) or
# no match was found between the client and server supported sync
# protocols
return HttpResponse(status=501)
response = HttpResponse(status=200, content_type='application/zip')
response['Metashare-Version'] = settings.METASHARE_VERSION
response['Content-Disposition'] = 'attachment; filename="inventory.zip"'
response['Sync-Protocol'] = sync_protocol
# collect inventory for existing resources;
# consists of key - value pairs of resource identifiers and digest checksums
json_response = {}
objects_to_sync = StorageObject.objects \
.filter(Q(copy_status=MASTER) | Q(copy_status=PROXY)) \
.exclude(publication_status=INTERNAL)
# 'from' parameter for restricting the inventory CAN NOT be used anymore
    # since it would break the automatic detection of deleted resources
# if 'from' in request.GET:
# try:
# fromdate = dateutil.parser.parse(request.GET['from'])
# objects_to_sync = objects_to_sync.filter(digest_modified__gte=fromdate)
# except ValueError:
# # If we cannot parse the date string, act as if none was provided
# pass
for obj in objects_to_sync:
json_response[obj.identifier] = obj.get_digest_checksum()
with ZipFile(response, 'w') as outzip:
outzip.writestr('inventory.json', json.dumps(json_response))
return response
def full_metadata(request, resource_uuid):
if settings.SYNC_NEEDS_AUTHENTICATION and not request.user.has_perm('storage.can_sync'):
return HttpResponse("Forbidden: only synchronization users can access this page.", status=403)
storage_object = get_object_or_404(StorageObject, identifier=resource_uuid)
if storage_object.publication_status == INTERNAL:
return HttpResponse("Forbidden: the given resource is internal at this time.", status=403)
if storage_object.copy_status == REMOTE:
return HttpResponse("Forbidden: the specified resource is a `REMOTE` " \
"resource and cannot be distributed by this node.", status=403)
response = HttpResponse(status=200, content_type='application/zip')
response['Metashare-Version'] = settings.METASHARE_VERSION
response['Content-Disposition'] = 'attachment; filename="full-metadata.zip"'
if storage_object.digest_checksum is None:
storage_object.update_storage()
#if storage_object.digest_checksum is None: # still no digest? something is very wrong here:
# raise Exception("Object {0} has no digest".format(resource_uuid))
zipfilename = "{0}/resource.zip".format(storage_object._storage_folder())
with open(zipfilename, 'rb') as inzip:
zipfiledata = inzip.read()
response.write(zipfiledata)
# with ZipFile(response, 'w') as outzip:
# outzip.writestr('storage-global.json', str(storage_object.identifier))
# outzip.writestr('metadata.xml', storage_object.metadata.encode('utf-8'))
return response
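# Illustrative client-side sketch (an assumption, not part of this module):
# fetching and unpacking the inventory served above. The '/sync/' URL and the
# protocol identifiers are placeholders; the server matches the submitted
# 'sync_protocol' values against settings.SYNC_PROTOCOLS.
#
#   import io, json, requests
#   from zipfile import ZipFile
#
#   resp = requests.get(node_url + '/sync/',
#                       params={'sync_protocol': supported_protocols})
#   with ZipFile(io.BytesIO(resp.content)) as inzip:
#       inventory = json.loads(inzip.read('inventory.json'))
#   # inventory: {resource identifier: digest checksum}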
|
{
"content_hash": "441199d3e214e26b360838685e2bb215",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 102,
"avg_line_length": 48.888888888888886,
"alnum_prop": 0.696969696969697,
"repo_name": "MiltosD/CEF-ELRC",
"id": "4368be2c3d0a31a03a7fa6fe24ef84f15780d232",
"size": "3960",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "metashare/sync/views.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "7362"
},
{
"name": "C",
"bytes": "321"
},
{
"name": "C++",
"bytes": "112277"
},
{
"name": "CSS",
"bytes": "220423"
},
{
"name": "HTML",
"bytes": "2722281"
},
{
"name": "Java",
"bytes": "12780"
},
{
"name": "JavaScript",
"bytes": "362648"
},
{
"name": "Makefile",
"bytes": "26172"
},
{
"name": "Python",
"bytes": "10258932"
},
{
"name": "Shell",
"bytes": "111376"
},
{
"name": "XSLT",
"bytes": "473763"
}
],
"symlink_target": ""
}
|
"""Implementation of SQLAlchemy backend."""
import collections
import datetime as dt
import functools
import re
import sys
import threading
import time
import uuid
from oslo_config import cfg
from oslo_db import exception as db_exc
from oslo_db import options
from oslo_db.sqlalchemy import session as db_session
from oslo_log import log as logging
from oslo_utils import importutils
from oslo_utils import timeutils
from oslo_utils import uuidutils
osprofiler_sqlalchemy = importutils.try_import('osprofiler.sqlalchemy')
import six
import sqlalchemy
from sqlalchemy import MetaData
from sqlalchemy import or_, and_, case
from sqlalchemy.orm import joinedload, joinedload_all
from sqlalchemy.orm import RelationshipProperty
from sqlalchemy.schema import Table
from sqlalchemy import sql
from sqlalchemy.sql.expression import desc
from sqlalchemy.sql.expression import literal_column
from sqlalchemy.sql.expression import true
from sqlalchemy.sql import func
from sqlalchemy.sql import sqltypes
from jacket.api.storage import common
from jacket.common.storage import sqlalchemyutils
from jacket.db import storage
from jacket.db.storage.sqlalchemy import models
from jacket.storage import exception
from jacket.storage.i18n import _, _LW, _LE, _LI
from jacket.objects.storage import fields
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
options.set_defaults(CONF, connection='sqlite:///$state_path/storage.sqlite')
_LOCK = threading.Lock()
_FACADE = None
def _create_facade_lazily():
global _LOCK
with _LOCK:
global _FACADE
if _FACADE is None:
_FACADE = db_session.EngineFacade(
CONF.database.connection,
**dict(CONF.database)
)
# NOTE(geguileo): To avoid a cyclical dependency we import the
# group here. Dependency cycle is objects.base requires storage.api,
# which requires storage.sqlalchemy.api, which requires service which
# requires objects.base
CONF.import_group("profiler", "jacket.service")
if CONF.profiler.enabled:
if CONF.profiler.trace_sqlalchemy:
osprofiler_sqlalchemy.add_tracing(sqlalchemy,
_FACADE.get_engine(),
"storage")
return _FACADE
def get_engine():
facade = _create_facade_lazily()
return facade.get_engine()
def get_session(**kwargs):
facade = _create_facade_lazily()
return facade.get_session(**kwargs)
def dispose_engine():
get_engine().dispose()
_DEFAULT_QUOTA_NAME = 'default'
def get_backend():
"""The backend is this module itself."""
return sys.modules[__name__]
def is_admin_context(context):
"""Indicates if the request context is an administrator."""
if not context:
LOG.warning(_LW('Use of empty request context is deprecated'),
DeprecationWarning)
raise Exception('die')
return context.is_admin
def is_user_context(context):
"""Indicates if the request context is a normal user."""
if not context:
return False
if context.is_admin:
return False
if not context.user_id or not context.project_id:
return False
return True
def authorize_project_context(context, project_id):
"""Ensures a request has permission to access the given project."""
if is_user_context(context):
if not context.project_id:
raise exception.NotAuthorized()
elif context.project_id != project_id:
raise exception.NotAuthorized()
def authorize_user_context(context, user_id):
"""Ensures a request has permission to access the given user."""
if is_user_context(context):
if not context.user_id:
raise exception.NotAuthorized()
elif context.user_id != user_id:
raise exception.NotAuthorized()
def authorize_quota_class_context(context, class_name):
"""Ensures a request has permission to access the given quota class."""
if is_user_context(context):
if not context.quota_class:
raise exception.NotAuthorized()
elif context.quota_class != class_name:
raise exception.NotAuthorized()
def require_admin_context(f):
"""Decorator to require admin request context.
The first argument to the wrapped function must be the context.
"""
def wrapper(*args, **kwargs):
if not is_admin_context(args[0]):
raise exception.AdminRequired()
return f(*args, **kwargs)
return wrapper
def require_context(f):
"""Decorator to require *any* user or admin context.
This does no authorization for user or project access matching, see
:py:func:`authorize_project_context` and
:py:func:`authorize_user_context`.
The first argument to the wrapped function must be the context.
"""
def wrapper(*args, **kwargs):
if not is_admin_context(args[0]) and not is_user_context(args[0]):
raise exception.NotAuthorized()
return f(*args, **kwargs)
return wrapper
def require_volume_exists(f):
"""Decorator to require the specified volume to exist.
Requires the wrapped function to use context and volume_id as
their first two arguments.
"""
def wrapper(context, volume_id, *args, **kwargs):
volume_get(context, volume_id)
return f(context, volume_id, *args, **kwargs)
wrapper.__name__ = f.__name__
return wrapper
def require_snapshot_exists(f):
"""Decorator to require the specified snapshot to exist.
Requires the wrapped function to use context and snapshot_id as
their first two arguments.
"""
def wrapper(context, snapshot_id, *args, **kwargs):
snapshot_get(context, snapshot_id)
return f(context, snapshot_id, *args, **kwargs)
wrapper.__name__ = f.__name__
return wrapper
def _retry_on_deadlock(f):
"""Decorator to retry a DB API call if Deadlock was received."""
@functools.wraps(f)
def wrapped(*args, **kwargs):
while True:
try:
return f(*args, **kwargs)
except db_exc.DBDeadlock:
LOG.warning(_LW("Deadlock detected when running "
"'%(func_name)s': Retrying..."),
dict(func_name=f.__name__))
# Retry!
time.sleep(0.5)
continue
functools.update_wrapper(wrapped, f)
return wrapped
def handle_db_data_error(f):
def wrapper(*args, **kwargs):
try:
return f(*args, **kwargs)
except db_exc.DBDataError:
msg = _('Error writing field to database')
LOG.exception(msg)
raise exception.Invalid(msg)
return wrapper
def model_query(context, *args, **kwargs):
"""Query helper that accounts for context's `read_deleted` field.
:param context: context to query under
:param session: if present, the session to use
:param read_deleted: if present, overrides context's read_deleted field.
:param project_only: if present and context is user-type, then restrict
query to match the context's project_id.
"""
session = kwargs.get('session') or get_session()
read_deleted = kwargs.get('read_deleted') or context.read_deleted
project_only = kwargs.get('project_only')
query = session.query(*args)
if read_deleted == 'no':
query = query.filter_by(deleted=False)
elif read_deleted == 'yes':
pass # omit the filter to include deleted and active
elif read_deleted == 'only':
query = query.filter_by(deleted=True)
elif read_deleted == 'int_no':
query = query.filter_by(deleted=0)
else:
raise Exception(
_("Unrecognized read_deleted value '%s'") % read_deleted)
if project_only and is_user_context(context):
query = query.filter_by(project_id=context.project_id)
return query
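# Illustrative usage (not part of the original API): the helper composes with
# regular SQLAlchemy filtering, e.g. fetching only soft-deleted services
# (the topic value is hypothetical):
#
#   model_query(ctxt, models.Service, read_deleted='only').\
#       filter_by(topic='storage-scheduler').all()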
def _sync_volumes(context, project_id, session, volume_type_id=None,
volume_type_name=None):
(volumes, _gigs) = _volume_data_get_for_project(
context, project_id, volume_type_id=volume_type_id, session=session)
key = 'volumes'
if volume_type_name:
key += '_' + volume_type_name
return {key: volumes}
def _sync_snapshots(context, project_id, session, volume_type_id=None,
volume_type_name=None):
(snapshots, _gigs) = _snapshot_data_get_for_project(
context, project_id, volume_type_id=volume_type_id, session=session)
key = 'snapshots'
if volume_type_name:
key += '_' + volume_type_name
return {key: snapshots}
def _sync_backups(context, project_id, session, volume_type_id=None,
volume_type_name=None):
(backups, _gigs) = _backup_data_get_for_project(
context, project_id, volume_type_id=volume_type_id, session=session)
key = 'backups'
return {key: backups}
def _sync_gigabytes(context, project_id, session, volume_type_id=None,
volume_type_name=None):
(_junk, vol_gigs) = _volume_data_get_for_project(
context, project_id, volume_type_id=volume_type_id, session=session)
key = 'gigabytes'
if volume_type_name:
key += '_' + volume_type_name
if CONF.no_snapshot_gb_quota:
return {key: vol_gigs}
(_junk, snap_gigs) = _snapshot_data_get_for_project(
context, project_id, volume_type_id=volume_type_id, session=session)
return {key: vol_gigs + snap_gigs}
def _sync_consistencygroups(context, project_id, session,
volume_type_id=None,
volume_type_name=None):
(_junk, groups) = _consistencygroup_data_get_for_project(
context, project_id, session=session)
key = 'consistencygroups'
return {key: groups}
def _sync_backup_gigabytes(context, project_id, session, volume_type_id=None,
volume_type_name=None):
key = 'backup_gigabytes'
(_junk, backup_gigs) = _backup_data_get_for_project(
context, project_id, volume_type_id=volume_type_id, session=session)
return {key: backup_gigs}
QUOTA_SYNC_FUNCTIONS = {
'_sync_volumes': _sync_volumes,
'_sync_snapshots': _sync_snapshots,
'_sync_gigabytes': _sync_gigabytes,
'_sync_consistencygroups': _sync_consistencygroups,
'_sync_backups': _sync_backups,
'_sync_backup_gigabytes': _sync_backup_gigabytes
}
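# Illustrative dispatch (an assumption about how the mapping above is used):
# quota resources carry the name of their sync function, which is looked up
# and called to recount usage, e.g.:
#
#   sync = QUOTA_SYNC_FUNCTIONS['_sync_volumes']
#   sync(ctxt, project_id, session)   # -> {'volumes': <count>}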
###################
@require_admin_context
def service_destroy(context, service_id):
session = get_session()
with session.begin():
service_ref = _service_get(context, service_id, session=session)
service_ref.delete(session=session)
@require_admin_context
def _service_get(context, service_id, session=None):
result = model_query(
context,
models.Service,
session=session).\
filter_by(id=service_id).\
first()
if not result:
raise exception.ServiceNotFound(service_id=service_id)
return result
@require_admin_context
def service_get(context, service_id):
return _service_get(context, service_id)
@require_admin_context
def service_get_all(context, filters=None):
if filters and not is_valid_model_filters(models.Service, filters):
return []
query = model_query(context, models.Service)
if filters:
try:
host = filters.pop('host')
host_attr = models.Service.host
conditions = or_(host_attr ==
host, host_attr.op('LIKE')(host + '@%'))
query = query.filter(conditions)
except KeyError:
pass
query = query.filter_by(**filters)
return query.all()
@require_admin_context
def service_get_all_by_topic(context, topic, disabled=None):
query = model_query(
context, models.Service, read_deleted="no").\
filter_by(topic=topic)
if disabled is not None:
query = query.filter_by(disabled=disabled)
return query.all()
@require_admin_context
def service_get_all_by_binary(context, binary, disabled=None):
query = model_query(
context, models.Service, read_deleted="no").filter_by(binary=binary)
if disabled is not None:
query = query.filter_by(disabled=disabled)
return query.all()
@require_admin_context
def service_get_by_host_and_topic(context, host, topic):
result = model_query(
context, models.Service, read_deleted="no").\
filter_by(disabled=False).\
filter_by(host=host).\
filter_by(topic=topic).\
first()
if not result:
raise exception.ServiceNotFound(service_id=topic,
host=host)
return result
@require_admin_context
def service_get_by_args(context, host, binary):
results = model_query(context, models.Service).\
filter_by(host=host).\
filter_by(binary=binary).\
all()
for result in results:
if host == result['host']:
return result
raise exception.ServiceNotFound(service_id=binary,
host=host)
@require_admin_context
def service_create(context, values):
service_ref = models.Service()
service_ref.update(values)
if not CONF.enable_new_services:
service_ref.disabled = True
session = get_session()
with session.begin():
service_ref.save(session)
return service_ref
@require_admin_context
@_retry_on_deadlock
def service_update(context, service_id, values):
session = get_session()
with session.begin():
service_ref = _service_get(context, service_id, session=session)
        if 'disabled' in values:
service_ref['modified_at'] = timeutils.utcnow()
service_ref['updated_at'] = literal_column('updated_at')
service_ref.update(values)
return service_ref
###################
def _metadata_refs(metadata_dict, meta_class):
metadata_refs = []
if metadata_dict:
for k, v in metadata_dict.items():
metadata_ref = meta_class()
metadata_ref['key'] = k
metadata_ref['value'] = v
metadata_refs.append(metadata_ref)
return metadata_refs
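# For example, _metadata_refs({'k1': 'v1'}, models.VolumeMetadata) returns a
# one-element list whose item carries key='k1' and value='v1'; a None or
# empty metadata_dict yields [].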
def _dict_with_extra_specs_if_authorized(context, inst_type_query):
"""Convert type query result to dict with extra_spec and rate_limit.
    Takes a volume type query returned by sqlalchemy and returns it
    as a dictionary, converting the extra_specs entry from a list
    of dicts to a single dict. NOTE: the contents of extra-specs are
    admin readable only. If the context passed in for this request is
    not admin then we omit the extra-specs entry entirely rather than
    provide the admin-only details.
    Example response with admin context:
    'extra_specs' : [{'key': 'k1', 'value': 'v1', ...}, ...]
    is converted to a single dict:
    'extra_specs' : {'k1': 'v1'}
    """
inst_type_dict = dict(inst_type_query)
if not is_admin_context(context):
        del inst_type_dict['extra_specs']
else:
extra_specs = {x['key']: x['value']
for x in inst_type_query['extra_specs']}
inst_type_dict['extra_specs'] = extra_specs
return inst_type_dict
###################
@require_context
def _quota_get(context, project_id, resource, session=None):
result = model_query(context, models.Quota, session=session,
read_deleted="no").\
filter_by(project_id=project_id).\
filter_by(resource=resource).\
first()
if not result:
raise exception.ProjectQuotaNotFound(project_id=project_id)
return result
@require_context
def quota_get(context, project_id, resource):
return _quota_get(context, project_id, resource)
@require_context
def quota_get_all_by_project(context, project_id):
authorize_project_context(context, project_id)
rows = model_query(context, models.Quota, read_deleted="no").\
filter_by(project_id=project_id).\
all()
result = {'project_id': project_id}
for row in rows:
result[row.resource] = row.hard_limit
return result
@require_context
def quota_allocated_get_all_by_project(context, project_id):
rows = model_query(context, models.Quota, read_deleted='no').filter_by(
project_id=project_id).all()
result = {'project_id': project_id}
for row in rows:
result[row.resource] = row.allocated
return result
@require_context
def _quota_get_by_resource(context, resource, session=None):
rows = model_query(context, models.Quota,
session=session,
read_deleted='no').filter_by(
resource=resource).all()
return rows
@require_admin_context
def quota_create(context, project_id, resource, limit, allocated):
quota_ref = models.Quota()
quota_ref.project_id = project_id
quota_ref.resource = resource
quota_ref.hard_limit = limit
if allocated:
quota_ref.allocated = allocated
session = get_session()
with session.begin():
quota_ref.save(session)
return quota_ref
@require_admin_context
def quota_update(context, project_id, resource, limit):
session = get_session()
with session.begin():
quota_ref = _quota_get(context, project_id, resource, session=session)
quota_ref.hard_limit = limit
return quota_ref
@require_context
def quota_update_resource(context, old_res, new_res):
session = get_session()
with session.begin():
quotas = _quota_get_by_resource(context, old_res, session=session)
for quota in quotas:
quota.resource = new_res
@require_admin_context
def quota_allocated_update(context, project_id, resource, allocated):
session = get_session()
with session.begin():
quota_ref = _quota_get(context, project_id, resource, session=session)
quota_ref.allocated = allocated
return quota_ref
@require_admin_context
def quota_destroy(context, project_id, resource):
session = get_session()
with session.begin():
quota_ref = _quota_get(context, project_id, resource, session=session)
quota_ref.delete(session=session)
###################
@require_context
def _quota_class_get(context, class_name, resource, session=None):
result = model_query(context, models.QuotaClass, session=session,
read_deleted="no").\
filter_by(class_name=class_name).\
filter_by(resource=resource).\
first()
if not result:
raise exception.QuotaClassNotFound(class_name=class_name)
return result
@require_context
def quota_class_get(context, class_name, resource):
return _quota_class_get(context, class_name, resource)
def quota_class_get_default(context):
rows = model_query(context, models.QuotaClass,
read_deleted="no").\
filter_by(class_name=_DEFAULT_QUOTA_NAME).all()
result = {'class_name': _DEFAULT_QUOTA_NAME}
for row in rows:
result[row.resource] = row.hard_limit
return result
@require_context
def quota_class_get_all_by_name(context, class_name):
authorize_quota_class_context(context, class_name)
rows = model_query(context, models.QuotaClass, read_deleted="no").\
filter_by(class_name=class_name).\
all()
result = {'class_name': class_name}
for row in rows:
result[row.resource] = row.hard_limit
return result
@require_context
def _quota_class_get_all_by_resource(context, resource, session):
result = model_query(context, models.QuotaClass,
session=session,
read_deleted="no").\
filter_by(resource=resource).\
all()
return result
@handle_db_data_error
@require_admin_context
def quota_class_create(context, class_name, resource, limit):
quota_class_ref = models.QuotaClass()
quota_class_ref.class_name = class_name
quota_class_ref.resource = resource
quota_class_ref.hard_limit = limit
session = get_session()
with session.begin():
quota_class_ref.save(session)
return quota_class_ref
@require_admin_context
def quota_class_update(context, class_name, resource, limit):
session = get_session()
with session.begin():
quota_class_ref = _quota_class_get(context, class_name, resource,
session=session)
quota_class_ref.hard_limit = limit
return quota_class_ref
@require_context
def quota_class_update_resource(context, old_res, new_res):
session = get_session()
with session.begin():
quota_class_list = _quota_class_get_all_by_resource(
context, old_res, session)
for quota_class in quota_class_list:
quota_class.resource = new_res
@require_admin_context
def quota_class_destroy(context, class_name, resource):
session = get_session()
with session.begin():
quota_class_ref = _quota_class_get(context, class_name, resource,
session=session)
quota_class_ref.delete(session=session)
@require_admin_context
def quota_class_destroy_all_by_name(context, class_name):
session = get_session()
with session.begin():
quota_classes = model_query(context, models.QuotaClass,
session=session, read_deleted="no").\
filter_by(class_name=class_name).\
all()
for quota_class_ref in quota_classes:
quota_class_ref.delete(session=session)
###################
@require_context
def quota_usage_get(context, project_id, resource):
result = model_query(context, models.QuotaUsage, read_deleted="no").\
filter_by(project_id=project_id).\
filter_by(resource=resource).\
first()
if not result:
raise exception.QuotaUsageNotFound(project_id=project_id)
return result
@require_context
def quota_usage_get_all_by_project(context, project_id):
authorize_project_context(context, project_id)
rows = model_query(context, models.QuotaUsage, read_deleted="no").\
filter_by(project_id=project_id).\
all()
result = {'project_id': project_id}
for row in rows:
result[row.resource] = dict(in_use=row.in_use, reserved=row.reserved)
return result
@require_admin_context
def _quota_usage_create(context, project_id, resource, in_use, reserved,
until_refresh, session=None):
quota_usage_ref = models.QuotaUsage()
quota_usage_ref.project_id = project_id
quota_usage_ref.resource = resource
quota_usage_ref.in_use = in_use
quota_usage_ref.reserved = reserved
quota_usage_ref.until_refresh = until_refresh
quota_usage_ref.save(session=session)
return quota_usage_ref
###################
def _reservation_create(context, uuid, usage, project_id, resource, delta,
expire, session=None, allocated_id=None):
usage_id = usage['id'] if usage else None
reservation_ref = models.Reservation()
reservation_ref.uuid = uuid
reservation_ref.usage_id = usage_id
reservation_ref.project_id = project_id
reservation_ref.resource = resource
reservation_ref.delta = delta
reservation_ref.expire = expire
reservation_ref.allocated_id = allocated_id
reservation_ref.save(session=session)
return reservation_ref
###################
# NOTE(johannes): The quota code uses SQL locking to ensure races don't
# cause under or over counting of resources. To avoid deadlocks, this
# code always acquires the lock on quota_usages before acquiring the lock
# on reservations.
def _get_quota_usages(context, session, project_id):
# Broken out for testability
rows = model_query(context, models.QuotaUsage,
read_deleted="no",
session=session).\
filter_by(project_id=project_id).\
order_by(models.QuotaUsage.id.asc()).\
with_lockmode('update').\
all()
return {row.resource: row for row in rows}
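# The dict returned by _get_quota_usages maps resource name to its locked
# QuotaUsage row; the SELECT ... FOR UPDATE (with_lockmode('update')) holds
# those row locks for the remainder of the enclosing transaction.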
def _get_quota_usages_by_resource(context, session, resource):
rows = model_query(context, models.QuotaUsage,
                       read_deleted="no",
session=session).\
filter_by(resource=resource).\
order_by(models.QuotaUsage.id.asc()).\
with_lockmode('update').\
all()
return rows
@require_context
@_retry_on_deadlock
def quota_usage_update_resource(context, old_res, new_res):
session = get_session()
with session.begin():
usages = _get_quota_usages_by_resource(context, session, old_res)
for usage in usages:
usage.resource = new_res
usage.until_refresh = 1
@require_context
@_retry_on_deadlock
def quota_reserve(context, resources, quotas, deltas, expire,
until_refresh, max_age, project_id=None,
is_allocated_reserve=False):
elevated = context.elevated()
session = get_session()
with session.begin():
if project_id is None:
project_id = context.project_id
# Get the current usages
usages = _get_quota_usages(context, session, project_id)
allocated = quota_allocated_get_all_by_project(context, project_id)
allocated.pop('project_id')
# Handle usage refresh
work = set(deltas.keys())
while work:
resource = work.pop()
# Do we need to refresh the usage?
refresh = False
if resource not in usages:
usages[resource] = _quota_usage_create(elevated,
project_id,
resource,
0, 0,
until_refresh or None,
session=session)
refresh = True
elif usages[resource].in_use < 0:
# Negative in_use count indicates a desync, so try to
# heal from that...
refresh = True
elif usages[resource].until_refresh is not None:
usages[resource].until_refresh -= 1
if usages[resource].until_refresh <= 0:
refresh = True
            elif max_age and usages[resource].updated_at is not None and (
                    (timeutils.utcnow() -
                     usages[resource].updated_at).total_seconds() >= max_age):
refresh = True
# OK, refresh the usage
if refresh:
# Grab the sync routine
sync = QUOTA_SYNC_FUNCTIONS[resources[resource].sync]
volume_type_id = getattr(resources[resource],
'volume_type_id', None)
volume_type_name = getattr(resources[resource],
'volume_type_name', None)
updates = sync(elevated, project_id,
volume_type_id=volume_type_id,
volume_type_name=volume_type_name,
session=session)
for res, in_use in updates.items():
# Make sure we have a destination for the usage!
if res not in usages:
usages[res] = _quota_usage_create(
elevated,
project_id,
res,
0, 0,
until_refresh or None,
session=session
)
# Update the usage
usages[res].in_use = in_use
usages[res].until_refresh = until_refresh or None
# Because more than one resource may be refreshed
# by the call to the sync routine, and we don't
# want to double-sync, we make sure all refreshed
# resources are dropped from the work set.
work.discard(res)
# NOTE(Vek): We make the assumption that the sync
# routine actually refreshes the
# resources that it is the sync routine
# for. We don't check, because this is
# a best-effort mechanism.
# Check for deltas that would go negative
if is_allocated_reserve:
unders = [r for r, delta in deltas.items()
if delta < 0 and delta + allocated.get(r, 0) < 0]
else:
unders = [r for r, delta in deltas.items()
if delta < 0 and delta + usages[r].in_use < 0]
# TODO(mc_nair): Should ignore/zero alloc if using non-nested driver
# Now, let's check the quotas
# NOTE(Vek): We're only concerned about positive increments.
# If a project has gone over quota, we want them to
# be able to reduce their usage without any
# problems.
overs = [r for r, delta in deltas.items()
if quotas[r] >= 0 and delta >= 0 and
quotas[r] < delta + usages[r].total + allocated.get(r, 0)]
# NOTE(Vek): The quota check needs to be in the transaction,
# but the transaction doesn't fail just because
# we're over quota, so the OverQuota raise is
# outside the transaction. If we did the raise
# here, our usage updates would be discarded, but
# they're not invalidated by being over-quota.
# Create the reservations
if not overs:
reservations = []
for resource, delta in deltas.items():
usage = usages[resource]
allocated_id = None
if is_allocated_reserve:
try:
quota = _quota_get(context, project_id, resource,
session=session)
except exception.ProjectQuotaNotFound:
# If we were using the default quota, create DB entry
quota = quota_create(context, project_id, resource,
quotas[resource], 0)
# Since there's no reserved/total for allocated, update
# allocated immediately and subtract on rollback if needed
quota_allocated_update(context, project_id, resource,
quota.allocated + delta)
allocated_id = quota.id
usage = None
reservation = _reservation_create(
elevated, str(uuid.uuid4()), usage, project_id, resource,
delta, expire, session=session, allocated_id=allocated_id)
reservations.append(reservation.uuid)
# Also update the reserved quantity
# NOTE(Vek): Again, we are only concerned here about
# positive increments. Here, though, we're
# worried about the following scenario:
#
# 1) User initiates resize down.
# 2) User allocates a new instance.
# 3) Resize down fails or is reverted.
# 4) User is now over quota.
#
# To prevent this, we only update the
# reserved value if the delta is positive.
if delta > 0 and not is_allocated_reserve:
usages[resource].reserved += delta
if unders:
LOG.warning(_LW("Change will make usage less than 0 for the following "
"resources: %s"), unders)
if overs:
usages = {k: dict(in_use=v.in_use, reserved=v.reserved,
allocated=allocated.get(k, 0))
for k, v in usages.items()}
raise exception.OverQuota(overs=sorted(overs), quotas=quotas,
usages=usages)
return reservations
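# Typical lifecycle of the reservations returned above (a sketch; assumes
# the caller drives the quota engine directly):
#
#     reservations = quota_reserve(ctxt, resources, quotas, {'volumes': 1},
#                                  expire, until_refresh=0, max_age=0)
#     try:
#         ...  # do the work the quota was reserved for
#         reservation_commit(ctxt, reservations, project_id=project_id)
#     except Exception:
#         reservation_rollback(ctxt, reservations, project_id=project_id)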
def _quota_reservations(session, context, reservations):
"""Return the relevant reservations."""
# Get the listed reservations
return model_query(context, models.Reservation,
read_deleted="no",
session=session).\
filter(models.Reservation.uuid.in_(reservations)).\
with_lockmode('update').\
all()
def _dict_with_usage_id(usages):
return {row.id: row for row in usages.values()}
@require_context
@_retry_on_deadlock
def reservation_commit(context, reservations, project_id=None):
session = get_session()
with session.begin():
usages = _get_quota_usages(context, session, project_id)
usages = _dict_with_usage_id(usages)
for reservation in _quota_reservations(session, context, reservations):
# Allocated reservations will have already been bumped
if not reservation.allocated_id:
usage = usages[reservation.usage_id]
if reservation.delta >= 0:
usage.reserved -= reservation.delta
usage.in_use += reservation.delta
reservation.delete(session=session)
@require_context
@_retry_on_deadlock
def reservation_rollback(context, reservations, project_id=None):
session = get_session()
with session.begin():
usages = _get_quota_usages(context, session, project_id)
usages = _dict_with_usage_id(usages)
for reservation in _quota_reservations(session, context, reservations):
if reservation.allocated_id:
reservation.quota.allocated -= reservation.delta
else:
usage = usages[reservation.usage_id]
if reservation.delta >= 0:
usage.reserved -= reservation.delta
reservation.delete(session=session)
def quota_destroy_by_project(*args, **kwargs):
"""Destroy all limit quotas associated with a project.
Leaves usage and reservation quotas intact.
"""
    quota_destroy_all_by_project(*args, only_quotas=True, **kwargs)
@require_admin_context
@_retry_on_deadlock
def quota_destroy_all_by_project(context, project_id, only_quotas=False):
"""Destroy all quotas associated with a project.
This includes limit quotas, usage quotas and reservation quotas.
Optionally can only remove limit quotas and leave other types as they are.
:param context: The request context, for access checks.
:param project_id: The ID of the project being deleted.
:param only_quotas: Only delete limit quotas, leave other types intact.
"""
session = get_session()
with session.begin():
quotas = model_query(context, models.Quota, session=session,
read_deleted="no").\
filter_by(project_id=project_id).\
all()
for quota_ref in quotas:
quota_ref.delete(session=session)
if only_quotas:
return
quota_usages = model_query(context, models.QuotaUsage,
session=session, read_deleted="no").\
filter_by(project_id=project_id).\
all()
for quota_usage_ref in quota_usages:
quota_usage_ref.delete(session=session)
reservations = model_query(context, models.Reservation,
session=session, read_deleted="no").\
filter_by(project_id=project_id).\
all()
for reservation_ref in reservations:
reservation_ref.delete(session=session)
@require_admin_context
@_retry_on_deadlock
def reservation_expire(context):
session = get_session()
with session.begin():
current_time = timeutils.utcnow()
results = model_query(context, models.Reservation, session=session,
read_deleted="no").\
filter(models.Reservation.expire < current_time).\
all()
if results:
for reservation in results:
if reservation.delta >= 0:
if reservation.allocated_id:
reservation.quota.allocated -= reservation.delta
reservation.quota.save(session=session)
else:
reservation.usage.reserved -= reservation.delta
reservation.usage.save(session=session)
reservation.delete(session=session)
###################
@require_admin_context
def volume_attach(context, values):
volume_attachment_ref = models.VolumeAttachment()
if not values.get('id'):
values['id'] = str(uuid.uuid4())
volume_attachment_ref.update(values)
session = get_session()
with session.begin():
volume_attachment_ref.save(session=session)
return volume_attachment_get(context, values['id'],
session=session)
@require_admin_context
def volume_attached(context, attachment_id, instance_uuid, host_name,
                    mountpoint, attach_mode='rw'):
    """Update a volume attachment entry.
    This method saves the information related to a particular
    attachment for a volume. It also updates the volume record
    to mark the volume as attached.
"""
if instance_uuid and not uuidutils.is_uuid_like(instance_uuid):
raise exception.InvalidUUID(uuid=instance_uuid)
session = get_session()
with session.begin():
volume_attachment_ref = volume_attachment_get(context, attachment_id,
session=session)
volume_attachment_ref['mountpoint'] = mountpoint
volume_attachment_ref['attach_status'] = 'attached'
volume_attachment_ref['instance_uuid'] = instance_uuid
volume_attachment_ref['attached_host'] = host_name
volume_attachment_ref['attach_time'] = timeutils.utcnow()
volume_attachment_ref['attach_mode'] = attach_mode
volume_ref = _volume_get(context, volume_attachment_ref['volume_id'],
session=session)
volume_attachment_ref.save(session=session)
volume_ref['status'] = 'in-use'
volume_ref['attach_status'] = 'attached'
volume_ref.save(session=session)
return volume_ref
@handle_db_data_error
@require_context
def volume_create(context, values):
values['volume_metadata'] = _metadata_refs(values.get('metadata'),
models.VolumeMetadata)
if is_admin_context(context):
values['volume_admin_metadata'] = \
_metadata_refs(values.get('admin_metadata'),
models.VolumeAdminMetadata)
elif values.get('volume_admin_metadata'):
del values['volume_admin_metadata']
volume_ref = models.Volume()
if not values.get('id'):
values['id'] = str(uuid.uuid4())
volume_ref.update(values)
session = get_session()
with session.begin():
session.add(volume_ref)
return _volume_get(context, values['id'], session=session)
def get_booleans_for_table(table_name):
booleans = set()
table = getattr(models, table_name.capitalize())
if hasattr(table, '__table__'):
columns = table.__table__.columns
for column in columns:
if isinstance(column.type, sqltypes.Boolean):
booleans.add(column.name)
return booleans
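# Illustrative use: get_booleans_for_table('volume') inspects
# models.Volume.__table__ and returns the names of its Boolean columns
# (e.g. a set containing 'deleted' with the usual soft-delete mixin).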
@require_admin_context
def volume_data_get_for_host(context, host, count_only=False):
host_attr = models.Volume.host
conditions = [host_attr == host, host_attr.op('LIKE')(host + '#%')]
if count_only:
result = model_query(context,
func.count(models.Volume.id),
read_deleted="no").filter(
or_(*conditions)).first()
return result[0] or 0
else:
result = model_query(context,
func.count(models.Volume.id),
func.sum(models.Volume.size),
read_deleted="no").filter(
or_(*conditions)).first()
# NOTE(vish): convert None to 0
return (result[0] or 0, result[1] or 0)
@require_admin_context
def _volume_data_get_for_project(context, project_id, volume_type_id=None,
session=None):
query = model_query(context,
func.count(models.Volume.id),
func.sum(models.Volume.size),
read_deleted="no",
session=session).\
filter_by(project_id=project_id)
if volume_type_id:
query = query.filter_by(volume_type_id=volume_type_id)
result = query.first()
# NOTE(vish): convert None to 0
return (result[0] or 0, result[1] or 0)
@require_admin_context
def _backup_data_get_for_project(context, project_id, volume_type_id=None,
session=None):
query = model_query(context,
func.count(models.Backup.id),
func.sum(models.Backup.size),
read_deleted="no",
session=session).\
filter_by(project_id=project_id)
if volume_type_id:
query = query.filter_by(volume_type_id=volume_type_id)
result = query.first()
# NOTE(vish): convert None to 0
return (result[0] or 0, result[1] or 0)
@require_admin_context
def volume_data_get_for_project(context, project_id, volume_type_id=None):
return _volume_data_get_for_project(context, project_id, volume_type_id)
@require_admin_context
@_retry_on_deadlock
def volume_destroy(context, volume_id):
session = get_session()
now = timeutils.utcnow()
with session.begin():
model_query(context, models.Volume, session=session).\
filter_by(id=volume_id).\
update({'status': 'deleted',
'deleted': True,
'deleted_at': now,
'updated_at': literal_column('updated_at'),
'migration_status': None})
model_query(context, models.VolumeMetadata, session=session).\
filter_by(volume_id=volume_id).\
update({'deleted': True,
'deleted_at': now,
'updated_at': literal_column('updated_at')})
model_query(context, models.VolumeAdminMetadata, session=session).\
filter_by(volume_id=volume_id).\
update({'deleted': True,
'deleted_at': now,
'updated_at': literal_column('updated_at')})
model_query(context, models.Transfer, session=session).\
filter_by(volume_id=volume_id).\
update({'deleted': True,
'deleted_at': now,
'updated_at': literal_column('updated_at')})
@require_admin_context
def volume_detached(context, volume_id, attachment_id):
"""This updates a volume attachment and marks it as detached.
This method also ensures that the volume entry is correctly
marked as either still attached/in-use or detached/available
if this was the last detachment made.
"""
session = get_session()
with session.begin():
attachment = None
try:
attachment = volume_attachment_get(context, attachment_id,
session=session)
except exception.VolumeAttachmentNotFound:
pass
# If this is already detached, attachment will be None
if attachment:
now = timeutils.utcnow()
attachment['attach_status'] = 'detached'
attachment['detach_time'] = now
attachment['deleted'] = True
attachment['deleted_at'] = now
attachment.save(session=session)
attachment_list = volume_attachment_get_used_by_volume_id(
context, volume_id, session=session)
        remain_attachment = bool(attachment_list)
volume_ref = _volume_get(context, volume_id, session=session)
if not remain_attachment:
# Hide status update from user if we're performing volume migration
# or uploading it to image
            if ((not volume_ref['migration_status'] and
                    volume_ref['status'] != 'uploading') or
                    volume_ref['migration_status'] in ('success', 'error')):
volume_ref['status'] = 'available'
volume_ref['attach_status'] = 'detached'
volume_ref.save(session=session)
else:
# Volume is still attached
volume_ref['status'] = 'in-use'
volume_ref['attach_status'] = 'attached'
volume_ref.save(session=session)
@require_context
def _volume_get_query(context, session=None, project_only=False,
joined_load=True):
"""Get the query to retrieve the volume.
:param context: the context used to run the method _volume_get_query
:param session: the session to use
:param project_only: the boolean used to decide whether to query the
volume in the current project or all projects
:param joined_load: the boolean used to decide whether the query loads
the other models, which join the volume model in
the database. Currently, the False value for this
parameter is specially for the case of updating
database during volume migration
:returns: updated query or None
"""
if not joined_load:
return model_query(context, models.Volume, session=session,
project_only=project_only)
if is_admin_context(context):
return model_query(context, models.Volume, session=session,
project_only=project_only).\
options(joinedload('volume_metadata')).\
options(joinedload('volume_admin_metadata')).\
options(joinedload('volume_type')).\
options(joinedload('volume_attachment')).\
options(joinedload('consistencygroup'))
else:
return model_query(context, models.Volume, session=session,
project_only=project_only).\
options(joinedload('volume_metadata')).\
options(joinedload('volume_type')).\
options(joinedload('volume_attachment')).\
options(joinedload('consistencygroup'))
@require_context
def _volume_get(context, volume_id, session=None, joined_load=True):
result = _volume_get_query(context, session=session, project_only=True,
joined_load=joined_load)
if joined_load:
result = result.options(joinedload('volume_type.extra_specs'))
result = result.filter_by(id=volume_id).first()
if not result:
raise exception.VolumeNotFound(volume_id=volume_id)
return result
@require_context
def volume_attachment_get(context, attachment_id, session=None):
result = model_query(context, models.VolumeAttachment,
session=session).\
filter_by(id=attachment_id).\
first()
if not result:
raise exception.VolumeAttachmentNotFound(filter='attachment_id = %s' %
attachment_id)
return result
@require_context
def volume_attachment_get_used_by_volume_id(context, volume_id, session=None):
result = model_query(context, models.VolumeAttachment,
session=session).\
filter_by(volume_id=volume_id).\
filter(models.VolumeAttachment.attach_status != 'detached').\
all()
return result
@require_context
def volume_attachment_get_by_host(context, volume_id, host):
session = get_session()
with session.begin():
result = model_query(context, models.VolumeAttachment,
session=session).\
filter_by(volume_id=volume_id).\
filter_by(attached_host=host).\
filter(models.VolumeAttachment.attach_status != 'detached').\
first()
return result
@require_context
def volume_attachment_get_by_instance_uuid(context, volume_id, instance_uuid):
session = get_session()
with session.begin():
result = model_query(context, models.VolumeAttachment,
session=session).\
filter_by(volume_id=volume_id).\
filter_by(instance_uuid=instance_uuid).\
filter(models.VolumeAttachment.attach_status != 'detached').\
first()
return result
@require_context
def volume_get(context, volume_id):
return _volume_get(context, volume_id)
@require_admin_context
def volume_get_all(context, marker, limit, sort_keys=None, sort_dirs=None,
filters=None, offset=None):
"""Retrieves all volumes.
If no sort parameters are specified then the returned volumes are sorted
first by the 'created_at' key and then by the 'id' key in descending
order.
:param context: context to query under
:param marker: the last item of the previous page, used to determine the
next page of results to return
:param limit: maximum number of items to return
:param sort_keys: list of attributes by which results should be sorted,
paired with corresponding item in sort_dirs
:param sort_dirs: list of directions in which results should be sorted,
paired with corresponding item in sort_keys
:param filters: dictionary of filters; values that are in lists, tuples,
or sets cause an 'IN' operation, while exact matching
is used for other values, see _process_volume_filters
function for more information
:returns: list of matching volumes
"""
session = get_session()
with session.begin():
# Generate the query
query = _generate_paginate_query(context, session, marker, limit,
sort_keys, sort_dirs, filters, offset)
# No volumes would match, return empty list
if query is None:
return []
return query.all()
@require_admin_context
def volume_get_all_by_host(context, host, filters=None):
"""Retrieves all volumes hosted on a host.
:param context: context to query under
:param host: host for all volumes being retrieved
:param filters: dictionary of filters; values that are in lists, tuples,
or sets cause an 'IN' operation, while exact matching
is used for other values, see _process_volume_filters
function for more information
:returns: list of matching volumes
"""
    # As a side effect of the introduction of the pool-aware scheduler,
    # newly created volumes will have pool information appended to the
    # 'host' field of a volume record. So a volume record in the DB can
    # now take either form below:
# Host
# Host#Pool
if host and isinstance(host, six.string_types):
session = get_session()
with session.begin():
host_attr = getattr(models.Volume, 'host')
conditions = [host_attr == host,
host_attr.op('LIKE')(host + '#%')]
query = _volume_get_query(context).filter(or_(*conditions))
if filters:
query = _process_volume_filters(query, filters)
# No volumes would match, return empty list
if query is None:
return []
return query.all()
elif not host:
return []
@require_context
def volume_get_all_by_group(context, group_id, filters=None):
"""Retrieves all volumes associated with the group_id.
:param context: context to query under
:param group_id: group ID for all volumes being retrieved
:param filters: dictionary of filters; values that are in lists, tuples,
or sets cause an 'IN' operation, while exact matching
is used for other values, see _process_volume_filters
function for more information
:returns: list of matching volumes
"""
query = _volume_get_query(context).filter_by(consistencygroup_id=group_id)
if filters:
query = _process_volume_filters(query, filters)
# No volumes would match, return empty list
if query is None:
return []
return query.all()
@require_context
def volume_get_all_by_project(context, project_id, marker, limit,
sort_keys=None, sort_dirs=None, filters=None,
offset=None):
"""Retrieves all volumes in a project.
If no sort parameters are specified then the returned volumes are sorted
first by the 'created_at' key and then by the 'id' key in descending
order.
:param context: context to query under
:param project_id: project for all volumes being retrieved
:param marker: the last item of the previous page, used to determine the
next page of results to return
:param limit: maximum number of items to return
:param sort_keys: list of attributes by which results should be sorted,
paired with corresponding item in sort_dirs
:param sort_dirs: list of directions in which results should be sorted,
paired with corresponding item in sort_keys
:param filters: dictionary of filters; values that are in lists, tuples,
or sets cause an 'IN' operation, while exact matching
is used for other values, see _process_volume_filters
function for more information
:returns: list of matching volumes
"""
session = get_session()
with session.begin():
authorize_project_context(context, project_id)
# Add in the project filter without modifying the given filters
filters = filters.copy() if filters else {}
filters['project_id'] = project_id
# Generate the query
query = _generate_paginate_query(context, session, marker, limit,
sort_keys, sort_dirs, filters, offset)
# No volumes would match, return empty list
if query is None:
return []
return query.all()
def _generate_paginate_query(context, session, marker, limit, sort_keys,
sort_dirs, filters, offset=None,
paginate_type=models.Volume):
"""Generate the query to include the filters and the paginate options.
Returns a query with sorting / pagination criteria added or None
if the given filters will not yield any results.
:param context: context to query under
:param session: the session to use
    :param marker: the last item of the previous page; the query returns
                   results after this value
:param limit: maximum number of items to return
:param sort_keys: list of attributes by which results should be sorted,
paired with corresponding item in sort_dirs
:param sort_dirs: list of directions in which results should be sorted,
paired with corresponding item in sort_keys
:param filters: dictionary of filters; values that are in lists, tuples,
or sets cause an 'IN' operation, while exact matching
is used for other values, see _process_volume_filters
function for more information
:param offset: number of items to skip
:param paginate_type: type of pagination to generate
:returns: updated query or None
"""
get_query, process_filters, get = PAGINATION_HELPERS[paginate_type]
sort_keys, sort_dirs = process_sort_params(sort_keys,
sort_dirs,
default_dir='desc')
query = get_query(context, session=session)
if filters:
query = process_filters(query, filters)
if query is None:
return None
marker_object = None
if marker is not None:
marker_object = get(context, marker, session)
return sqlalchemyutils.paginate_query(query, paginate_type, limit,
sort_keys,
marker=marker_object,
sort_dirs=sort_dirs,
offset=offset)
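# PAGINATION_HELPERS (defined elsewhere in this module) maps each model to a
# (get_query, process_filters, get_one) triple; for models.Volume this is
# presumably (_volume_get_query, _process_volume_filters, _volume_get),
# matching the helpers defined in this section.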
def _process_volume_filters(query, filters):
"""Common filter processing for Volume queries.
Filter values that are in lists, tuples, or sets cause an 'IN' operator
to be used, while exact matching ('==' operator) is used for other values.
A filter key/value of 'no_migration_targets'=True causes volumes with
either a NULL 'migration_status' or a 'migration_status' that does not
start with 'target:' to be retrieved.
A 'metadata' filter key must correspond to a dictionary value of metadata
key-value pairs.
:param query: Model query to use
:param filters: dictionary of filters
:returns: updated query or None
"""
filters = filters.copy()
filters.pop('changes-since', None)
# 'no_migration_targets' is unique, must be either NULL or
# not start with 'target:'
if filters.get('no_migration_targets', False):
filters.pop('no_migration_targets')
try:
column_attr = getattr(models.Volume, 'migration_status')
conditions = [column_attr == None, # noqa
column_attr.op('NOT LIKE')('target:%')]
query = query.filter(or_(*conditions))
except AttributeError:
LOG.debug("'migration_status' column could not be found.")
return None
# Apply exact match filters for everything else, ensure that the
# filter value exists on the model
for key in filters.keys():
# metadata is unique, must be a dict
if key == 'metadata':
if not isinstance(filters[key], dict):
LOG.debug("'metadata' filter value is not valid.")
return None
continue
try:
column_attr = getattr(models.Volume, key)
# Do not allow relationship properties since those require
# schema specific knowledge
prop = getattr(column_attr, 'property')
if isinstance(prop, RelationshipProperty):
LOG.debug(("'%s' filter key is not valid, "
"it maps to a relationship."), key)
return None
except AttributeError:
LOG.debug("'%s' filter key is not valid.", key)
return None
# Holds the simple exact matches
filter_dict = {}
# Iterate over all filters, special case the filter if necessary
for key, value in filters.items():
if key == 'metadata':
# model.VolumeMetadata defines the backref to Volumes as
# 'volume_metadata' or 'volume_admin_metadata', use those as
# column attribute keys
col_attr = getattr(models.Volume, 'volume_metadata')
col_ad_attr = getattr(models.Volume, 'volume_admin_metadata')
for k, v in value.items():
query = query.filter(or_(col_attr.any(key=k, value=v),
col_ad_attr.any(key=k, value=v)))
elif isinstance(value, (list, tuple, set, frozenset)):
# Looking for values in a list; apply to query directly
column_attr = getattr(models.Volume, key)
query = query.filter(column_attr.in_(value))
else:
# OK, simple exact match; save for later
filter_dict[key] = value
# Apply simple exact matches
if filter_dict:
query = query.filter_by(**filter_dict)
return query
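# Illustrative filter handling: {'size': [1, 2]} becomes
# Volume.size.in_([1, 2]); {'status': 'available'} becomes an exact
# filter_by(status='available'); and {'metadata': {'k': 'v'}} matches
# volumes whose user *or* admin metadata contains that key/value pair.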
def process_sort_params(sort_keys, sort_dirs, default_keys=None,
default_dir='asc'):
"""Process the sort parameters to include default keys.
Creates a list of sort keys and a list of sort directions. Adds the default
keys to the end of the list if they are not already included.
When adding the default keys to the sort keys list, the associated
direction is:
1) The first element in the 'sort_dirs' list (if specified), else
    2) 'default_dir' value (Note that 'asc' is the default value since this is
       the default in sqlalchemyutils.paginate_query)
:param sort_keys: List of sort keys to include in the processed list
:param sort_dirs: List of sort directions to include in the processed list
:param default_keys: List of sort keys that need to be included in the
processed list, they are added at the end of the list
if not already specified.
:param default_dir: Sort direction associated with each of the default
keys that are not supplied, used when they are added
to the processed list
:returns: list of sort keys, list of sort directions
:raise exception.InvalidInput: If more sort directions than sort keys
are specified or if an invalid sort
direction is specified
"""
if default_keys is None:
default_keys = ['created_at', 'id']
# Determine direction to use for when adding default keys
if sort_dirs and len(sort_dirs):
default_dir_value = sort_dirs[0]
else:
default_dir_value = default_dir
# Create list of keys (do not modify the input list)
if sort_keys:
result_keys = list(sort_keys)
else:
result_keys = []
# If a list of directions is not provided, use the default sort direction
# for all provided keys.
if sort_dirs:
result_dirs = []
# Verify sort direction
for sort_dir in sort_dirs:
if sort_dir not in ('asc', 'desc'):
msg = _("Unknown sort direction, must be 'desc' or 'asc'.")
raise exception.InvalidInput(reason=msg)
result_dirs.append(sort_dir)
else:
result_dirs = [default_dir_value for _sort_key in result_keys]
# Ensure that the key and direction length match
while len(result_dirs) < len(result_keys):
result_dirs.append(default_dir_value)
    # Unless more directions are specified, which is an error
if len(result_dirs) > len(result_keys):
msg = _("Sort direction array size exceeds sort key array size.")
raise exception.InvalidInput(reason=msg)
# Ensure defaults are included
for key in default_keys:
if key not in result_keys:
result_keys.append(key)
result_dirs.append(default_dir_value)
return result_keys, result_dirs
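# For example, process_sort_params(['size'], ['asc']) returns
# (['size', 'created_at', 'id'], ['asc', 'asc', 'asc']): the default keys
# are appended using the first supplied direction.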
@handle_db_data_error
@require_context
def volume_update(context, volume_id, values):
session = get_session()
with session.begin():
metadata = values.get('metadata')
if metadata is not None:
_volume_user_metadata_update(context,
volume_id,
values.pop('metadata'),
delete=True,
session=session)
admin_metadata = values.get('admin_metadata')
if is_admin_context(context) and admin_metadata is not None:
_volume_admin_metadata_update(context,
volume_id,
values.pop('admin_metadata'),
delete=True,
session=session)
volume_ref = _volume_get(context, volume_id, session=session)
volume_ref.update(values)
return volume_ref
@require_context
def volume_attachment_update(context, attachment_id, values):
session = get_session()
with session.begin():
volume_attachment_ref = volume_attachment_get(context, attachment_id,
session=session)
volume_attachment_ref.update(values)
volume_attachment_ref.save(session=session)
return volume_attachment_ref
def volume_update_status_based_on_attachment(context, volume_id):
"""Update volume status based on attachment.
    Get the volume and check whether it has any 'volume_attachment' entries.
    If the volume has no attachments, set its status to 'available';
    otherwise set it to 'in-use'.
:param context: context to query under
:param volume_id: id of volume to be updated
:returns: updated volume
"""
session = get_session()
with session.begin():
volume_ref = _volume_get(context, volume_id, session=session)
        # We need to get and update the volume using the same session
        # because there is a possibility that the volume is deleted between
        # the 'get' and 'update' calls.
if not volume_ref['volume_attachment']:
volume_ref.update({'status': 'available'})
else:
volume_ref.update({'status': 'in-use'})
return volume_ref
def volume_has_snapshots_filter():
return sql.exists().where(
and_(models.Volume.id == models.Snapshot.volume_id,
~models.Snapshot.deleted))
def volume_has_undeletable_snapshots_filter():
deletable_statuses = ['available', 'error']
return sql.exists().where(
and_(models.Volume.id == models.Snapshot.volume_id,
~models.Snapshot.deleted,
or_(models.Snapshot.cgsnapshot_id != None, # noqa: != None
models.Snapshot.status.notin_(deletable_statuses))))
def volume_has_attachments_filter():
return sql.exists().where(
and_(models.Volume.id == models.VolumeAttachment.volume_id,
models.VolumeAttachment.attach_status != 'detached',
~models.VolumeAttachment.deleted))
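# These EXISTS clauses are meant to be composed into other queries, e.g.
# (a sketch):
#
#     query.filter(~volume_has_attachments_filter())
#
# to restrict an update or delete to volumes without live attachments.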
####################
def _volume_x_metadata_get_query(context, volume_id, model, session=None):
return model_query(context, model, session=session, read_deleted="no").\
filter_by(volume_id=volume_id)
def _volume_x_metadata_get(context, volume_id, model, session=None):
rows = _volume_x_metadata_get_query(context, volume_id, model,
session=session).all()
result = {}
for row in rows:
result[row['key']] = row['value']
return result
def _volume_x_metadata_get_item(context, volume_id, key, model, notfound_exec,
session=None):
result = _volume_x_metadata_get_query(context, volume_id,
model, session=session).\
filter_by(key=key).\
first()
if not result:
if model is models.VolumeGlanceMetadata:
raise notfound_exec(id=volume_id)
else:
raise notfound_exec(metadata_key=key, volume_id=volume_id)
return result
def _volume_x_metadata_update(context, volume_id, metadata, delete, model,
session=None, add=True, update=True):
session = session or get_session()
metadata = metadata.copy()
with session.begin(subtransactions=True):
# Set existing metadata to deleted if delete argument is True. This is
# committed immediately to the DB
if delete:
expected_values = {'volume_id': volume_id}
# We don't want to delete keys we are going to update
if metadata:
expected_values['key'] = storage.Not(metadata.keys())
conditional_update(context, model, {'deleted': True},
expected_values)
# Get existing metadata
db_meta = _volume_x_metadata_get_query(context, volume_id, model).all()
save = []
skip = []
# We only want to send changed metadata.
for row in db_meta:
if row.key in metadata:
value = metadata.pop(row.key)
if row.value != value and update:
# ORM objects will not be saved until we do the bulk save
row.value = value
save.append(row)
continue
skip.append(row)
# We also want to save non-existent metadata
if add:
save.extend(model(key=key, value=value, volume_id=volume_id)
for key, value in metadata.items())
# Do a bulk save
if save:
session.bulk_save_objects(save, update_changed_only=True)
# Construct result dictionary with current metadata
save.extend(skip)
result = {row['key']: row['value'] for row in save}
return result
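# Worked example (a sketch): with stored rows {'a': '1', 'b': '2'} and
# metadata={'a': '1', 'c': '3'}, delete=True soft-deletes 'b', leaves 'a'
# untouched (its value is unchanged), bulk-saves a new row for 'c', and
# returns {'a': '1', 'c': '3'}.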
def _volume_user_metadata_get_query(context, volume_id, session=None):
return _volume_x_metadata_get_query(context, volume_id,
models.VolumeMetadata, session=session)
def _volume_image_metadata_get_query(context, volume_id, session=None):
return _volume_x_metadata_get_query(context, volume_id,
models.VolumeGlanceMetadata,
session=session)
@require_context
@require_volume_exists
def _volume_user_metadata_get(context, volume_id, session=None):
return _volume_x_metadata_get(context, volume_id,
models.VolumeMetadata, session=session)
@require_context
def _volume_user_metadata_get_item(context, volume_id, key, session=None):
return _volume_x_metadata_get_item(context, volume_id, key,
models.VolumeMetadata,
exception.VolumeMetadataNotFound,
session=session)
@require_context
@require_volume_exists
def _volume_user_metadata_update(context, volume_id, metadata, delete,
session=None):
return _volume_x_metadata_update(context, volume_id, metadata, delete,
models.VolumeMetadata,
session=session)
@require_context
@require_volume_exists
def _volume_image_metadata_update(context, volume_id, metadata, delete,
session=None):
return _volume_x_metadata_update(context, volume_id, metadata, delete,
models.VolumeGlanceMetadata,
session=session)
@require_context
def _volume_glance_metadata_key_to_id(context, volume_id, key):
db_data = volume_glance_metadata_get(context, volume_id)
metadata = {meta_entry.key: meta_entry.id
for meta_entry in db_data
if meta_entry.key == key}
metadata_id = metadata[key]
return metadata_id
@require_context
@require_volume_exists
def volume_metadata_get(context, volume_id):
return _volume_user_metadata_get(context, volume_id)
@require_context
@require_volume_exists
@_retry_on_deadlock
def volume_metadata_delete(context, volume_id, key, meta_type):
if meta_type == common.METADATA_TYPES.user:
(_volume_user_metadata_get_query(context, volume_id).
filter_by(key=key).
update({'deleted': True,
'deleted_at': timeutils.utcnow(),
'updated_at': literal_column('updated_at')}))
elif meta_type == common.METADATA_TYPES.image:
metadata_id = _volume_glance_metadata_key_to_id(context,
volume_id, key)
(_volume_image_metadata_get_query(context, volume_id).
filter_by(id=metadata_id).
update({'deleted': True,
'deleted_at': timeutils.utcnow(),
'updated_at': literal_column('updated_at')}))
else:
raise exception.InvalidMetadataType(metadata_type=meta_type,
id=volume_id)
@require_context
@require_volume_exists
@handle_db_data_error
@_retry_on_deadlock
def volume_metadata_update(context, volume_id, metadata, delete, meta_type):
if meta_type == common.METADATA_TYPES.user:
return _volume_user_metadata_update(context,
volume_id,
metadata,
delete)
elif meta_type == common.METADATA_TYPES.image:
return _volume_image_metadata_update(context,
volume_id,
metadata,
delete)
else:
raise exception.InvalidMetadataType(metadata_type=meta_type,
id=volume_id)
###################
def _volume_admin_metadata_get_query(context, volume_id, session=None):
return _volume_x_metadata_get_query(context, volume_id,
models.VolumeAdminMetadata,
session=session)
@require_admin_context
@require_volume_exists
def _volume_admin_metadata_get(context, volume_id, session=None):
return _volume_x_metadata_get(context, volume_id,
models.VolumeAdminMetadata, session=session)
@require_admin_context
@require_volume_exists
def _volume_admin_metadata_update(context, volume_id, metadata, delete,
session=None, add=True, update=True):
return _volume_x_metadata_update(context, volume_id, metadata, delete,
models.VolumeAdminMetadata,
session=session, add=add, update=update)
@require_admin_context
@require_volume_exists
def volume_admin_metadata_get(context, volume_id):
return _volume_admin_metadata_get(context, volume_id)
@require_admin_context
@require_volume_exists
@_retry_on_deadlock
def volume_admin_metadata_delete(context, volume_id, key):
_volume_admin_metadata_get_query(context, volume_id).\
filter_by(key=key).\
update({'deleted': True,
'deleted_at': timeutils.utcnow(),
'updated_at': literal_column('updated_at')})
@require_admin_context
@require_volume_exists
@_retry_on_deadlock
def volume_admin_metadata_update(context, volume_id, metadata, delete,
add=True, update=True):
return _volume_admin_metadata_update(context, volume_id, metadata, delete,
add=add, update=update)
###################
@handle_db_data_error
@require_context
def snapshot_create(context, values):
values['snapshot_metadata'] = _metadata_refs(values.get('metadata'),
models.SnapshotMetadata)
if not values.get('id'):
values['id'] = str(uuid.uuid4())
session = get_session()
with session.begin():
snapshot_ref = models.Snapshot()
snapshot_ref.update(values)
session.add(snapshot_ref)
return _snapshot_get(context, values['id'], session=session)
@require_admin_context
@_retry_on_deadlock
def snapshot_destroy(context, snapshot_id):
session = get_session()
with session.begin():
model_query(context, models.Snapshot, session=session).\
filter_by(id=snapshot_id).\
update({'status': 'deleted',
'deleted': True,
'deleted_at': timeutils.utcnow(),
'updated_at': literal_column('updated_at')})
model_query(context, models.SnapshotMetadata, session=session).\
filter_by(snapshot_id=snapshot_id).\
update({'deleted': True,
'deleted_at': timeutils.utcnow(),
'updated_at': literal_column('updated_at')})
@require_context
def _snapshot_get(context, snapshot_id, session=None):
result = model_query(context, models.Snapshot, session=session,
project_only=True).\
options(joinedload('volume')).\
options(joinedload('snapshot_metadata')).\
filter_by(id=snapshot_id).\
first()
if not result:
raise exception.SnapshotNotFound(snapshot_id=snapshot_id)
return result
@require_context
def snapshot_get(context, snapshot_id):
return _snapshot_get(context, snapshot_id)
@require_admin_context
def snapshot_get_all(context, filters=None, marker=None, limit=None,
sort_keys=None, sort_dirs=None, offset=None):
"""Retrieves all snapshots.
If no sorting parameters are specified then returned snapshots are sorted
first by the 'created_at' key and then by the 'id' key in descending
order.
:param context: context to query under
:param filters: dictionary of filters; will do exact matching on values
:param marker: the last item of the previous page, used to determine the
next page of results to return
:param limit: maximum number of items to return
:param sort_keys: list of attributes by which results should be sorted,
paired with corresponding item in sort_dirs
:param sort_dirs: list of directions in which results should be sorted,
paired with corresponding item in sort_keys
:returns: list of matching snapshots
"""
if filters and not is_valid_model_filters(models.Snapshot, filters):
return []
session = get_session()
with session.begin():
query = _generate_paginate_query(context, session, marker, limit,
sort_keys, sort_dirs, filters,
offset, models.Snapshot)
# No snapshots would match, return empty list
if not query:
return []
return query.all()
def _snaps_get_query(context, session=None, project_only=False):
return model_query(context, models.Snapshot, session=session,
project_only=project_only).\
options(joinedload('snapshot_metadata'))
def _process_snaps_filters(query, filters):
if filters:
# Ensure that filters' keys exist on the model
if not is_valid_model_filters(models.Snapshot, filters):
return None
query = query.filter_by(**filters)
return query
@require_context
def snapshot_get_all_for_volume(context, volume_id):
return model_query(context, models.Snapshot, read_deleted='no',
project_only=True).\
filter_by(volume_id=volume_id).\
options(joinedload('snapshot_metadata')).\
all()
@require_context
def snapshot_get_by_host(context, host, filters=None):
if filters and not is_valid_model_filters(models.Snapshot, filters):
return []
query = model_query(context, models.Snapshot, read_deleted='no',
project_only=True)
if filters:
query = query.filter_by(**filters)
    # As a side effect of the introduction of the pool-aware scheduler,
    # newly created volumes will have pool information appended to the
    # 'host' field of a volume record. So a volume record in the DB can
    # now take either form below:
# Host
# Host#Pool
if host and isinstance(host, six.string_types):
session = get_session()
with session.begin():
host_attr = getattr(models.Volume, 'host')
conditions = [host_attr == host,
host_attr.op('LIKE')(host + '#%')]
query = query.join(models.Snapshot.volume).filter(
or_(*conditions)).options(joinedload('snapshot_metadata'))
return query.all()
elif not host:
return []
@require_context
def snapshot_get_all_for_cgsnapshot(context, cgsnapshot_id):
return model_query(context, models.Snapshot, read_deleted='no',
project_only=True).\
filter_by(cgsnapshot_id=cgsnapshot_id).\
options(joinedload('volume')).\
options(joinedload('snapshot_metadata')).\
all()
@require_context
def snapshot_get_all_by_project(context, project_id, filters=None, marker=None,
limit=None, sort_keys=None, sort_dirs=None,
                                offset=None):
    """Retrieves all snapshots in a project.
If no sorting parameters are specified then returned snapshots are sorted
first by the 'created_at' key and then by the 'id' key in descending
order.
:param context: context to query under
:param project_id: project for all snapshots being retrieved
:param filters: dictionary of filters; will do exact matching on values
:param marker: the last item of the previous page, used to determine the
next page of results to return
:param limit: maximum number of items to return
:param sort_keys: list of attributes by which results should be sorted,
paired with corresponding item in sort_dirs
:param sort_dirs: list of directions in which results should be sorted,
paired with corresponding item in sort_keys
:returns: list of matching snapshots
"""
if filters and not is_valid_model_filters(models.Snapshot, filters):
return []
authorize_project_context(context, project_id)
# Add project_id to filters
filters = filters.copy() if filters else {}
filters['project_id'] = project_id
session = get_session()
with session.begin():
query = _generate_paginate_query(context, session, marker, limit,
sort_keys, sort_dirs, filters,
offset, models.Snapshot)
# No snapshots would match, return empty list
if not query:
return []
query = query.options(joinedload('snapshot_metadata'))
return query.all()
@require_context
def _snapshot_data_get_for_project(context, project_id, volume_type_id=None,
session=None):
authorize_project_context(context, project_id)
query = model_query(context,
func.count(models.Snapshot.id),
func.sum(models.Snapshot.volume_size),
read_deleted="no",
session=session).\
filter_by(project_id=project_id)
if volume_type_id:
query = query.join('volume').filter_by(volume_type_id=volume_type_id)
result = query.first()
# NOTE(vish): convert None to 0
return (result[0] or 0, result[1] or 0)
@require_context
def snapshot_data_get_for_project(context, project_id, volume_type_id=None):
return _snapshot_data_get_for_project(context, project_id, volume_type_id)
@require_context
def snapshot_get_active_by_window(context, begin, end=None, project_id=None):
"""Return snapshots that were active during window."""
query = model_query(context, models.Snapshot, read_deleted="yes")
query = query.filter(or_(models.Snapshot.deleted_at == None, # noqa
models.Snapshot.deleted_at > begin))
query = query.options(joinedload(models.Snapshot.volume))
query = query.options(joinedload('snapshot_metadata'))
if end:
query = query.filter(models.Snapshot.created_at < end)
if project_id:
query = query.filter_by(project_id=project_id)
return query.all()
@handle_db_data_error
@require_context
def snapshot_update(context, snapshot_id, values):
session = get_session()
with session.begin():
snapshot_ref = _snapshot_get(context, snapshot_id, session=session)
snapshot_ref.update(values)
return snapshot_ref
####################
def _snapshot_metadata_get_query(context, snapshot_id, session=None):
return model_query(context, models.SnapshotMetadata,
session=session, read_deleted="no").\
filter_by(snapshot_id=snapshot_id)
@require_context
def _snapshot_metadata_get(context, snapshot_id, session=None):
rows = _snapshot_metadata_get_query(context, snapshot_id, session).all()
result = {}
for row in rows:
result[row['key']] = row['value']
return result
@require_context
@require_snapshot_exists
def snapshot_metadata_get(context, snapshot_id):
return _snapshot_metadata_get(context, snapshot_id)
@require_context
@require_snapshot_exists
@_retry_on_deadlock
def snapshot_metadata_delete(context, snapshot_id, key):
_snapshot_metadata_get_query(context, snapshot_id).\
filter_by(key=key).\
update({'deleted': True,
'deleted_at': timeutils.utcnow(),
'updated_at': literal_column('updated_at')})
@require_context
def _snapshot_metadata_get_item(context, snapshot_id, key, session=None):
result = _snapshot_metadata_get_query(context,
snapshot_id,
session=session).\
filter_by(key=key).\
first()
if not result:
raise exception.SnapshotMetadataNotFound(metadata_key=key,
snapshot_id=snapshot_id)
return result
@require_context
@require_snapshot_exists
@handle_db_data_error
@_retry_on_deadlock
def snapshot_metadata_update(context, snapshot_id, metadata, delete):
session = get_session()
with session.begin():
# Set existing metadata to deleted if delete argument is True
if delete:
original_metadata = _snapshot_metadata_get(context, snapshot_id,
session)
for meta_key, meta_value in original_metadata.items():
if meta_key not in metadata:
meta_ref = _snapshot_metadata_get_item(context,
snapshot_id,
meta_key, session)
meta_ref.update({'deleted': True})
meta_ref.save(session=session)
meta_ref = None
# Now update all existing items with new values, or create new meta
# objects
for meta_key, meta_value in metadata.items():
# update the value whether it exists or not
item = {"value": meta_value}
try:
meta_ref = _snapshot_metadata_get_item(context, snapshot_id,
meta_key, session)
except exception.SnapshotMetadataNotFound:
meta_ref = models.SnapshotMetadata()
item.update({"key": meta_key, "snapshot_id": snapshot_id})
meta_ref.update(item)
meta_ref.save(session=session)
return snapshot_metadata_get(context, snapshot_id)
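# NOTE: illustrative sketch (not part of the original API) of the 'delete'
# flag semantics above; 'ctxt' and 'snap_id' are hypothetical names:
#
#     # delete=True soft-deletes existing keys absent from the new dict:
#     snapshot_metadata_update(ctxt, snap_id, {'k1': 'v1'}, delete=True)
#     # delete=False leaves unmentioned existing keys untouched:
#     snapshot_metadata_update(ctxt, snap_id, {'k2': 'v2'}, delete=False)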
###################
@handle_db_data_error
@require_admin_context
def volume_type_create(context, values, projects=None):
"""Create a new volume type.
In order to pass in extra specs, the values dict should contain an
'extra_specs' key/value pair:
{'extra_specs' : {'k1': 'v1', 'k2': 'v2', ...}}
"""
if not values.get('id'):
values['id'] = str(uuid.uuid4())
projects = projects or []
session = get_session()
with session.begin():
try:
_volume_type_get_by_name(context, values['name'], session)
raise exception.VolumeTypeExists(id=values['name'])
except exception.VolumeTypeNotFoundByName:
pass
try:
_volume_type_get(context, values['id'], session)
raise exception.VolumeTypeExists(id=values['id'])
except exception.VolumeTypeNotFound:
pass
try:
values['extra_specs'] = _metadata_refs(values.get('extra_specs'),
models.VolumeTypeExtraSpecs)
volume_type_ref = models.VolumeTypes()
volume_type_ref.update(values)
session.add(volume_type_ref)
except Exception as e:
raise db_exc.DBError(e)
for project in set(projects):
access_ref = models.VolumeTypeProjects()
access_ref.update({"volume_type_id": volume_type_ref.id,
"project_id": project})
access_ref.save(session=session)
return volume_type_ref
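# Example (illustrative only; 'ctxt' is a hypothetical admin context):
#
#     vtype = volume_type_create(ctxt,
#                                {'name': 'gold',
#                                 'extra_specs': {'qos': 'high'}},
#                                projects=['proj-1'])
#
# VolumeTypeExists is raised when either the name or the id is already
# taken.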
def _volume_type_get_query(context, session=None, read_deleted='no',
expected_fields=None):
expected_fields = expected_fields or []
query = model_query(context,
models.VolumeTypes,
session=session,
read_deleted=read_deleted).\
options(joinedload('extra_specs'))
if 'projects' in expected_fields:
query = query.options(joinedload('projects'))
if not context.is_admin:
the_filter = [models.VolumeTypes.is_public == true()]
projects_attr = getattr(models.VolumeTypes, 'projects')
the_filter.extend([
projects_attr.any(project_id=context.project_id)
])
query = query.filter(or_(*the_filter))
return query
def _process_volume_types_filters(query, filters):
context = filters.pop('context', None)
if 'is_public' in filters and filters['is_public'] is not None:
the_filter = [models.VolumeTypes.is_public == filters['is_public']]
if filters['is_public'] and context.project_id is not None:
projects_attr = getattr(models.VolumeTypes, 'projects')
the_filter.extend([
projects_attr.any(project_id=context.project_id, deleted=0)
])
if len(the_filter) > 1:
query = query.filter(or_(*the_filter))
else:
query = query.filter(the_filter[0])
if 'is_public' in filters:
del filters['is_public']
if filters:
# Ensure that filters' keys exist on the model
if not is_valid_model_filters(models.VolumeTypes, filters):
return
if filters.get('extra_specs') is not None:
the_filter = []
searchdict = filters.get('extra_specs')
extra_specs = getattr(models.VolumeTypes, 'extra_specs')
for k, v in searchdict.items():
the_filter.extend([extra_specs.any(key=k, value=v,
deleted=False)])
if len(the_filter) > 1:
query = query.filter(and_(*the_filter))
else:
query = query.filter(the_filter[0])
del filters['extra_specs']
query = query.filter_by(**filters)
return query
@handle_db_data_error
@require_admin_context
def volume_type_update(context, volume_type_id, values):
session = get_session()
with session.begin():
# Check it exists
volume_type_ref = _volume_type_ref_get(context,
volume_type_id,
session)
if not volume_type_ref:
raise exception.VolumeTypeNotFound(type_id=volume_type_id)
# No description change
if values['description'] is None:
del values['description']
# No is_public change
if values['is_public'] is None:
del values['is_public']
# No name change
if values['name'] is None:
del values['name']
else:
# Volume type names are unique; changing to a name that belongs
# to a different volume_type must be prevented.
check_vol_type = None
try:
check_vol_type = \
_volume_type_get_by_name(context,
values['name'],
session=session)
except exception.VolumeTypeNotFoundByName:
pass
else:
if check_vol_type.get('id') != volume_type_id:
raise exception.VolumeTypeExists(id=values['name'])
volume_type_ref.update(values)
volume_type_ref.save(session=session)
return volume_type_ref
@require_context
def volume_type_get_all(context, inactive=False, filters=None, marker=None,
limit=None, sort_keys=None, sort_dirs=None,
offset=None, list_result=False):
"""Returns a dict describing all volume_types with name as key.
If no sort parameters are specified then the returned volume types are
sorted first by the 'created_at' key and then by the 'id' key in descending
order.
:param context: context to query under
:param marker: the last item of the previous page, used to determine the
next page of results to return
:param limit: maximum number of items to return
:param sort_keys: list of attributes by which results should be sorted,
paired with corresponding item in sort_dirs
:param sort_dirs: list of directions in which results should be sorted,
paired with corresponding item in sort_keys
:param filters: dictionary of filters; values that are in lists, tuples,
or sets cause an 'IN' operation, while exact matching
is used for other values, see _process_volume_types_filters
function for more information
:param list_result: For compatibility, if list_result = True, return a list
instead of dict.
:returns: list/dict of matching volume types
"""
session = get_session()
with session.begin():
# Add context for _process_volume_types_filters
filters = filters or {}
filters['context'] = context
# Generate the query
query = _generate_paginate_query(context, session, marker, limit,
sort_keys, sort_dirs, filters, offset,
models.VolumeTypes)
# No volume types would match, return empty dict or list
if query is None:
if list_result:
return []
return {}
rows = query.all()
if list_result:
result = [_dict_with_extra_specs_if_authorized(context, row)
for row in rows]
return result
result = {row['name']: _dict_with_extra_specs_if_authorized(context,
row)
for row in rows}
return result
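# Example (illustrative; 'ctxt' is a hypothetical context):
#
#     by_name = volume_type_get_all(ctxt)                    # dict keyed by name
#     as_list = volume_type_get_all(ctxt, list_result=True)  # plain list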
def _volume_type_get_id_from_volume_type_query(context, id, session=None):
return model_query(
context, models.VolumeTypes.id, read_deleted="no",
session=session, base_model=models.VolumeTypes).\
filter_by(id=id)
def _volume_type_get_id_from_volume_type(context, id, session=None):
result = _volume_type_get_id_from_volume_type_query(
context, id, session=session).first()
if not result:
raise exception.VolumeTypeNotFound(volume_type_id=id)
return result[0]
def _volume_type_get_db_object(context, id, session=None, inactive=False,
expected_fields=None):
read_deleted = "yes" if inactive else "no"
result = _volume_type_get_query(
context, session, read_deleted, expected_fields).\
filter_by(id=id).\
first()
return result
@require_context
def _volume_type_get(context, id, session=None, inactive=False,
expected_fields=None):
expected_fields = expected_fields or []
result = _volume_type_get_db_object(context, id, session, inactive,
expected_fields)
if not result:
raise exception.VolumeTypeNotFound(volume_type_id=id)
vtype = _dict_with_extra_specs_if_authorized(context, result)
if 'projects' in expected_fields:
vtype['projects'] = [p['project_id'] for p in result['projects']]
return vtype
@require_context
def volume_type_get(context, id, inactive=False, expected_fields=None):
"""Return a dict describing specific volume_type."""
return _volume_type_get(context, id,
session=None,
inactive=inactive,
expected_fields=expected_fields)
def _volume_type_get_full(context, id):
"""Return dict for a specific volume_type with extra_specs and projects."""
return _volume_type_get(context, id, session=None, inactive=False,
expected_fields=('extra_specs', 'projects'))
@require_context
def _volume_type_ref_get(context, id, session=None, inactive=False):
read_deleted = "yes" if inactive else "no"
result = model_query(context,
models.VolumeTypes,
session=session,
read_deleted=read_deleted).\
options(joinedload('extra_specs')).\
filter_by(id=id).\
first()
if not result:
raise exception.VolumeTypeNotFound(volume_type_id=id)
return result
@require_context
def _volume_type_get_by_name(context, name, session=None):
result = model_query(context, models.VolumeTypes, session=session).\
options(joinedload('extra_specs')).\
filter_by(name=name).\
first()
if not result:
raise exception.VolumeTypeNotFoundByName(volume_type_name=name)
return _dict_with_extra_specs_if_authorized(context, result)
@require_context
def volume_type_get_by_name(context, name):
"""Return a dict describing specific volume_type."""
return _volume_type_get_by_name(context, name)
@require_context
def volume_types_get_by_name_or_id(context, volume_type_list):
"""Return a dict describing specific volume_type."""
req_volume_types = []
for vol_t in volume_type_list:
if not uuidutils.is_uuid_like(vol_t):
vol_type = _volume_type_get_by_name(context, vol_t)
else:
vol_type = _volume_type_get(context, vol_t)
req_volume_types.append(vol_type)
return req_volume_types
@require_admin_context
def volume_type_qos_associations_get(context, qos_specs_id, inactive=False):
read_deleted = "yes" if inactive else "no"
return model_query(context, models.VolumeTypes,
read_deleted=read_deleted). \
filter_by(qos_specs_id=qos_specs_id).all()
@require_admin_context
def volume_type_qos_associate(context, type_id, qos_specs_id):
session = get_session()
with session.begin():
_volume_type_get(context, type_id, session)
session.query(models.VolumeTypes). \
filter_by(id=type_id). \
update({'qos_specs_id': qos_specs_id,
'updated_at': timeutils.utcnow()})
@require_admin_context
def volume_type_qos_disassociate(context, qos_specs_id, type_id):
"""Disassociate volume type from qos specs."""
session = get_session()
with session.begin():
_volume_type_get(context, type_id, session)
session.query(models.VolumeTypes). \
filter_by(id=type_id). \
filter_by(qos_specs_id=qos_specs_id). \
update({'qos_specs_id': None,
'updated_at': timeutils.utcnow()})
@require_admin_context
def volume_type_qos_disassociate_all(context, qos_specs_id):
"""Disassociate all volume types associated with specified qos specs."""
session = get_session()
with session.begin():
session.query(models.VolumeTypes). \
filter_by(qos_specs_id=qos_specs_id). \
update({'qos_specs_id': None,
'updated_at': timeutils.utcnow()})
@require_admin_context
def volume_type_qos_specs_get(context, type_id):
"""Return all qos specs for given volume type.
result looks like:
{
'qos_specs':
{
'id': 'qos-specs-id',
'name': 'qos_specs_name',
'consumer': 'Consumer',
'specs': {
'key1': 'value1',
'key2': 'value2',
'key3': 'value3'
}
}
}
"""
session = get_session()
with session.begin():
_volume_type_get(context, type_id, session)
row = session.query(models.VolumeTypes). \
options(joinedload('qos_specs')). \
filter_by(id=type_id). \
first()
# row.qos_specs is a list of QualityOfServiceSpecs ref
specs = _dict_with_qos_specs(row.qos_specs)
if not specs:
# turn empty list to None
specs = None
else:
specs = specs[0]
return {'qos_specs': specs}
@require_admin_context
@_retry_on_deadlock
def volume_type_destroy(context, id):
session = get_session()
with session.begin():
_volume_type_get(context, id, session)
results = model_query(context, models.Volume, session=session). \
filter_by(volume_type_id=id).all()
if results:
LOG.error(_LE('VolumeType %s deletion failed, '
'VolumeType in use.'), id)
raise exception.VolumeTypeInUse(volume_type_id=id)
model_query(context, models.VolumeTypes, session=session).\
filter_by(id=id).\
update({'deleted': True,
'deleted_at': timeutils.utcnow(),
'updated_at': literal_column('updated_at')})
model_query(context, models.VolumeTypeExtraSpecs, session=session).\
filter_by(volume_type_id=id).\
update({'deleted': True,
'deleted_at': timeutils.utcnow(),
'updated_at': literal_column('updated_at')})
@require_context
def volume_get_active_by_window(context,
begin,
end=None,
project_id=None):
"""Return volumes that were active during window."""
query = model_query(context, models.Volume, read_deleted="yes")
query = query.filter(or_(models.Volume.deleted_at == None, # noqa
models.Volume.deleted_at > begin))
if end:
query = query.filter(models.Volume.created_at < end)
if project_id:
query = query.filter_by(project_id=project_id)
query = (query.options(joinedload('volume_metadata')).
options(joinedload('volume_type')).
options(joinedload('volume_attachment')).
options(joinedload('consistencygroup')))
if is_admin_context(context):
query = query.options(joinedload('volume_admin_metadata'))
return query.all()
def _volume_type_access_query(context, session=None):
return model_query(context, models.VolumeTypeProjects, session=session,
read_deleted="int_no")
@require_admin_context
def volume_type_access_get_all(context, type_id):
volume_type_id = _volume_type_get_id_from_volume_type(context, type_id)
return _volume_type_access_query(context).\
filter_by(volume_type_id=volume_type_id).all()
@require_admin_context
def volume_type_access_add(context, type_id, project_id):
"""Add given tenant to the volume type access list."""
volume_type_id = _volume_type_get_id_from_volume_type(context, type_id)
access_ref = models.VolumeTypeProjects()
access_ref.update({"volume_type_id": volume_type_id,
"project_id": project_id})
session = get_session()
with session.begin():
try:
access_ref.save(session=session)
except db_exc.DBDuplicateEntry:
raise exception.VolumeTypeAccessExists(volume_type_id=type_id,
project_id=project_id)
return access_ref
@require_admin_context
def volume_type_access_remove(context, type_id, project_id):
"""Remove given tenant from the volume type access list."""
volume_type_id = _volume_type_get_id_from_volume_type(context, type_id)
count = (_volume_type_access_query(context).
filter_by(volume_type_id=volume_type_id).
filter_by(project_id=project_id).
soft_delete(synchronize_session=False))
if count == 0:
raise exception.VolumeTypeAccessNotFound(
volume_type_id=type_id, project_id=project_id)
####################
def _volume_type_extra_specs_query(context, volume_type_id, session=None):
return model_query(context, models.VolumeTypeExtraSpecs, session=session,
read_deleted="no").\
filter_by(volume_type_id=volume_type_id)
@require_context
def volume_type_extra_specs_get(context, volume_type_id):
rows = _volume_type_extra_specs_query(context, volume_type_id).\
all()
result = {}
for row in rows:
result[row['key']] = row['value']
return result
@require_context
def volume_type_extra_specs_delete(context, volume_type_id, key):
session = get_session()
with session.begin():
_volume_type_extra_specs_get_item(context, volume_type_id, key,
session)
_volume_type_extra_specs_query(context, volume_type_id, session).\
filter_by(key=key).\
update({'deleted': True,
'deleted_at': timeutils.utcnow(),
'updated_at': literal_column('updated_at')})
@require_context
def _volume_type_extra_specs_get_item(context, volume_type_id, key,
session=None):
result = _volume_type_extra_specs_query(
context, volume_type_id, session=session).\
filter_by(key=key).\
first()
if not result:
raise exception.VolumeTypeExtraSpecsNotFound(
extra_specs_key=key,
volume_type_id=volume_type_id)
return result
@handle_db_data_error
@require_context
def volume_type_extra_specs_update_or_create(context, volume_type_id,
specs):
session = get_session()
with session.begin():
spec_ref = None
for key, value in specs.items():
try:
spec_ref = _volume_type_extra_specs_get_item(
context, volume_type_id, key, session)
except exception.VolumeTypeExtraSpecsNotFound:
spec_ref = models.VolumeTypeExtraSpecs()
spec_ref.update({"key": key, "value": value,
"volume_type_id": volume_type_id,
"deleted": False})
spec_ref.save(session=session)
return specs
####################
@require_admin_context
def qos_specs_create(context, values):
"""Create a new QoS specs.
:param values: dictionary that contains specifications for QoS
e.g. {'name': 'Name',
'qos_specs': {
'consumer': 'front-end',
'total_iops_sec': 1000,
'total_bytes_sec': 1024000
}
}
"""
specs_id = str(uuid.uuid4())
session = get_session()
with session.begin():
try:
_qos_specs_get_by_name(context, values['name'], session)
raise exception.QoSSpecsExists(specs_id=values['name'])
except exception.QoSSpecsNotFound:
pass
try:
# Insert a root entry for QoS specs
specs_root = models.QualityOfServiceSpecs()
root = dict(id=specs_id)
# 'QoS_Specs_Name' is an internal reserved key to store
# the name of QoS specs
root['key'] = 'QoS_Specs_Name'
root['value'] = values['name']
LOG.debug("DB qos_specs_create(): root %s", root)
specs_root.update(root)
specs_root.save(session=session)
# Insert all specification entries for QoS specs
for k, v in values['qos_specs'].items():
item = dict(key=k, value=v, specs_id=specs_id)
item['id'] = str(uuid.uuid4())
spec_entry = models.QualityOfServiceSpecs()
spec_entry.update(item)
spec_entry.save(session=session)
except db_exc.DBDataError:
msg = _('Error writing field to database')
LOG.exception(msg)
raise exception.Invalid(msg)
except Exception as e:
raise db_exc.DBError(e)
return dict(id=specs_root.id, name=specs_root.value)
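# Example (illustrative; values follow the shape shown in the docstring):
#
#     spec = qos_specs_create(ctxt, {'name': 'gold-qos',
#                                    'qos_specs': {'consumer': 'front-end',
#                                                  'total_iops_sec': 1000}})
#     # spec == {'id': <generated uuid>, 'name': 'gold-qos'}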
@require_admin_context
def _qos_specs_get_by_name(context, name, session=None, inactive=False):
read_deleted = 'yes' if inactive else 'no'
results = model_query(context, models.QualityOfServiceSpecs,
read_deleted=read_deleted, session=session). \
filter_by(key='QoS_Specs_Name'). \
filter_by(value=name). \
options(joinedload('specs')).all()
if not results:
raise exception.QoSSpecsNotFound(specs_id=name)
return results
@require_admin_context
def _qos_specs_get_ref(context, qos_specs_id, session=None, inactive=False):
read_deleted = 'yes' if inactive else 'no'
result = model_query(context, models.QualityOfServiceSpecs,
read_deleted=read_deleted, session=session). \
filter_by(id=qos_specs_id). \
options(joinedload_all('specs')).all()
if not result:
raise exception.QoSSpecsNotFound(specs_id=qos_specs_id)
return result
def _dict_with_children_specs(specs):
"""Convert specs list to a dict."""
result = {}
for spec in specs:
# Skip deleted keys
if not spec['deleted']:
result.update({spec['key']: spec['value']})
return result
def _dict_with_qos_specs(rows):
"""Convert qos specs query results to list.
Qos specs query results are a list of quality_of_service_specs refs,
some are the root entry of a qos specs (key == 'QoS_Specs_Name') and the
rest are children entries, i.e. the detailed specs for a qos specs. This
function converts the query results into a list of dicts, one per qos
specs, carrying its name, id, consumer and remaining specs.
"""
result = []
for row in rows:
if row['key'] == 'QoS_Specs_Name':
member = {}
member['name'] = row['value']
member.update(dict(id=row['id']))
if row.specs:
spec_dict = _dict_with_children_specs(row.specs)
member.update(dict(consumer=spec_dict['consumer']))
del spec_dict['consumer']
member.update(dict(specs=spec_dict))
result.append(member)
return result
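# Sketch of the transformation above (illustrative data): a root row plus
# its children become one member, with 'consumer' hoisted out of 'specs':
#
#     [{'name': 'gold-qos', 'id': <uuid>, 'consumer': 'front-end',
#       'specs': {'total_iops_sec': '1000'}}]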
@require_admin_context
def qos_specs_get(context, qos_specs_id, inactive=False):
rows = _qos_specs_get_ref(context, qos_specs_id, None, inactive)
return _dict_with_qos_specs(rows)[0]
@require_admin_context
def qos_specs_get_all(context, filters=None, marker=None, limit=None,
offset=None, sort_keys=None, sort_dirs=None):
"""Returns a list of all qos_specs.
Results look like:
[{
'id': SPECS-UUID,
'name': 'qos_spec-1',
'consumer': 'back-end',
'specs': {
'key1': 'value1',
'key2': 'value2',
...
}
},
{
'id': SPECS-UUID,
'name': 'qos_spec-2',
'consumer': 'front-end',
'specs': {
'key1': 'value1',
'key2': 'value2',
...
}
},
]
"""
session = get_session()
with session.begin():
# Generate the query
query = _generate_paginate_query(context, session, marker, limit,
sort_keys, sort_dirs, filters,
offset, models.QualityOfServiceSpecs)
# No Qos specs would match, return empty list
if query is None:
return []
rows = query.all()
return _dict_with_qos_specs(rows)
@require_admin_context
def _qos_specs_get_query(context, session):
rows = model_query(context, models.QualityOfServiceSpecs,
session=session,
read_deleted='no').\
options(joinedload_all('specs')).filter_by(key='QoS_Specs_Name')
return rows
def _process_qos_specs_filters(query, filters):
if filters:
# Ensure that filters' keys exist on the model
if not is_valid_model_filters(models.QualityOfServiceSpecs, filters):
return
query = query.filter_by(**filters)
return query
@require_admin_context
def _qos_specs_get(context, qos_spec_id, session=None):
result = model_query(context, models.QualityOfServiceSpecs,
session=session,
read_deleted='no').\
filter_by(id=qos_spec_id).filter_by(key='QoS_Specs_Name').first()
if not result:
raise exception.QoSSpecsNotFound(specs_id=qos_spec_id)
return result
@require_admin_context
def qos_specs_get_by_name(context, name, inactive=False):
rows = _qos_specs_get_by_name(context, name, None, inactive)
return _dict_with_qos_specs(rows)[0]
@require_admin_context
def qos_specs_associations_get(context, qos_specs_id):
"""Return all entities associated with specified qos specs.
For now, the only entity that can be associated with a qos specs
is a volume type, so this is just a wrapper around
volume_type_qos_associations_get(). Qos specs association may be
extended to other entities, such as volumes, in the future.
"""
# Raise QoSSpecsNotFound if no specs found
_qos_specs_get_ref(context, qos_specs_id, None)
return volume_type_qos_associations_get(context, qos_specs_id)
@require_admin_context
def qos_specs_associate(context, qos_specs_id, type_id):
"""Associate volume type from specified qos specs."""
return volume_type_qos_associate(context, type_id, qos_specs_id)
@require_admin_context
def qos_specs_disassociate(context, qos_specs_id, type_id):
"""Disassociate volume type from specified qos specs."""
return volume_type_qos_disassociate(context, qos_specs_id, type_id)
@require_admin_context
def qos_specs_disassociate_all(context, qos_specs_id):
"""Disassociate all entities associated with specified qos specs.
For now, the only entity that can be associated with a qos specs
is a volume type, so this is just a wrapper around
volume_type_qos_disassociate_all(). Qos specs association may be
extended to other entities, such as volumes, in the future.
"""
return volume_type_qos_disassociate_all(context, qos_specs_id)
@require_admin_context
def qos_specs_item_delete(context, qos_specs_id, key):
session = get_session()
with session.begin():
_qos_specs_get_item(context, qos_specs_id, key)
session.query(models.QualityOfServiceSpecs). \
filter(models.QualityOfServiceSpecs.key == key). \
filter(models.QualityOfServiceSpecs.specs_id == qos_specs_id). \
update({'deleted': True,
'deleted_at': timeutils.utcnow(),
'updated_at': literal_column('updated_at')})
@require_admin_context
def qos_specs_delete(context, qos_specs_id):
session = get_session()
with session.begin():
_qos_specs_get_ref(context, qos_specs_id, session)
session.query(models.QualityOfServiceSpecs).\
filter(or_(models.QualityOfServiceSpecs.id == qos_specs_id,
models.QualityOfServiceSpecs.specs_id ==
qos_specs_id)).\
update({'deleted': True,
'deleted_at': timeutils.utcnow(),
'updated_at': literal_column('updated_at')})
@require_admin_context
def _qos_specs_get_item(context, qos_specs_id, key, session=None):
result = model_query(context, models.QualityOfServiceSpecs,
session=session). \
filter(models.QualityOfServiceSpecs.key == key). \
filter(models.QualityOfServiceSpecs.specs_id == qos_specs_id). \
first()
if not result:
raise exception.QoSSpecsKeyNotFound(
specs_key=key,
specs_id=qos_specs_id)
return result
@handle_db_data_error
@require_admin_context
def qos_specs_update(context, qos_specs_id, specs):
"""Make updates to an existing qos specs.
Perform add, update or delete key/values to a qos specs.
"""
session = get_session()
with session.begin():
# make sure qos specs exists
_qos_specs_get_ref(context, qos_specs_id, session)
spec_ref = None
for key in specs.keys():
try:
spec_ref = _qos_specs_get_item(
context, qos_specs_id, key, session)
except exception.QoSSpecsKeyNotFound:
spec_ref = models.QualityOfServiceSpecs()
id = None
if spec_ref.get('id', None):
id = spec_ref['id']
else:
id = str(uuid.uuid4())
value = dict(id=id, key=key, value=specs[key],
specs_id=qos_specs_id,
deleted=False)
LOG.debug('qos_specs_update() value: %s', value)
spec_ref.update(value)
spec_ref.save(session=session)
return specs
####################
@require_context
def volume_type_encryption_get(context, volume_type_id, session=None):
return model_query(context, models.Encryption, session=session,
read_deleted="no").\
filter_by(volume_type_id=volume_type_id).first()
@require_admin_context
def volume_type_encryption_delete(context, volume_type_id):
session = get_session()
with session.begin():
encryption = volume_type_encryption_get(context, volume_type_id,
session)
if not encryption:
raise exception.VolumeTypeEncryptionNotFound(
type_id=volume_type_id)
encryption.update({'deleted': True,
'deleted_at': timeutils.utcnow(),
'updated_at': literal_column('updated_at')})
@handle_db_data_error
@require_admin_context
def volume_type_encryption_create(context, volume_type_id, values):
session = get_session()
with session.begin():
encryption = models.Encryption()
if 'volume_type_id' not in values:
values['volume_type_id'] = volume_type_id
if 'encryption_id' not in values:
values['encryption_id'] = six.text_type(uuid.uuid4())
encryption.update(values)
session.add(encryption)
return encryption
@handle_db_data_error
@require_admin_context
def volume_type_encryption_update(context, volume_type_id, values):
session = get_session()
with session.begin():
encryption = volume_type_encryption_get(context, volume_type_id,
session)
if not encryption:
raise exception.VolumeTypeEncryptionNotFound(
type_id=volume_type_id)
encryption.update(values)
return encryption
def volume_type_encryption_volume_get(context, volume_type_id, session=None):
volume_list = _volume_get_query(context, session=session,
project_only=False).\
filter_by(volume_type_id=volume_type_id).\
all()
return volume_list
####################
@require_context
def volume_encryption_metadata_get(context, volume_id, session=None):
"""Return the encryption metadata for a given volume."""
volume_ref = _volume_get(context, volume_id)
encryption_ref = volume_type_encryption_get(context,
volume_ref['volume_type_id'])
values = {
'encryption_key_id': volume_ref['encryption_key_id'],
}
if encryption_ref:
for key in ['control_location', 'cipher', 'key_size', 'provider']:
values[key] = encryption_ref[key]
return values
####################
@require_context
def _volume_glance_metadata_get_all(context, session=None):
query = model_query(context,
models.VolumeGlanceMetadata,
session=session)
if is_user_context(context):
query = query.filter(
models.Volume.id == models.VolumeGlanceMetadata.volume_id,
models.Volume.project_id == context.project_id)
return query.all()
@require_context
def volume_glance_metadata_get_all(context):
"""Return the Glance metadata for all volumes."""
return _volume_glance_metadata_get_all(context)
@require_context
def volume_glance_metadata_list_get(context, volume_id_list):
"""Return the glance metadata for a volume list."""
query = model_query(context,
models.VolumeGlanceMetadata,
session=None)
query = query.filter(
models.VolumeGlanceMetadata.volume_id.in_(volume_id_list))
return query.all()
@require_context
@require_volume_exists
def _volume_glance_metadata_get(context, volume_id, session=None):
rows = model_query(context, models.VolumeGlanceMetadata, session=session).\
filter_by(volume_id=volume_id).\
filter_by(deleted=False).\
all()
if not rows:
raise exception.GlanceMetadataNotFound(id=volume_id)
return rows
@require_context
@require_volume_exists
def volume_glance_metadata_get(context, volume_id):
"""Return the Glance metadata for the specified volume."""
return _volume_glance_metadata_get(context, volume_id)
@require_context
@require_snapshot_exists
def _volume_snapshot_glance_metadata_get(context, snapshot_id, session=None):
rows = model_query(context, models.VolumeGlanceMetadata, session=session).\
filter_by(snapshot_id=snapshot_id).\
filter_by(deleted=False).\
all()
if not rows:
raise exception.GlanceMetadataNotFound(id=snapshot_id)
return rows
@require_context
@require_snapshot_exists
def volume_snapshot_glance_metadata_get(context, snapshot_id):
"""Return the Glance metadata for the specified snapshot."""
return _volume_snapshot_glance_metadata_get(context, snapshot_id)
@require_context
@require_volume_exists
def volume_glance_metadata_create(context, volume_id, key, value):
"""Update the Glance metadata for a volume by adding a new key:value pair.
This API does not support changing the value of a key once it has been
created.
"""
session = get_session()
with session.begin():
rows = session.query(models.VolumeGlanceMetadata).\
filter_by(volume_id=volume_id).\
filter_by(key=key).\
filter_by(deleted=False).all()
if len(rows) > 0:
raise exception.GlanceMetadataExists(key=key,
volume_id=volume_id)
vol_glance_metadata = models.VolumeGlanceMetadata()
vol_glance_metadata.volume_id = volume_id
vol_glance_metadata.key = key
vol_glance_metadata.value = six.text_type(value)
session.add(vol_glance_metadata)
return
@require_context
@require_volume_exists
def volume_glance_metadata_bulk_create(context, volume_id, metadata):
"""Update the Glance metadata for a volume by adding new key:value pairs.
This API does not support changing the value of a key once it has been
created.
"""
session = get_session()
with session.begin():
for (key, value) in metadata.items():
rows = session.query(models.VolumeGlanceMetadata).\
filter_by(volume_id=volume_id).\
filter_by(key=key).\
filter_by(deleted=False).all()
if len(rows) > 0:
raise exception.GlanceMetadataExists(key=key,
volume_id=volume_id)
vol_glance_metadata = models.VolumeGlanceMetadata()
vol_glance_metadata.volume_id = volume_id
vol_glance_metadata.key = key
vol_glance_metadata.value = six.text_type(value)
session.add(vol_glance_metadata)
@require_context
@require_snapshot_exists
def volume_glance_metadata_copy_to_snapshot(context, snapshot_id, volume_id):
"""Update the Glance metadata for a snapshot.
This copies all of the key:value pairs from the originating volume, to
ensure that a volume created from the snapshot will retain the
original metadata.
"""
session = get_session()
with session.begin():
metadata = _volume_glance_metadata_get(context, volume_id,
session=session)
for meta in metadata:
vol_glance_metadata = models.VolumeGlanceMetadata()
vol_glance_metadata.snapshot_id = snapshot_id
vol_glance_metadata.key = meta['key']
vol_glance_metadata.value = meta['value']
vol_glance_metadata.save(session=session)
@require_context
@require_volume_exists
def volume_glance_metadata_copy_from_volume_to_volume(context,
src_volume_id,
volume_id):
"""Update the Glance metadata for a volume.
This copies all of the key:value pairs from the originating volume, to
ensure that a volume created from the volume (clone) will
retain the original metadata.
"""
session = get_session()
with session.begin():
metadata = _volume_glance_metadata_get(context,
src_volume_id,
session=session)
for meta in metadata:
vol_glance_metadata = models.VolumeGlanceMetadata()
vol_glance_metadata.volume_id = volume_id
vol_glance_metadata.key = meta['key']
vol_glance_metadata.value = meta['value']
vol_glance_metadata.save(session=session)
@require_context
@require_volume_exists
def volume_glance_metadata_copy_to_volume(context, volume_id, snapshot_id):
"""Update Glance metadata from a volume.
Update the Glance metadata from a volume (created from a snapshot) by
copying all of the key:value pairs from the originating snapshot.
This is so that the Glance metadata from the original volume is retained.
"""
session = get_session()
with session.begin():
metadata = _volume_snapshot_glance_metadata_get(context, snapshot_id,
session=session)
for meta in metadata:
vol_glance_metadata = models.VolumeGlanceMetadata()
vol_glance_metadata.volume_id = volume_id
vol_glance_metadata.key = meta['key']
vol_glance_metadata.value = meta['value']
vol_glance_metadata.save(session=session)
@require_context
def volume_glance_metadata_delete_by_volume(context, volume_id):
model_query(context, models.VolumeGlanceMetadata, read_deleted='no').\
filter_by(volume_id=volume_id).\
update({'deleted': True,
'deleted_at': timeutils.utcnow(),
'updated_at': literal_column('updated_at')})
@require_context
def volume_glance_metadata_delete_by_snapshot(context, snapshot_id):
model_query(context, models.VolumeGlanceMetadata, read_deleted='no').\
filter_by(snapshot_id=snapshot_id).\
update({'deleted': True,
'deleted_at': timeutils.utcnow(),
'updated_at': literal_column('updated_at')})
###############################
@require_context
def backup_get(context, backup_id, read_deleted=None, project_only=True):
return _backup_get(context, backup_id,
read_deleted=read_deleted,
project_only=project_only)
def _backup_get(context, backup_id, session=None, read_deleted=None,
project_only=True):
result = model_query(context, models.Backup, session=session,
project_only=project_only,
read_deleted=read_deleted).\
filter_by(id=backup_id).\
first()
if not result:
raise exception.BackupNotFound(backup_id=backup_id)
return result
def _backup_get_all(context, filters=None, marker=None, limit=None,
offset=None, sort_keys=None, sort_dirs=None):
if filters and not is_valid_model_filters(models.Backup, filters):
return []
session = get_session()
with session.begin():
# Generate the paginate query
query = _generate_paginate_query(context, session, marker,
limit, sort_keys, sort_dirs, filters,
offset, models.Backup)
if query is None:
return []
return query.all()
def _backups_get_query(context, session=None, project_only=False):
return model_query(context, models.Backup, session=session,
project_only=project_only)
def _process_backups_filters(query, filters):
if filters:
# Ensure that filters' keys exist on the model
if not is_valid_model_filters(models.Backup, filters):
return
query = query.filter_by(**filters)
return query
@require_admin_context
def backup_get_all(context, filters=None, marker=None, limit=None,
offset=None, sort_keys=None, sort_dirs=None):
return _backup_get_all(context, filters, marker, limit, offset, sort_keys,
sort_dirs)
@require_admin_context
def backup_get_all_by_host(context, host):
return model_query(context, models.Backup).filter_by(host=host).all()
@require_context
def backup_get_all_by_project(context, project_id, filters=None, marker=None,
limit=None, offset=None, sort_keys=None,
sort_dirs=None):
authorize_project_context(context, project_id)
if not filters:
filters = {}
else:
filters = filters.copy()
filters['project_id'] = project_id
return _backup_get_all(context, filters, marker, limit, offset, sort_keys,
sort_dirs)
@require_context
def backup_get_all_by_volume(context, volume_id, filters=None):
authorize_project_context(context, volume_id)
if not filters:
filters = {}
else:
filters = filters.copy()
filters['volume_id'] = volume_id
return _backup_get_all(context, filters)
@handle_db_data_error
@require_context
def backup_create(context, values):
backup = models.Backup()
if not values.get('id'):
values['id'] = str(uuid.uuid4())
backup.update(values)
session = get_session()
with session.begin():
backup.save(session)
return backup
@handle_db_data_error
@require_context
def backup_update(context, backup_id, values):
session = get_session()
with session.begin():
backup = model_query(context, models.Backup,
session=session, read_deleted="yes").\
filter_by(id=backup_id).first()
if not backup:
raise exception.BackupNotFound(
_("No backup with id %s") % backup_id)
backup.update(values)
return backup
@require_admin_context
def backup_destroy(context, backup_id):
model_query(context, models.Backup).\
filter_by(id=backup_id).\
update({'status': fields.BackupStatus.DELETED,
'deleted': True,
'deleted_at': timeutils.utcnow(),
'updated_at': literal_column('updated_at')})
###############################
@require_context
def _transfer_get(context, transfer_id, session=None):
query = model_query(context, models.Transfer,
session=session).\
filter_by(id=transfer_id)
if not is_admin_context(context):
volume = models.Volume
query = query.filter(models.Transfer.volume_id == volume.id,
volume.project_id == context.project_id)
result = query.first()
if not result:
raise exception.TransferNotFound(transfer_id=transfer_id)
return result
@require_context
def transfer_get(context, transfer_id):
return _transfer_get(context, transfer_id)
def _translate_transfers(transfers):
results = []
for transfer in transfers:
r = {}
r['id'] = transfer['id']
r['volume_id'] = transfer['volume_id']
r['display_name'] = transfer['display_name']
r['created_at'] = transfer['created_at']
r['deleted'] = transfer['deleted']
results.append(r)
return results
@require_admin_context
def transfer_get_all(context):
results = model_query(context, models.Transfer).all()
return _translate_transfers(results)
@require_context
def transfer_get_all_by_project(context, project_id):
authorize_project_context(context, project_id)
query = model_query(context, models.Transfer).\
filter(models.Volume.id == models.Transfer.volume_id,
models.Volume.project_id == project_id)
results = query.all()
return _translate_transfers(results)
@require_context
@handle_db_data_error
def transfer_create(context, values):
if not values.get('id'):
values['id'] = str(uuid.uuid4())
session = get_session()
with session.begin():
volume_ref = _volume_get(context,
values['volume_id'],
session=session)
if volume_ref['status'] != 'available':
msg = _('Volume must be available')
LOG.error(msg)
raise exception.InvalidVolume(reason=msg)
volume_ref['status'] = 'awaiting-transfer'
transfer = models.Transfer()
transfer.update(values)
session.add(transfer)
volume_ref.update(volume_ref)
return transfer
@require_context
@_retry_on_deadlock
def transfer_destroy(context, transfer_id):
session = get_session()
with session.begin():
transfer_ref = _transfer_get(context,
transfer_id,
session=session)
volume_ref = _volume_get(context,
transfer_ref['volume_id'],
session=session)
# If the volume state is not 'awaiting-transfer' don't change it, but
# we can still mark the transfer record as deleted.
if volume_ref['status'] != 'awaiting-transfer':
LOG.error(_LE('Volume in unexpected state %s, expected '
'awaiting-transfer'), volume_ref['status'])
else:
volume_ref['status'] = 'available'
volume_ref.update(volume_ref)
volume_ref.save(session=session)
model_query(context, models.Transfer, session=session).\
filter_by(id=transfer_id).\
update({'deleted': True,
'deleted_at': timeutils.utcnow(),
'updated_at': literal_column('updated_at')})
@require_context
def transfer_accept(context, transfer_id, user_id, project_id):
session = get_session()
with session.begin():
transfer_ref = _transfer_get(context, transfer_id, session)
volume_id = transfer_ref['volume_id']
volume_ref = _volume_get(context, volume_id, session=session)
if volume_ref['status'] != 'awaiting-transfer':
msg = _('Transfer %(transfer_id)s: Volume id %(volume_id)s in '
'unexpected state %(status)s, expected '
'awaiting-transfer') % {'transfer_id': transfer_id,
'volume_id': volume_ref['id'],
'status': volume_ref['status']}
LOG.error(msg)
raise exception.InvalidVolume(reason=msg)
volume_ref['status'] = 'available'
volume_ref['user_id'] = user_id
volume_ref['project_id'] = project_id
volume_ref['updated_at'] = literal_column('updated_at')
volume_ref.update(volume_ref)
session.query(models.Transfer).\
filter_by(id=transfer_ref['id']).\
update({'deleted': True,
'deleted_at': timeutils.utcnow(),
'updated_at': literal_column('updated_at')})
###############################
@require_admin_context
def _consistencygroup_data_get_for_project(context, project_id,
session=None):
query = model_query(context,
func.count(models.ConsistencyGroup.id),
read_deleted="no",
session=session).\
filter_by(project_id=project_id)
result = query.first()
return (0, result[0] or 0)
@require_context
def _consistencygroup_get(context, consistencygroup_id, session=None):
result = model_query(context, models.ConsistencyGroup, session=session,
project_only=True).\
filter_by(id=consistencygroup_id).\
first()
if not result:
raise exception.ConsistencyGroupNotFound(
consistencygroup_id=consistencygroup_id)
return result
@require_context
def consistencygroup_get(context, consistencygroup_id):
return _consistencygroup_get(context, consistencygroup_id)
def _consistencygroups_get_query(context, session=None, project_only=False):
return model_query(context, models.ConsistencyGroup, session=session,
project_only=project_only)
def _process_consistencygroups_filters(query, filters):
if filters:
# Ensure that filters' keys exist on the model
if not is_valid_model_filters(models.ConsistencyGroup, filters):
return
query = query.filter_by(**filters)
return query
def _consistencygroup_get_all(context, filters=None, marker=None, limit=None,
offset=None, sort_keys=None, sort_dirs=None):
if filters and not is_valid_model_filters(models.ConsistencyGroup,
filters):
return []
session = get_session()
with session.begin():
# Generate the paginate query
query = _generate_paginate_query(context, session, marker,
limit, sort_keys, sort_dirs, filters,
offset, models.ConsistencyGroup)
if query is None:
return []
return query.all()
@require_admin_context
def consistencygroup_get_all(context, filters=None, marker=None, limit=None,
offset=None, sort_keys=None, sort_dirs=None):
"""Retrieves all consistency groups.
If no sort parameters are specified then the returned cgs are sorted
first by the 'created_at' key and then by the 'id' key in descending
order.
:param context: context to query under
:param marker: the last item of the previous page, used to determine the
next page of results to return
:param limit: maximum number of items to return
:param sort_keys: list of attributes by which results should be sorted,
paired with corresponding item in sort_dirs
:param sort_dirs: list of directions in which results should be sorted,
paired with corresponding item in sort_keys
:param filters: dictionary of filters; values that are in lists, tuples,
or sets cause an 'IN' operation, while exact matching
is used for other values, see
_process_consistencygroups_filters function for more
information
:returns: list of matching consistency groups
"""
return _consistencygroup_get_all(context, filters, marker, limit, offset,
sort_keys, sort_dirs)
@require_context
def consistencygroup_get_all_by_project(context, project_id, filters=None,
marker=None, limit=None, offset=None,
sort_keys=None, sort_dirs=None):
"""Retrieves all consistency groups in a project.
If no sort parameters are specified then the returned cgs are sorted
first by the 'created_at' key and then by the 'id' key in descending
order.
:param context: context to query under
:param marker: the last item of the previous page, used to determine the
next page of results to return
:param limit: maximum number of items to return
:param sort_keys: list of attributes by which results should be sorted,
paired with corresponding item in sort_dirs
:param sort_dirs: list of directions in which results should be sorted,
paired with corresponding item in sort_keys
:param filters: dictionary of filters; values that are in lists, tuples,
or sets cause an 'IN' operation, while exact matching
is used for other values, see
_process_consistencygroups_filters function for more
information
:returns: list of matching consistency groups
"""
authorize_project_context(context, project_id)
if not filters:
filters = {}
else:
filters = filters.copy()
filters['project_id'] = project_id
return _consistencygroup_get_all(context, filters, marker, limit, offset,
sort_keys, sort_dirs)
@handle_db_data_error
@require_context
def consistencygroup_create(context, values):
consistencygroup = models.ConsistencyGroup()
if not values.get('id'):
values['id'] = str(uuid.uuid4())
session = get_session()
with session.begin():
consistencygroup.update(values)
session.add(consistencygroup)
return _consistencygroup_get(context, values['id'], session=session)
@handle_db_data_error
@require_context
def consistencygroup_update(context, consistencygroup_id, values):
session = get_session()
with session.begin():
result = model_query(context, models.ConsistencyGroup,
project_only=True).\
filter_by(id=consistencygroup_id).\
first()
if not result:
raise exception.ConsistencyGroupNotFound(
_("No consistency group with id %s") % consistencygroup_id)
result.update(values)
result.save(session=session)
return result
@require_admin_context
def consistencygroup_destroy(context, consistencygroup_id):
session = get_session()
with session.begin():
model_query(context, models.ConsistencyGroup, session=session).\
filter_by(id=consistencygroup_id).\
update({'status': fields.ConsistencyGroupStatus.DELETED,
'deleted': True,
'deleted_at': timeutils.utcnow(),
'updated_at': literal_column('updated_at')})
###############################
@require_context
def _cgsnapshot_get(context, cgsnapshot_id, session=None):
result = model_query(context, models.Cgsnapshot, session=session,
project_only=True).\
filter_by(id=cgsnapshot_id).\
first()
if not result:
raise exception.CgSnapshotNotFound(cgsnapshot_id=cgsnapshot_id)
return result
@require_context
def cgsnapshot_get(context, cgsnapshot_id):
return _cgsnapshot_get(context, cgsnapshot_id)
def is_valid_model_filters(model, filters):
"""Return True if filter values exist on the model
:param model: a Cinder model
:param filters: dictionary of filters
"""
for key in filters.keys():
try:
getattr(model, key)
except AttributeError:
LOG.debug("'%s' filter key is not valid.", key)
return False
return True
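# Example (illustrative):
#
#     is_valid_model_filters(models.Snapshot, {'project_id': 'p1'})  # True
#     is_valid_model_filters(models.Snapshot, {'bogus_key': 1})      # False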
def _cgsnapshot_get_all(context, project_id=None, group_id=None, filters=None):
query = model_query(context, models.Cgsnapshot)
if filters:
if not is_valid_model_filters(models.Cgsnapshot, filters):
return []
query = query.filter_by(**filters)
if project_id:
query = query.filter_by(project_id=project_id)
if group_id:
query = query.filter_by(consistencygroup_id=group_id)
return query.all()
@require_admin_context
def cgsnapshot_get_all(context, filters=None):
return _cgsnapshot_get_all(context, filters=filters)
@require_admin_context
def cgsnapshot_get_all_by_group(context, group_id, filters=None):
return _cgsnapshot_get_all(context, group_id=group_id, filters=filters)
@require_context
def cgsnapshot_get_all_by_project(context, project_id, filters=None):
authorize_project_context(context, project_id)
return _cgsnapshot_get_all(context, project_id=project_id, filters=filters)
@handle_db_data_error
@require_context
def cgsnapshot_create(context, values):
cgsnapshot = models.Cgsnapshot()
if not values.get('id'):
values['id'] = str(uuid.uuid4())
session = get_session()
with session.begin():
cgsnapshot.update(values)
session.add(cgsnapshot)
return _cgsnapshot_get(context, values['id'], session=session)
@handle_db_data_error
@require_context
def cgsnapshot_update(context, cgsnapshot_id, values):
session = get_session()
with session.begin():
result = model_query(context, models.Cgsnapshot, project_only=True).\
filter_by(id=cgsnapshot_id).\
first()
if not result:
raise exception.CgSnapshotNotFound(
_("No cgsnapshot with id %s") % cgsnapshot_id)
result.update(values)
result.save(session=session)
return result
@require_admin_context
def cgsnapshot_destroy(context, cgsnapshot_id):
session = get_session()
with session.begin():
model_query(context, models.Cgsnapshot, session=session).\
filter_by(id=cgsnapshot_id).\
update({'status': 'deleted',
'deleted': True,
'deleted_at': timeutils.utcnow(),
'updated_at': literal_column('updated_at')})
@require_admin_context
def purge_deleted_rows(context, age_in_days):
"""Purge deleted rows older than age from storage tables."""
try:
age_in_days = int(age_in_days)
except ValueError:
msg = _('Invalid value for age, %(age)s') % {'age': age_in_days}
LOG.exception(msg)
raise exception.InvalidParameterValue(msg)
if age_in_days <= 0:
msg = _('Must supply a positive value for age')
LOG.error(msg)
raise exception.InvalidParameterValue(msg)
engine = get_engine()
session = get_session()
metadata = MetaData()
metadata.bind = engine
tables = []
for model_class in models.__dict__.values():
if hasattr(model_class, "__tablename__") \
and hasattr(model_class, "deleted"):
tables.append(model_class.__tablename__)
# Reorder the list so the volumes table is last to avoid FK constraints
tables.remove("volumes")
tables.append("volumes")
for table in tables:
t = Table(table, metadata, autoload=True)
LOG.info(_LI('Purging deleted rows older than age=%(age)d days '
'from table=%(table)s'), {'age': age_in_days,
'table': table})
deleted_age = timeutils.utcnow() - dt.timedelta(days=age_in_days)
try:
with session.begin():
result = session.execute(
t.delete()
.where(t.c.deleted_at < deleted_age))
except db_exc.DBReferenceError:
LOG.exception(_LE('DBError detected when purging from '
'table=%(table)s'), {'table': table})
raise
rows_purged = result.rowcount
LOG.info(_LI("Deleted %(row)d rows from table=%(table)s"),
{'row': rows_purged, 'table': table})
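# Example (illustrative): purge_deleted_rows(ctxt, age_in_days=90) removes
# soft-deleted rows older than 90 days; 'volumes' is purged last so that
# rows in child tables referencing it are gone first.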
###############################
@require_context
def driver_initiator_data_update(context, initiator, namespace, updates):
session = get_session()
with session.begin():
set_values = updates.get('set_values', {})
for key, value in set_values.items():
data = session.query(models.DriverInitiatorData).\
filter_by(initiator=initiator).\
filter_by(namespace=namespace).\
filter_by(key=key).\
first()
if data:
data.update({'value': value})
data.save(session=session)
else:
data = models.DriverInitiatorData()
data.initiator = initiator
data.namespace = namespace
data.key = key
data.value = value
session.add(data)
remove_values = updates.get('remove_values', [])
for key in remove_values:
session.query(models.DriverInitiatorData).\
filter_by(initiator=initiator).\
filter_by(namespace=namespace).\
filter_by(key=key).\
delete()
@require_context
def driver_initiator_data_get(context, initiator, namespace):
session = get_session()
with session.begin():
return session.query(models.DriverInitiatorData).\
filter_by(initiator=initiator).\
filter_by(namespace=namespace).\
all()
###############################
PAGINATION_HELPERS = {
models.Volume: (_volume_get_query, _process_volume_filters, _volume_get),
models.Snapshot: (_snaps_get_query, _process_snaps_filters, _snapshot_get),
models.Backup: (_backups_get_query, _process_backups_filters, _backup_get),
models.QualityOfServiceSpecs: (_qos_specs_get_query,
_process_qos_specs_filters, _qos_specs_get),
models.VolumeTypes: (_volume_type_get_query, _process_volume_types_filters,
_volume_type_get_db_object),
models.ConsistencyGroup: (_consistencygroups_get_query,
_process_consistencygroups_filters,
_consistencygroup_get)
}
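# NOTE: each PAGINATION_HELPERS entry is a (get_query, process_filters,
# get) triple consumed by _generate_paginate_query: get_query builds the
# base query, process_filters applies the filters dict (returning None
# when no row can match), and get resolves the pagination marker id to a
# model instance.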
###############################
@require_context
def image_volume_cache_create(context, host, image_id, image_updated_at,
volume_id, size):
session = get_session()
with session.begin():
cache_entry = models.ImageVolumeCacheEntry()
cache_entry.host = host
cache_entry.image_id = image_id
cache_entry.image_updated_at = image_updated_at
cache_entry.volume_id = volume_id
cache_entry.size = size
session.add(cache_entry)
return cache_entry
@require_context
def image_volume_cache_delete(context, volume_id):
session = get_session()
with session.begin():
session.query(models.ImageVolumeCacheEntry).\
filter_by(volume_id=volume_id).\
delete()
@require_context
def image_volume_cache_get_and_update_last_used(context, image_id, host):
session = get_session()
with session.begin():
entry = session.query(models.ImageVolumeCacheEntry).\
filter_by(image_id=image_id).\
filter_by(host=host).\
order_by(desc(models.ImageVolumeCacheEntry.last_used)).\
first()
if entry:
entry.last_used = timeutils.utcnow()
entry.save(session=session)
return entry
@require_context
def image_volume_cache_get_by_volume_id(context, volume_id):
session = get_session()
with session.begin():
return session.query(models.ImageVolumeCacheEntry).\
filter_by(volume_id=volume_id).\
first()
@require_context
def image_volume_cache_get_all_for_host(context, host):
session = get_session()
with session.begin():
return session.query(models.ImageVolumeCacheEntry).\
filter_by(host=host).\
order_by(desc(models.ImageVolumeCacheEntry.last_used)).\
all()
###############################
def get_model_for_versioned_object(versioned_object):
# Exceptions to the model mapping; in general Versioned Objects have the
# same name as their ORM model counterparts, but some diverge
VO_TO_MODEL_EXCEPTIONS = {
'BackupImport': models.Backup,
'VolumeType': models.VolumeTypes,
'CGSnapshot': models.Cgsnapshot,
}
model_name = versioned_object.obj_name()
return (VO_TO_MODEL_EXCEPTIONS.get(model_name) or
getattr(models, model_name))
def _get_get_method(model):
# Exceptions to the model-to-get-method mapping; in general a method name
# is derived by converting the ORM model name from camel case to snake
# case and appending '_get'
GET_EXCEPTIONS = {
models.ConsistencyGroup: consistencygroup_get,
models.VolumeTypes: _volume_type_get_full,
}
if model in GET_EXCEPTIONS:
return GET_EXCEPTIONS[model]
# General conversion
# Convert camel cased model name to snake format
s = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', model.__name__)
# Get method must be snake formatted model name concatenated with _get
method_name = re.sub('([a-z0-9])([A-Z])', r'\1_\2', s).lower() + '_get'
return globals().get(method_name)
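# Example of the conversion above (illustrative):
#
#     _get_get_method(models.Snapshot)     # -> snapshot_get
#     _get_get_method(models.VolumeTypes)  # -> _volume_type_get_full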
_GET_METHODS = {}
@require_context
def get_by_id(context, model, id, *args, **kwargs):
# Add get method to cache dictionary if it's not already there
if not _GET_METHODS.get(model):
_GET_METHODS[model] = _get_get_method(model)
return _GET_METHODS[model](context, id, *args, **kwargs)
def condition_db_filter(model, field, value):
"""Create matching filter.
If value is an iterable other than a string, any of the values is
a valid match (OR), so we'll use SQL IN operator.
If it's not an iterator == operator will be used.
"""
orm_field = getattr(model, field)
# For values that must match and are iterables we use IN
if (isinstance(value, collections.Iterable) and
not isinstance(value, six.string_types)):
# We cannot use in_ when one of the values is None
if None not in value:
return orm_field.in_(value)
return or_(orm_field == v for v in value)
# For values that must match and are not iterables we use ==
return orm_field == value
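# Example (illustrative) of the filters produced above:
#
#     condition_db_filter(models.Volume, 'status', 'available')
#     #   -> Volume.status == 'available'
#     condition_db_filter(models.Volume, 'status', ['in-use', None])
#     #   -> or_(status == 'in-use', status == None)  # IN can't match NULL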
def condition_not_db_filter(model, field, value, auto_none=True):
"""Create non matching filter.
If value is an iterable other than a string, any of the values is
a valid match (OR), so we'll use SQL IN operator.
If it's not an iterator == operator will be used.
If auto_none is True then we'll consider NULL values as different as well,
like we do in Python and not like SQL does.
"""
result = ~condition_db_filter(model, field, value)
if (auto_none
and ((isinstance(value, collections.Iterable) and
not isinstance(value, six.string_types)
and None not in value)
or (value is not None))):
orm_field = getattr(model, field)
result = or_(result, orm_field.is_(None))
return result
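# Example (illustrative): with auto_none=True a NULL column counts as
# "different", matching Python semantics rather than SQL's three-valued
# logic:
#
#     condition_not_db_filter(models.Volume, 'status', 'in-use')
#     #   -> or_(~(status == 'in-use'), status.is_(None))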
def is_orm_value(obj):
"""Check if object is an ORM field or expression."""
return isinstance(obj, (sqlalchemy.orm.attributes.InstrumentedAttribute,
sqlalchemy.sql.expression.ColumnElement))
@_retry_on_deadlock
@require_context
def conditional_update(context, model, values, expected_values, filters=(),
include_deleted='no', project_only=False):
"""Compare-and-swap conditional update SQLAlchemy implementation."""
# Provided filters will become part of the where clause
where_conds = list(filters)
# Build where conditions with operators ==, !=, NOT IN and IN
for field, condition in expected_values.items():
if not isinstance(condition, storage.Condition):
condition = storage.Condition(condition, field)
where_conds.append(condition.get_filter(model, field))
# Transform case values
values = {field: case(value.whens, value.value, value.else_)
if isinstance(value, storage.Case)
else value
for field, value in values.items()}
query = model_query(context, model, read_deleted=include_deleted,
project_only=project_only)
# Return True if we were able to change any DB entry, False otherwise
result = query.filter(*where_conds).update(values,
synchronize_session=False)
return 0 != result
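# Example (illustrative compare-and-swap; names are hypothetical):
#
#     # Atomically move a volume from 'available' to 'deleting':
#     changed = conditional_update(ctxt, models.Volume,
#                                  {'status': 'deleting'},
#                                  {'id': volume_id,
#                                   'status': 'available'})
#     # changed is False if another worker won the race.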
from random import random, choice, randint
from string import printable
from collections import Counter
from nose.tools import assert_raises
from importlib import import_module
from feature import *
def randstr(length=12):
return "".join([choice(printable) for _ in range(length)])
def test_setter_extended():
"""Test if setting multiple features works."""
group = Group({"a": Numerical(), "b": Numerical(), "c": Group({"d": Numerical(), }), })
group.set_a(10)
group.set_b(20)
group.set_c_d(30)
group.push()
group.set_a(11)
group.set_b(21)
group.set_c(31)
group.push()
group.set("a", 12)
group.set("b", 22)
group.set("c", "d", 32)
group.push()
group.set("a", 13)
group.set("b", 23)
group.set("c", 33)
group.push()
array = group.array()
assert array.shape == (4, 3)
for i, row in enumerate(array):
assert tuple(row) == (10 + i, 20 + i, 30 + i)
def test_numerical_feature():
"""Test the Numerical feature class."""
group = Group({"a": Numerical(),
"b": Numerical(),
"c": Numerical(dimensions=3),
"d": Numerical(dimensions="xyz"), })
group.set_a(100)
group.set_b(200)
group.set_c(0, 10)
group.set_c(1, 20)
group.set_c(2, 30)
group.set_d("x", 1)
group.set_d("y", 2)
group.set_d("z", 3)
group.push()
group.set_a(100)
group.set_b(200)
group.set_c(0, 40)
group.set_c(1, 50)
group.set_c(2, 60)
group.set_d_x(1)
group.set_d_y(2)
group.set_d_z(3)
group.push()
array = group.array()
assert array.shape == (2, 8)
count = Counter()
for row in array:
for column, value in zip(array.columns, row):
count[column[0]] += value
assert count["a"] == 200
assert count["b"] == 400
assert count["c"] == 210
assert count["d"] == 12
def test_categorical_feature():
"""Test the Categorical feature class."""
feature = Categorical("abc")
for element in "abc":
feature.set(element)
feature.set("ignore this")
feature.push()
for element in "abc":
getattr(feature, "set_" + element)()
feature.push()
array = feature.array()
assert array.shape == (6, 3)
for i, row in enumerate(array):
assert sum(row) == 1.0 and row[i % 3] == 1.0
def test_hashed_feature():
"""Test the Hashed feature class."""
def mock(c):
return ord(c) - ord('a')
group = Group({"a": Hashed(buckets=3, hash=mock), "b": Hashed(buckets=5, hash=mock), })
for i in range(10):
group.set_a("abcde" [i % 3])
group.set_b("abcde" [i % 5])
group.push()
array = group.array()
assert array.shape == (10, 8)
for i, row in enumerate(array):
for column, value in zip(array.columns, row):
feature, index = column.split("_")
if feature == "a":
assert value == float((i % 3) == int(index))
else:
assert value == float((i % 5) == int(index))
def test_hashed_feature_random_sign():
"""Test if the default hash function distributes random signs evenly."""
group = Group({"a": Hashed(buckets=100, random_sign=True), })
for i in range(100):
for j in range(100):
group.set(randstr(), weight=123)
group.push()
array = group.array()
assert array.shape == (100, 100)
pos, neg = 0, 0
for row in array:
for value in row:
assert value == 0 or abs(value) == 123
pos += int(value > 0)
neg += int(value < 0)
assert pos and neg and abs(pos - neg) < (pos + neg) * 0.1
def test_stress():
"""Test to see if using different classes works."""
group = Group({
"a": Numerical(),
"b": Numerical(),
"c": Categorical(list(range(5))),
"d": Hashed(buckets=5),
"e": Hashed(buckets=5,
random_sign=True),
})
for i in range(100):
group.set_a(random())
group.set_b(random())
group.set_c(randint(0, 4))
for i in range(10):
group.set_d(randstr())
group.set_e(randstr())
group.push()
array = group.array()
assert array.shape == (100, 17)
class CustomSized(Feature):
"""Custom feature with predefined size."""
Dimensions = 4
def set(self, x):
self.slot[x] = 1.0
class CustomNamed(Feature):
"""Custom feature with predefined field names."""
Dimensions = ["a", "b", "c", "d"]
def set(self, x):
self.slot[x] = 1.0
class CustomDynamic(Feature):
"""Custom feature with dynamic field names."""
def set(self, x):
self.slot[x] = 1.0
class CustomSlotList(Feature):
"""Custom feature with its own slot."""
def set(self):
self.slot = [1, 2, 3]
class CustomSlotDict(Feature):
"""Custom feature with its own slot."""
def set(self):
self.slot = {"foo": 1, "bar": 2, "baz": 3}
def test_custom_features():
"""Test if custom features work."""
group = Group({
"a": CustomSized(),
"b": CustomNamed(),
"c": CustomDynamic(),
"d": CustomSlotList(),
"e": CustomSlotDict(),
})
for _ in range(10):
for x in range(4):
group.set_a(x)
for x in "abcd":
group.set_b(x)
group.set_c("blub")
group.set_d()
group.set_e()
group.push()
array = group.array()
assert array.shape == (10, 15)
def test_field_name_errors():
"""Test if using undefined keys in features with predefined size or
field names causes an exception."""
group = Group({"test": CustomSized(), })
group.set_test(5)
assert_raises(KeyError, group.push)
group = Group({"test": CustomNamed(), })
group.set_test("e")
assert_raises(KeyError, group.push)
def test_custom_empty():
"""Test if array can be build from empty features when the field size or
the field names are fixed."""
group = Group({
"a": CustomSized(),
"b": CustomNamed(),
"c": Numerical(dimensions=4),
"d": Hashed(buckets=4),
"e": Categorical([1, 2, 3, 4]),
})
for i in range(10):
group.push()
array = group.array()
assert array.shape == (10, 20)
def test_array_concat():
"""Test if array concatenation works."""
array = Array(columns="abc")
for i in range(10):
array.append([1, 2, 3])
    # Any 2-dimensional array with the same number of rows should work.
other = [[4, 5, 6]] * len(array)
array.concat(other)
assert array.shape == (10, 6)
assert len(array.columns) == 6
assert all(type(column) is str for column in array.columns)
for row in array:
assert tuple(row) == (1, 2, 3, 4, 5, 6)
# Now this should fail since the columns have the same names.
other = Array(columns="abc")
for i in range(10):
other.append([7, 8, 9])
assert_raises(ValueError, array.concat, other)
# Adding a prefix should make it work.
array.concat(other, prefix="other")
assert array.shape == (10, 9)
assert len(array.columns) == 9
for row in array:
assert tuple(row) == (1, 2, 3, 4, 5, 6, 7, 8, 9)
def test_array_concat_numpy():
try:
import numpy
    except ImportError:
return
array = Array(columns="abc")
for i in range(10):
array.append([1, 2, 3])
other = numpy.random.rand(len(array), 4)
array.concat(other, prefix="other")
assert array.shape == (10, 7)
assert len(array.columns) == 7
for i, row in enumerate(array):
assert all(x == y for x, y in zip(row[-4:], other[i]))
def test_pipe_simple():
"""Test if transforming the array works."""
def transform(array):
"""Turns the (n,2) array into a (n,4) array."""
assert array.shape == (10, 2)
new = Array(columns="abcd")
for x, y in array:
new.append([x, y, x + y, x * y])
return new
group = Pipe(Group({"a": Numerical(), "b": Numerical()}), transform)
for _ in range(10):
group.set_a(1e-6 + random())
group.set_b(1e-6 + random())
group.push()
array = group.array()
assert array.shape == (10, 4)
for row in array:
assert row[0] > 0.0 and row[1] > 0.0
assert row[2] == row[0] + row[1]
assert row[3] == row[0] * row[1]
def test_pipe_numpy():
try:
import numpy
import numpy.testing
    except ImportError:
return
zero_mean = lambda x: x - x.mean(axis=0)
unit_variance = lambda x: x / x.std(axis=0)
group = Group({
"a": Numerical(dimensions=10),
"b": Pipe(
Numerical(dimensions=10),
numpy.array,
zero_mean,
unit_variance),
})
for i in range(200):
for j in range(10):
group.set_a(j, random())
group.set_b(j, random())
group.push()
array = numpy.array(group.array())
assert array.shape == (200, 20)
a_avg, a_std = 0.5, 0.2887
b_avg, b_std = 0.0, 1.0
avg = array.mean(axis=0)
numpy.testing.assert_allclose(avg, [a_avg] * 10 + [b_avg] * 10, atol=0.2, rtol=0.0)
std = array.std(axis=0)
numpy.testing.assert_allclose(std, [a_std] * 10 + [b_std] * 10, atol=0.1, rtol=0.0)
|
{
"content_hash": "74c10160b857f22fae83297d69c58b63",
"timestamp": "",
"source": "github",
"line_count": 386,
"max_line_length": 91,
"avg_line_length": 24.44559585492228,
"alnum_prop": 0.5494913098770665,
"repo_name": "slyrz/feature",
"id": "fce582ac93796997d4db18f9cc7641594d84bd44",
"size": "9436",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "19417"
}
],
"symlink_target": ""
}
|
"""
=======
License
=======
Copyright (c) 2015 Thomas Lehmann
Permission is hereby granted, free of charge, to any person obtaining a copy of this
software and associated documentation files (the "Software"), to deal in the Software
without restriction, including without limitation the rights to use, copy, modify, merge,
publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons
to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies
or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
# pylint: disable=R0201
import unittest
from hamcrest import assert_that, equal_to
from concept.math.factorization import probe
class TestFactorization(unittest.TestCase):
""" Testing of factorization functions. """
def test_probe(self):
""" Testing probe function. """
assert_that(probe(12), equal_to([1, 2, 3, 4, 6, 12]))
assert_that(probe(-12), equal_to([1, 2, 3, 4, 6, 12]))
|
{
"content_hash": "2c95d4cc9a642398d1c3af1b3a6b7a62",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 93,
"avg_line_length": 42.13513513513514,
"alnum_prop": 0.72674791533034,
"repo_name": "Nachtfeuer/concept-py",
"id": "3798ec7a6b8a6a7f7f48a4b5c43b8e1b0e8b4f8b",
"size": "1559",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_factorization.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Gnuplot",
"bytes": "1312"
},
{
"name": "Jupyter Notebook",
"bytes": "70195"
},
{
"name": "Python",
"bytes": "421559"
},
{
"name": "Roff",
"bytes": "1202"
},
{
"name": "Shell",
"bytes": "7482"
}
],
"symlink_target": ""
}
|
import mysql.connector
from objects import Cart
from objects import Product
config = {
'user':'shop',
'password':'pass',
'host':'localhost',
'database':'webshop'
}
# Set decimal context precision (3 significant digits)
import decimal
decimal.getcontext().prec = 3
# Get products, with query options
# If userId is given, inner join is made between products and shopcarts
def getProducts(name = None, sortBy = None, minPrice = None, maxPrice = None, offset = None, limit = None, productIds = None, code = None, userId = None):
    products = []
    conn = cursor = None
    try:
conn = mysql.connector.connect(**config)
cursor = conn.cursor(buffered=True)
params = []
query = "SELECT product_id, code, name, price, in_stock FROM products"
if userId is not None and userId > 0:
query += " JOIN shopcarts sc USING(product_id)"
query += " WHERE TRUE"
#Form rest of the query according to given parameters
if productIds is not None and len(productIds) > 0:
query += " AND product_id IN ("
for i in range(0,len(productIds)):
if i > 0:
query += ","
query += "%s"
params.append(productIds[i])
query += ")"
if userId is not None and userId > 0:
query += " AND sc.user_id = %s"
params.append(userId)
if code is not None:
query += " AND code = %s"
params.append(code)
if name is not None:
query += " AND name LIKE %s"
name += "%%"
params.append(name)
if minPrice is not None:
query += " AND price >= %s"
params.append(minPrice)
if maxPrice is not None:
query += " AND price <= %s"
params.append(maxPrice)
if sortBy is not None:
if sortBy == 'name':
query += " ORDER BY name ASC"
elif sortBy == 'price':
query += " ORDER BY price ASC"
if limit is not None:
query += " LIMIT %s"
params.append(limit)
#No offset without limit
if offset is not None:
query += " OFFSET %s"
params.append(offset)
#Exec query
print(query)
cursor.execute(query,params)
#Add resulted products to array
for (product_id, code, name, price, in_stock) in cursor:
products.append(Product(product_id, str(code), str(name), decimal.Decimal(price), in_stock))
#print(product_id, code, name, price, in_stock)
except mysql.connector.Error as e:
print("Error in products query: {}".format(e))
    finally:
        if cursor:
            cursor.close()
        if conn:
            conn.close()
return products
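# Usage sketch (illustrative): fetch the ten cheapest products whose name
# starts with 'Lamp' and that cost at most 50:
#   cheap_lamps = getProducts(name='Lamp', maxPrice=50, sortBy='price', limit=10)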
# Add new or update existing product
def updateProduct(product):
if product is not None:
#Validate insertable object
product.validate()
        conn = cursor = None
        try:
conn = mysql.connector.connect(**config)
cursor = conn.cursor()
query, params = "", ()
if product.productId is None or product.productId <= 0:
query = "INSERT INTO products(code,name,price,in_stock) VALUES(%s, %s, %s, %s)"
params = (product.code, product.name, product.price, product.in_stock)
else:
query = "UPDATE products SET code = %s, name = %s, price = %s, in_stock = %s WHERE product_id = %s"
params = (product.code, product.name, product.price, product.in_stock, product.productId)
cursor.execute(query, params)
conn.commit()
except mysql.connector.Error as e:
print("Error in product update: {}".format(e))
finally:
if cursor:
cursor.close()
if conn:
conn.close()
# Delete product from database
def removeProduct(productId):
    if productId is None:
return
    conn = cursor = None
    try:
conn = mysql.connector.connect(**config)
cursor = conn.cursor()
query = "DELETE FROM products WHERE product_id = %s"
cursor.execute(query, [productId])
conn.commit()
except mysql.connector.Error as e:
print("Error in product deletion: {}".format(e))
finally:
if cursor:
cursor.close()
if conn:
conn.close()
# Get shopping cart of user
def getShoppingCart(userId):
result = None
if userId is not None:
        conn = cursor = None
        try:
conn = mysql.connector.connect(**config)
cursor = conn.cursor()
query = "SELECT product_id, count FROM shopcarts WHERE user_id = %s"
cursor.execute(query, [userId])
#Build Cart object from results
for (product_id, count) in cursor:
if result is None:
result = Cart(userId)
result.products[product_id] = count
except mysql.connector.Error as e:
print("Error in product update: {}".format(e))
finally:
if cursor:
cursor.close()
if conn:
conn.close()
return result
def updateShoppingCart(cart):
    if cart is not None:
#Validate insertable/updateable object
cart.validate()
        conn = cursor = None
        try:
conn = mysql.connector.connect(**config)
cursor = conn.cursor()
query, params = "", []
#Delete earlier products in cart by userId
#cart can also be effectively cleared with no products in cart object
query = "DELETE FROM shopcarts WHERE user_id = %s"
cursor.execute(query, [cart.userId])
conn.commit()
#Form a querystring of values from products in cart
query = ""
for productId, count in cart.products.items():
if len(query) > 0:
query += ","
query += "(%s, %s, %s)"
# Add userId, productId, productsCount to queryParams
params.append(cart.userId)
params.append(productId)
params.append(count)
#Check if data is to be added to db
if len(query) > 0:
query = ("INSERT INTO shopcarts (user_id, product_id, count) VALUES " + query)
##print(query, params)
cursor.execute(query, params)
conn.commit()
except mysql.connector.Error as e:
print("Error in cart update: {}".format(e))
finally:
if cursor:
cursor.close()
if conn:
conn.close()
return
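# Usage sketch (illustrative), assuming the Cart object from 'objects' exposes
# 'products' as a {product_id: count} dict, as getShoppingCart suggests:
#   cart = Cart(42)
#   cart.products[7] = 2
#   updateShoppingCart(cart)  # replaces user 42's stored cart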
|
{
"content_hash": "e655de7bcb0e4fa5ff80334cf8bcdbbf",
"timestamp": "",
"source": "github",
"line_count": 243,
"max_line_length": 154,
"avg_line_length": 28.242798353909464,
"alnum_prop": 0.5241148185924522,
"repo_name": "alamminsalo/py-webshop",
"id": "20c3a21ab15f5c006d2cf24f7f08120d88fbcf33",
"size": "6887",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/db.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "24192"
}
],
"symlink_target": ""
}
|
from a10sdk.common.A10BaseClass import A10BaseClass
class Oper(A10BaseClass):
"""This class does not support CRUD Operations please use parent.
:param time: {"type": "string", "format": "string"}
:param offset: {"type": "string", "format": "string"}
:param date: {"type": "string", "format": "string"}
:param timezone: {"type": "string", "format": "string"}
:param source_type: {"type": "number", "format": "number"}
:param day: {"type": "string", "format": "string"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.b_key = "oper"
self.DeviceProxy = ""
self.time = ""
self.offset = ""
self.date = ""
self.timezone = ""
self.source_type = ""
self.day = ""
for keys, value in kwargs.items():
setattr(self,keys, value)
class Show(A10BaseClass):
"""Class Description::
Operational Status for the object show.
Class show supports CRUD Operations and inherits from `common/A10BaseClass`.
    This class is the `"PARENT"` class for this module.
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
URL for this object::
`https://<Hostname|Ip address>//axapi/v3/clock/show/oper`.
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.required=[]
self.b_key = "show"
self.a10_url="/axapi/v3/clock/show/oper"
self.DeviceProxy = ""
self.oper = {}
for keys, value in kwargs.items():
setattr(self,keys, value)
|
{
"content_hash": "f811e9b713bb8d885793220f45b4f78f",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 116,
"avg_line_length": 26.96969696969697,
"alnum_prop": 0.5837078651685393,
"repo_name": "a10networks/a10sdk-python",
"id": "6746313b1526a7fd167fc129644be25c6bd38a32",
"size": "1780",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "a10sdk/core/clock/clock_show_oper.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "6956372"
}
],
"symlink_target": ""
}
|
"""Configure py.test."""
import json
from unittest.mock import PropertyMock, patch
import pytest
from tests.common import load_fixture
@pytest.fixture(name="tomorrowio_config_flow_connect", autouse=True)
def tomorrowio_config_flow_connect():
"""Mock valid tomorrowio config flow setup."""
with patch(
"homeassistant.components.tomorrowio.config_flow.TomorrowioV4.realtime",
return_value={},
):
yield
@pytest.fixture(name="tomorrowio_config_entry_update", autouse=True)
def tomorrowio_config_entry_update_fixture():
"""Mock valid tomorrowio config entry setup."""
with patch(
"homeassistant.components.tomorrowio.TomorrowioV4.realtime_and_all_forecasts",
return_value=json.loads(load_fixture("v4.json", "tomorrowio")),
) as mock_update, patch(
"homeassistant.components.tomorrowio.TomorrowioV4.max_requests_per_day",
new_callable=PropertyMock,
) as mock_max_requests_per_day, patch(
"homeassistant.components.tomorrowio.TomorrowioV4.num_api_requests",
new_callable=PropertyMock,
) as mock_num_api_requests:
mock_max_requests_per_day.return_value = 100
mock_num_api_requests.return_value = 2
yield mock_update
|
{
"content_hash": "c9db8b970d2d17b05e0be0c0c44a7e4b",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 86,
"avg_line_length": 35.48571428571429,
"alnum_prop": 0.7069243156199678,
"repo_name": "mezz64/home-assistant",
"id": "2d36d68c57a76875e14f3a738619fdeb8f33cc7e",
"size": "1242",
"binary": false,
"copies": "3",
"ref": "refs/heads/dev",
"path": "tests/components/tomorrowio/conftest.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2963"
},
{
"name": "PLSQL",
"bytes": "840"
},
{
"name": "Python",
"bytes": "52481895"
},
{
"name": "Shell",
"bytes": "6252"
}
],
"symlink_target": ""
}
|
import requests
keywords = ['politics', 'us', 'world', 'technology', 'sports', 'business', 'entertainment', 'science', 'health']
states = {
'AK': 'Alaska',
'AL': 'Alabama',
'AR': 'Arkansas',
'AS': 'American Samoa',
'AZ': 'Arizona',
'CA': 'California',
'CO': 'Colorado',
'CT': 'Connecticut',
'DC': 'District of Columbia',
'DE': 'Delaware',
'FL': 'Florida',
'GA': 'Georgia',
'GU': 'Guam',
'HI': 'Hawaii',
'IA': 'Iowa',
'ID': 'Idaho',
'IL': 'Illinois',
'IN': 'Indiana',
'KS': 'Kansas',
'KY': 'Kentucky',
'LA': 'Louisiana',
'MA': 'Massachusetts',
'MD': 'Maryland',
'ME': 'Maine',
'MI': 'Michigan',
'MN': 'Minnesota',
'MO': 'Missouri',
'MP': 'Northern Mariana Islands',
'MS': 'Mississippi',
'MT': 'Montana',
'NA': 'National',
'NC': 'North Carolina',
'ND': 'North Dakota',
'NE': 'Nebraska',
'NH': 'New Hampshire',
'NJ': 'New Jersey',
'NM': 'New Mexico',
'NV': 'Nevada',
'NY': 'New York',
'OH': 'Ohio',
'OK': 'Oklahoma',
'OR': 'Oregon',
'PA': 'Pennsylvania',
'PR': 'Puerto Rico',
'RI': 'Rhode Island',
'SC': 'South Carolina',
'SD': 'South Dakota',
'TN': 'Tennessee',
'TX': 'Texas',
'UT': 'Utah',
'VA': 'Virginia',
'VI': 'Virgin Islands',
'VT': 'Vermont',
'WA': 'Washington',
'WI': 'Wisconsin',
'WV': 'West Virginia',
'WY': 'Wyoming'
}
# format of return will be:
# [general, technology, sports, business, entertainment, science, other]
# OTHER IS A DICTIONARY
# within each element of this list, you will have:
# [name of article, url, provider, description]
def bing_search(query):
url = 'https://api.cognitive.microsoft.com/bing/v5.0/news?category='+query+'&mkt=en-us'
# query string parameters
payload = {'q': query, 'freshness' : 'Week'}
# custom headers
headers = {'Ocp-Apim-Subscription-Key': '22207001cbdc4c2487ad91d1cec1bdf2'}
#22207001cbdc4c2487ad91d1cec1bdf2
# make GET request
r = requests.get(url, params=payload, headers=headers)
# get JSON response
listOfArticles = r.json()['value']
masterList = []
for article in listOfArticles:
if('clusteredArticles' in article):
information = article['clusteredArticles']
else:
information = article
thisList = []
if(type(information) is dict):
thisList.append(article.get('name'))
thisList.append(information['url'])
provider = information['provider'][0]
thisList.append(provider['name'])
thisList.append(str(information['description'].encode("ascii", "ignore")))
masterList.append(thisList)
return masterList
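# Illustrative shape of one returned element, matching the format comment
# near the top of this file: [name of article, url, provider name,
# encoded description string].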
def generateResponse():
politicsList = bing_search('politics')
usList = bing_search('us')
worldList = bing_search('world')
generalList = politicsList + usList + worldList
techList = bing_search('technology')
sportsList = bing_search('sports')
    businessList = bing_search('business')
entertainmentList = bing_search('entertainment')
scienceList = bing_search('science')
scienceList += bing_search('health')
masterList = []
masterList.append(generalList)
masterList.append(techList)
masterList.append(sportsList)
    masterList.append(businessList)
masterList.append(entertainmentList)
masterList.append(scienceList)
return masterList
def generateStates(query):
finalResult = []
url = 'https://api.cognitive.microsoft.com/bing/v5.0/news/trendingtopics'
# query string parameters
payload = {'q': query}
# custom headers
headers = {'Ocp-Apim-Subscription-Key': '028fb806bc014b3baf2426e3ac1292dc '}
r = requests.get(url, params=payload, headers=headers)
articles = r.json()['value']
max = 10
for article in articles:
if (max == 0):
break
max -= 1
if(type(article) is dict):
result = []
result.append(str(article['name'].encode("ascii", "ignore")))
result.append(str(article['webSearchUrl'].encode("ascii", "ignore")))
provider = article['image']['provider']
result.append(str(provider[0]['name'].encode("ascii", "ignore")))
result.append('No description available.')
finalResult.append(result)
return finalResult
def getAllStates():
d = {}
    for key, value in states.items():
d[key] = generateStates(value)
return d
|
{
"content_hash": "046e35b19e27ffc040cc6eaa9c20dbec",
"timestamp": "",
"source": "github",
"line_count": 154,
"max_line_length": 112,
"avg_line_length": 29.357142857142858,
"alnum_prop": 0.59721300597213,
"repo_name": "srujant/MLNews",
"id": "79e9481a3e91f289dbd6b7931c17e92e4d123168",
"size": "4521",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "otherAPIs.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "676330"
},
{
"name": "GLSL",
"bytes": "172699"
},
{
"name": "HTML",
"bytes": "15064545"
},
{
"name": "JavaScript",
"bytes": "50755294"
},
{
"name": "OpenEdge ABL",
"bytes": "2662139"
},
{
"name": "Python",
"bytes": "216303"
},
{
"name": "Shell",
"bytes": "4042"
}
],
"symlink_target": ""
}
|
__author__ = 'calvin'
import contextlib
from future.utils import iteritems
from .property import Property
from .exceptions import *
class EventDispatcher(object):
def __init__(self, *args, **kwargs):
self.event_dispatcher_event_callbacks = {}
self.event_dispatcher_properties = {}
bindings = EventDispatcher.register_properties(self)
self.bind(**bindings)
@staticmethod
def register_properties(obj, properties=None):
"""
Walk backwards through the MRO looking for event dispatcher Property attributes in the classes.
Then register and bind them to the default handler 'on_<prop_name>' if it exists.
Walking backwards allows you to override the default value for a superclass.
        If the 'properties' argument is given, then only register the properties in that dictionary.
        'properties' must be a dictionary whose keys are the attribute names and whose values are the
        eventdispatcher Property objects.
"""
bindings = {}
if properties is None:
for cls in reversed(obj.__class__.__mro__):
for prop_name, prop in iteritems(cls.__dict__):
if isinstance(prop, Property):
prop.name = prop_name
prop.register(obj, prop_name, prop.default_value)
if hasattr(obj, 'on_%s' % prop_name):
bindings[prop_name] = getattr(obj, 'on_{}'.format(prop_name))
else:
for prop_name, prop in iteritems(properties):
prop.name = prop_name
prop.register(obj, prop_name, prop.default_value)
if hasattr(obj, 'on_%s' % prop_name):
bindings[prop_name] = getattr(obj, 'on_{}'.format(prop_name))
return bindings
def force_dispatch(self, prop_name, value):
"""
Assigns the value to the property and then dispatches the event, regardless of whether that value is the same
as the previous value.
:param prop_name: property name
:param value: value to assign to the property
"""
previous_value = getattr(self, prop_name)
if previous_value == value:
self.dispatch(prop_name, self, previous_value)
else:
setattr(self, prop_name, value)
def dispatch(self, key, *args, **kwargs):
"""
Dispatch a property. This calls all functions bound to the property.
        :param key: property name
:param args: arguments to provide to the bindings
:param kwargs: keyword arguments to provide to the bindings
"""
for callback in self.event_dispatcher_properties[key]['callbacks']:
if callback(*args, **kwargs):
break
def dispatch_event(self, event, *args, **kwargs):
"""
Dispatch an event. This calls all functions bound to the event.
:param event: event name
:param args: arguments to provide to the bindings
:param kwargs: keyword arguments to provide to the bindings
"""
for callback in self.event_dispatcher_event_callbacks[event]:
if callback(*args, **kwargs):
break
def register_event(self, *event_names):
"""
Create an event that can be bound to and dispatched.
:param event_names: Name of the event
"""
for event_name in event_names:
default_dispatcher = getattr(self, 'on_{}'.format(event_name), None)
if default_dispatcher:
self.event_dispatcher_event_callbacks[event_name] = [default_dispatcher]
else:
self.event_dispatcher_event_callbacks[event_name] = []
def unbind(self, **kwargs):
"""
Unbind the specified callbacks associated with the property / event names
:param kwargs: {property name: callback} bindings
"""
all_properties = self.event_dispatcher_properties
for prop_name, callback in iteritems(kwargs):
if prop_name in all_properties:
try:
all_properties[prop_name]['callbacks'].remove(callback)
except ValueError:
raise BindError("No binding for {} in property '{}'".format(callback.__name__, prop_name))
elif prop_name in self.event_dispatcher_event_callbacks:
try:
self.event_dispatcher_event_callbacks[prop_name].remove(callback)
except ValueError:
raise BindError("No binding for {} in event '{}'".format(callback.__name__, prop_name))
else:
raise BindError('No property or event by the name of %s' % prop_name)
def unbind_all(self, *args):
"""
Unbind all callbacks associated with the specified property / event names
:param args: property / event names
"""
all_properties = self.event_dispatcher_properties
for prop_name in args:
if prop_name in all_properties:
del all_properties[prop_name]['callbacks'][:]
elif prop_name in self.event_dispatcher_event_callbacks:
del self.event_dispatcher_event_callbacks[prop_name][:]
else:
raise BindError("No such property or event '%s'" % prop_name)
def bind(self, **kwargs):
"""
Bind a function to a property or event.
:param kwargs: {property name: callback} bindings
"""
for prop_name, callback in iteritems(kwargs):
if prop_name in self.event_dispatcher_properties:
# Queue the callback into the property
self.event_dispatcher_properties[prop_name]['callbacks'].append(callback)
elif prop_name in self.event_dispatcher_event_callbacks:
# If a property was not found, search in events
self.event_dispatcher_event_callbacks[prop_name].append(callback)
else:
raise BindError("No property or event by the name of '%s'" % prop_name)
def bind_once(self, **kwargs):
"""
Bind a function to a property or event and unbind it after the first time the function has been called
:param kwargs: {property name: callback} bindings
"""
for prop_name, callback in iteritems(kwargs.copy()):
def _wrapped_binding(*args):
callback()
self.unbind(**{prop_name: _wrapped_binding})
self.bind(**{prop_name: _wrapped_binding})
kwargs.pop(prop_name)
if kwargs:
self.bind_once(**kwargs)
return
def setter(self, prop_name):
return lambda inst, value: setattr(self, prop_name, value)
def get_dispatcher_property(self, prop_name):
return self.event_dispatcher_properties[prop_name]['property']
@contextlib.contextmanager
def temp_unbind(self, **bindings):
"""
Context manager to temporarily suspend dispatching of a specified callback.
:param bindings: keyword argument of property_name=callback_func
"""
# Enter / With
all_properties = self.event_dispatcher_properties
callbacks = {}
for prop_name, binding in iteritems(bindings):
if prop_name in all_properties:
# Make a copy of the callback sequence so we can revert back
callbacks[prop_name] = all_properties[prop_name]['callbacks'][:]
# Remove the specified bindings
if binding in all_properties[prop_name]['callbacks']:
all_properties[prop_name]['callbacks'].remove(binding)
elif prop_name in self.event_dispatcher_event_callbacks:
callbacks[prop_name] = self.event_dispatcher_event_callbacks[prop_name][:]
self.event_dispatcher_event_callbacks[prop_name].remove(binding)
# Inside of with statement
yield None
# Finally / Exit
for prop_name, cb in iteritems(callbacks):
if prop_name in all_properties:
all_properties[prop_name]['callbacks'] = cb
elif prop_name in self.event_dispatcher_event_callbacks:
self.event_dispatcher_event_callbacks[prop_name] = callbacks[prop_name]
@contextlib.contextmanager
def temp_unbind_all(self, *prop_name):
"""
Context manager to temporarily suspend dispatching of the listed properties or events. Assigning a different
value to these properties or dispatching events inside the with statement will not dispatch the bindings.
:param prop_name: property or event names to suspend
"""
# Enter / With
property_callbacks = {}
event_callbacks = {}
for name in prop_name:
if name in self.event_dispatcher_properties:
property_callbacks[name] = self.event_dispatcher_properties[name]['callbacks']
self.event_dispatcher_properties[name]['callbacks'] = []
if name in self.event_dispatcher_event_callbacks:
event_callbacks[name] = self.event_dispatcher_event_callbacks[name]
self.event_dispatcher_event_callbacks[name] = []
# Inside of with statement
yield None
# Finally / Exit
for name in prop_name:
if name in property_callbacks:
self.event_dispatcher_properties[name]['callbacks'] = property_callbacks[name]
if name in event_callbacks:
self.event_dispatcher_event_callbacks[name] = event_callbacks[name]
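# Usage sketch (illustrative, not part of this module); it assumes Property
# accepts a default value, as register_properties' use of prop.default_value
# implies:
#   class Thermometer(EventDispatcher):
#       temperature = Property(20.0)
#       def on_temperature(self, inst, value):
#           print('temperature is now', value)
#   t = Thermometer()        # registers properties, binds on_temperature
#   t.temperature = 25.0     # assignment dispatches the bound callbacks
#   t.register_event('alarm')
#   t.bind(alarm=lambda: print('alarm!'))
#   t.dispatch_event('alarm')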
|
{
"content_hash": "72f05178db279791c1d9c1cb843377d0",
"timestamp": "",
"source": "github",
"line_count": 215,
"max_line_length": 117,
"avg_line_length": 45.15813953488372,
"alnum_prop": 0.6037696982181481,
"repo_name": "lobocv/eventdispatcher",
"id": "5ad7d48ae2e6d6e7726c13d491897ed6e22d08b1",
"size": "9709",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "eventdispatcher/eventdispatcher.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "99232"
}
],
"symlink_target": ""
}
|
"""
Tests for NetApp API layer
"""
import ddt
from lxml import etree
import mock
from oslo_utils import netutils
import paramiko
import six
from six.moves import urllib
from cinder import exception
from cinder.i18n import _
from cinder import test
from cinder.tests.unit.volume.drivers.netapp.dataontap.client import (
fakes as zapi_fakes)
from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api
@ddt.ddt
class NetAppApiServerTests(test.TestCase):
"""Test case for NetApp API server methods"""
def setUp(self):
self.root = netapp_api.NaServer('127.0.0.1')
super(NetAppApiServerTests, self).setUp()
@ddt.data(None, 'ftp')
def test_set_transport_type_value_error(self, transport_type):
"""Tests setting an invalid transport type"""
self.assertRaises(ValueError, self.root.set_transport_type,
transport_type)
@ddt.data({'params': {'transport_type': 'http',
'server_type_filer': 'filer'}},
{'params': {'transport_type': 'http',
'server_type_filer': 'xyz'}},
{'params': {'transport_type': 'https',
'server_type_filer': 'filer'}},
{'params': {'transport_type': 'https',
'server_type_filer': 'xyz'}})
@ddt.unpack
def test_set_transport_type_valid(self, params):
"""Tests setting a valid transport type"""
self.root._server_type = params['server_type_filer']
mock_invoke = self.mock_object(self.root, 'set_port')
self.root.set_transport_type(params['transport_type'])
expected_call_args = zapi_fakes.FAKE_CALL_ARGS_LIST
self.assertIn(mock_invoke.call_args, expected_call_args)
@ddt.data('stor', 'STORE', '')
def test_set_server_type_value_error(self, server_type):
"""Tests Value Error on setting the wrong server type"""
self.assertRaises(ValueError, self.root.set_server_type, server_type)
@ddt.data('!&', '80na', '')
def test_set_port__value_error(self, port):
"""Tests Value Error on trying to set port with a non-integer"""
self.assertRaises(ValueError, self.root.set_port, port)
@ddt.data('!&', '80na', '')
def test_set_timeout_value_error(self, timeout):
"""Tests Value Error on trying to set port with a non-integer"""
self.assertRaises(ValueError, self.root.set_timeout, timeout)
@ddt.data({'params': {'major': 1, 'minor': '20a'}},
{'params': {'major': '20a', 'minor': 1}},
{'params': {'major': '!*', 'minor': '20a'}})
@ddt.unpack
def test_set_api_version_value_error(self, params):
"""Tests Value Error on setting non-integer version"""
self.assertRaises(ValueError, self.root.set_api_version, **params)
def test_set_api_version_valid(self):
"""Tests Value Error on setting non-integer version"""
args = {'major': '20', 'minor': 1}
expected_call_args_list = [mock.call('20'), mock.call(1)]
mock_invoke = self.mock_object(six, 'text_type', return_value='str')
self.root.set_api_version(**args)
self.assertEqual(expected_call_args_list, mock_invoke.call_args_list)
@ddt.data({'params': {'result': zapi_fakes.FAKE_RESULT_API_ERR_REASON}},
{'params': {'result': zapi_fakes.FAKE_RESULT_API_ERRNO_INVALID}},
{'params': {'result': zapi_fakes.FAKE_RESULT_API_ERRNO_VALID}})
@ddt.unpack
def test_invoke_successfully_naapi_error(self, params):
"""Tests invoke successfully raising NaApiError"""
self.mock_object(self.root, 'send_http_request',
return_value=params['result'])
self.assertRaises(netapp_api.NaApiError,
self.root.invoke_successfully,
zapi_fakes.FAKE_NA_ELEMENT)
def test_invoke_successfully_no_error(self):
"""Tests invoke successfully with no errors"""
self.mock_object(self.root, 'send_http_request',
return_value=zapi_fakes.FAKE_RESULT_SUCCESS)
self.assertEqual(zapi_fakes.FAKE_RESULT_SUCCESS.to_string(),
self.root.invoke_successfully(
zapi_fakes.FAKE_NA_ELEMENT).to_string())
def test__create_request(self):
"""Tests method _create_request"""
self.root._ns = zapi_fakes.FAKE_XML_STR
self.root._api_version = '1.20'
self.mock_object(self.root, '_enable_tunnel_request')
self.mock_object(netapp_api.NaElement, 'add_child_elem')
self.mock_object(netapp_api.NaElement, 'to_string',
return_value=zapi_fakes.FAKE_XML_STR)
mock_invoke = self.mock_object(urllib.request, 'Request')
self.root._create_request(zapi_fakes.FAKE_NA_ELEMENT, True)
self.assertTrue(mock_invoke.called)
@ddt.data({'params': {'server': zapi_fakes.FAKE_NA_SERVER_API_1_5}},
{'params': {'server': zapi_fakes.FAKE_NA_SERVER_API_1_14}})
@ddt.unpack
def test__enable_tunnel_request__value_error(self, params):
"""Tests value errors with creating tunnel request"""
self.assertRaises(ValueError, params['server']._enable_tunnel_request,
'test')
def test__enable_tunnel_request_valid(self):
"""Tests creating tunnel request with correct values"""
netapp_elem = zapi_fakes.FAKE_NA_ELEMENT
server = zapi_fakes.FAKE_NA_SERVER_API_1_20
mock_invoke = self.mock_object(netapp_elem, 'add_attr')
expected_call_args = [mock.call('vfiler', 'filer'),
mock.call('vfiler', 'server')]
server._enable_tunnel_request(netapp_elem)
self.assertEqual(expected_call_args, mock_invoke.call_args_list)
def test__parse_response__naapi_error(self):
"""Tests NaApiError on no response"""
self.assertRaises(netapp_api.NaApiError,
self.root._parse_response, None)
def test__parse_response_no_error(self):
"""Tests parse function with appropriate response"""
mock_invoke = self.mock_object(etree, 'XML', return_value='xml')
self.root._parse_response(zapi_fakes.FAKE_XML_STR)
mock_invoke.assert_called_with(zapi_fakes.FAKE_XML_STR)
def test__build_opener_not_implemented_error(self):
"""Tests whether certificate style authorization raises Exception"""
self.root._auth_style = 'not_basic_auth'
self.assertRaises(NotImplementedError, self.root._build_opener)
def test__build_opener_valid(self):
"""Tests whether build opener works with valid parameters"""
self.root._auth_style = 'basic_auth'
mock_invoke = self.mock_object(urllib.request, 'build_opener')
self.root._build_opener()
self.assertTrue(mock_invoke.called)
@ddt.data(None, zapi_fakes.FAKE_XML_STR)
def test_send_http_request_value_error(self, na_element):
"""Tests whether invalid NaElement parameter causes error"""
self.assertRaises(ValueError, self.root.send_http_request, na_element)
def test_send_http_request_http_error(self):
"""Tests handling of HTTPError"""
na_element = zapi_fakes.FAKE_NA_ELEMENT
self.mock_object(self.root, '_create_request',
return_value=('abc', zapi_fakes.FAKE_NA_ELEMENT))
self.mock_object(netapp_api, 'LOG')
self.root._opener = zapi_fakes.FAKE_HTTP_OPENER
self.mock_object(self.root, '_build_opener')
self.mock_object(self.root._opener, 'open',
side_effect=urllib.error.HTTPError(url='', hdrs='',
fp=None,
code='401',
msg='httperror'))
self.assertRaises(netapp_api.NaApiError, self.root.send_http_request,
na_element)
def test_send_http_request_unknown_exception(self):
"""Tests handling of Unknown Exception"""
na_element = zapi_fakes.FAKE_NA_ELEMENT
self.mock_object(self.root, '_create_request',
return_value=('abc', zapi_fakes.FAKE_NA_ELEMENT))
mock_log = self.mock_object(netapp_api, 'LOG')
self.root._opener = zapi_fakes.FAKE_HTTP_OPENER
self.mock_object(self.root, '_build_opener')
self.mock_object(self.root._opener, 'open', side_effect=Exception)
self.assertRaises(netapp_api.NaApiError, self.root.send_http_request,
na_element)
self.assertEqual(1, mock_log.exception.call_count)
def test_send_http_request_valid(self):
"""Tests the method send_http_request with valid parameters"""
na_element = zapi_fakes.FAKE_NA_ELEMENT
self.mock_object(self.root, '_create_request',
return_value=('abc', zapi_fakes.FAKE_NA_ELEMENT))
self.mock_object(netapp_api, 'LOG')
self.root._opener = zapi_fakes.FAKE_HTTP_OPENER
self.mock_object(self.root, '_build_opener')
self.mock_object(self.root, '_get_result',
return_value=zapi_fakes.FAKE_NA_ELEMENT)
opener_mock = self.mock_object(self.root._opener, 'open')
opener_mock.read.side_effect = ['resp1', 'resp2']
self.root.send_http_request(na_element)
@ddt.data('192.168.1.0', '127.0.0.1', '0.0.0.0',
'::ffff:8', 'fdf8:f53b:82e4::53', '2001::1',
'fe80::200::abcd', '2001:0000:4136:e378:8000:63bf:3fff:fdd2')
def test__get_url(self, host):
port = '80'
root = netapp_api.NaServer(host, port=port)
protocol = root.TRANSPORT_TYPE_HTTP
url = root.URL_FILER
if netutils.is_valid_ipv6(host):
host = netutils.escape_ipv6(host)
result = '%s://%s:%s/%s' % (protocol, host, port, url)
url = root._get_url()
self.assertEqual(result, url)
class NetAppApiElementTransTests(test.TestCase):
"""Test case for NetApp API element translations."""
def test_translate_struct_dict_unique_key(self):
"""Tests if dict gets properly converted to NaElements."""
root = netapp_api.NaElement('root')
child = {'e1': 'v1', 'e2': 'v2', 'e3': 'v3'}
root.translate_struct(child)
self.assertEqual(3, len(root.get_children()))
self.assertEqual('v1', root.get_child_content('e1'))
self.assertEqual('v2', root.get_child_content('e2'))
self.assertEqual('v3', root.get_child_content('e3'))
def test_translate_struct_dict_nonunique_key(self):
"""Tests if list/dict gets properly converted to NaElements."""
root = netapp_api.NaElement('root')
child = [{'e1': 'v1', 'e2': 'v2'}, {'e1': 'v3'}]
root.translate_struct(child)
self.assertEqual(3, len(root.get_children()))
children = root.get_children()
for c in children:
if c.get_name() == 'e1':
self.assertIn(c.get_content(), ['v1', 'v3'])
else:
self.assertEqual('v2', c.get_content())
def test_translate_struct_list(self):
"""Tests if list gets properly converted to NaElements."""
root = netapp_api.NaElement('root')
child = ['e1', 'e2']
root.translate_struct(child)
self.assertEqual(2, len(root.get_children()))
self.assertIsNone(root.get_child_content('e1'))
self.assertIsNone(root.get_child_content('e2'))
def test_translate_struct_tuple(self):
"""Tests if tuple gets properly converted to NaElements."""
root = netapp_api.NaElement('root')
child = ('e1', 'e2')
root.translate_struct(child)
self.assertEqual(2, len(root.get_children()))
self.assertIsNone(root.get_child_content('e1'))
self.assertIsNone(root.get_child_content('e2'))
def test_translate_invalid_struct(self):
"""Tests if invalid data structure raises exception."""
root = netapp_api.NaElement('root')
child = 'random child element'
self.assertRaises(ValueError, root.translate_struct, child)
def test_setter_builtin_types(self):
"""Tests str, int, float get converted to NaElement."""
root = netapp_api.NaElement('root')
root['e1'] = 'v1'
root['e2'] = 1
root['e3'] = 2.0
root['e4'] = 8
self.assertEqual(4, len(root.get_children()))
self.assertEqual('v1', root.get_child_content('e1'))
self.assertEqual('1', root.get_child_content('e2'))
self.assertEqual('2.0', root.get_child_content('e3'))
self.assertEqual('8', root.get_child_content('e4'))
def test_setter_na_element(self):
"""Tests na_element gets appended as child."""
root = netapp_api.NaElement('root')
root['e1'] = netapp_api.NaElement('nested')
self.assertEqual(1, len(root.get_children()))
e1 = root.get_child_by_name('e1')
self.assertIsInstance(e1, netapp_api.NaElement)
self.assertIsInstance(e1.get_child_by_name('nested'),
netapp_api.NaElement)
def test_setter_child_dict(self):
"""Tests dict is appended as child to root."""
root = netapp_api.NaElement('root')
root['d'] = {'e1': 'v1', 'e2': 'v2'}
e1 = root.get_child_by_name('d')
self.assertIsInstance(e1, netapp_api.NaElement)
sub_ch = e1.get_children()
self.assertEqual(2, len(sub_ch))
for c in sub_ch:
self.assertIn(c.get_name(), ['e1', 'e2'])
if c.get_name() == 'e1':
self.assertEqual('v1', c.get_content())
else:
self.assertEqual('v2', c.get_content())
def test_setter_child_list_tuple(self):
"""Tests list/tuple are appended as child to root."""
root = netapp_api.NaElement('root')
root['l'] = ['l1', 'l2']
root['t'] = ('t1', 't2')
l = root.get_child_by_name('l')
self.assertIsInstance(l, netapp_api.NaElement)
t = root.get_child_by_name('t')
self.assertIsInstance(t, netapp_api.NaElement)
for le in l.get_children():
self.assertIn(le.get_name(), ['l1', 'l2'])
for te in t.get_children():
self.assertIn(te.get_name(), ['t1', 't2'])
def test_setter_no_value(self):
"""Tests key with None value."""
root = netapp_api.NaElement('root')
root['k'] = None
self.assertIsNone(root.get_child_content('k'))
def test_setter_invalid_value(self):
"""Tests invalid value raises exception."""
root = netapp_api.NaElement('root')
try:
root['k'] = netapp_api.NaServer('localhost')
except Exception as e:
if not isinstance(e, TypeError):
self.fail(_('Error not a TypeError.'))
def test_setter_invalid_key(self):
"""Tests invalid value raises exception."""
root = netapp_api.NaElement('root')
try:
root[None] = 'value'
except Exception as e:
if not isinstance(e, KeyError):
self.fail(_('Error not a KeyError.'))
def test_getter_key_error(self):
"""Tests invalid key raises exception"""
root = netapp_api.NaElement('root')
self.mock_object(root, 'get_child_by_name', return_value=None)
self.mock_object(root, 'has_attr', return_value=None)
self.assertRaises(KeyError,
netapp_api.NaElement.__getitem__,
root, '123')
def test_getter_na_element_list(self):
"""Tests returning NaElement list"""
root = netapp_api.NaElement('root')
root['key'] = ['val1', 'val2']
self.assertEqual(root.get_child_by_name('key').get_name(),
root.__getitem__('key').get_name())
def test_getter_child_text(self):
"""Tests NaElement having no children"""
root = netapp_api.NaElement('root')
root.set_content('FAKE_CONTENT')
self.mock_object(root, 'get_child_by_name', return_value=root)
self.assertEqual('FAKE_CONTENT',
root.__getitem__('root'))
def test_getter_child_attr(self):
"""Tests invalid key raises exception"""
root = netapp_api.NaElement('root')
root.add_attr('val', 'FAKE_VALUE')
self.assertEqual('FAKE_VALUE',
root.__getitem__('val'))
def test_add_node_with_children(self):
"""Tests adding a child node with its own children"""
root = netapp_api.NaElement('root')
self.mock_object(netapp_api.NaElement,
'create_node_with_children',
return_value=zapi_fakes.FAKE_INVOKE_DATA)
mock_invoke = self.mock_object(root, 'add_child_elem')
root.add_node_with_children('options')
mock_invoke.assert_called_with(zapi_fakes.FAKE_INVOKE_DATA)
def test_create_node_with_children(self):
"""Tests adding a child node with its own children"""
root = netapp_api.NaElement('root')
self.mock_object(root, 'add_new_child', return_value='abc')
result_xml = str(root.create_node_with_children(
'options', test1=zapi_fakes.FAKE_XML_STR,
test2=zapi_fakes.FAKE_XML_STR))
# No ordering is guaranteed for elements in this XML.
self.assertTrue(result_xml.startswith("<options>"), result_xml)
self.assertTrue("<test1>abc</test1>" in result_xml, result_xml)
self.assertTrue("<test2>abc</test2>" in result_xml, result_xml)
self.assertTrue(result_xml.rstrip().endswith("</options>"), result_xml)
def test_add_new_child(self):
"""Tests adding a child node with its own children"""
root = netapp_api.NaElement('root')
self.mock_object(netapp_api.NaElement,
'_convert_entity_refs',
return_value=zapi_fakes.FAKE_INVOKE_DATA)
root.add_new_child('options', zapi_fakes.FAKE_INVOKE_DATA)
self.assertEqual(zapi_fakes.FAKE_XML2, root.to_string())
def test_get_attr_names_empty_attr(self):
"""Tests _elements.attrib being empty"""
root = netapp_api.NaElement('root')
self.assertEqual([], root.get_attr_names())
def test_get_attr_names(self):
"""Tests _elements.attrib being non-empty"""
root = netapp_api.NaElement('root')
root.add_attr('attr1', 'a1')
root.add_attr('attr2', 'a2')
self.assertEqual(['attr1', 'attr2'], root.get_attr_names())
@ddt.ddt
class SSHUtilTests(test.TestCase):
"""Test Cases for SSH API invocation."""
def setUp(self):
super(SSHUtilTests, self).setUp()
self.mock_object(netapp_api.SSHUtil, '_init_ssh_pool')
self.sshutil = netapp_api.SSHUtil('127.0.0.1',
'fake_user',
'fake_password')
def test_execute_command(self):
ssh = mock.Mock(paramiko.SSHClient)
stdin, stdout, stderr = self._mock_ssh_channel_files(
paramiko.ChannelFile)
self.mock_object(ssh, 'exec_command',
return_value=(stdin, stdout, stderr))
wait_on_stdout = self.mock_object(self.sshutil, '_wait_on_stdout')
stdout_read = self.mock_object(stdout, 'read', return_value='')
self.sshutil.execute_command(ssh, 'ls')
wait_on_stdout.assert_called_once_with(stdout,
netapp_api.SSHUtil.RECV_TIMEOUT)
stdout_read.assert_called_once_with()
def test_execute_read_exception(self):
ssh = mock.Mock(paramiko.SSHClient)
exec_command = self.mock_object(ssh, 'exec_command')
exec_command.side_effect = paramiko.SSHException('Failure')
wait_on_stdout = self.mock_object(self.sshutil, '_wait_on_stdout')
self.assertRaises(paramiko.SSHException,
self.sshutil.execute_command, ssh, 'ls')
wait_on_stdout.assert_not_called()
@ddt.data('Password:',
'Password: ',
'Password: \n\n')
def test_execute_command_with_prompt(self, response):
ssh = mock.Mock(paramiko.SSHClient)
stdin, stdout, stderr = self._mock_ssh_channel_files(paramiko.Channel)
stdout_read = self.mock_object(stdout.channel, 'recv',
return_value=response)
stdin_write = self.mock_object(stdin, 'write')
self.mock_object(ssh, 'exec_command',
return_value=(stdin, stdout, stderr))
wait_on_stdout = self.mock_object(self.sshutil, '_wait_on_stdout')
self.sshutil.execute_command_with_prompt(ssh, 'sudo ls',
'Password:', 'easypass')
wait_on_stdout.assert_called_once_with(stdout,
netapp_api.SSHUtil.RECV_TIMEOUT)
stdout_read.assert_called_once_with(999)
stdin_write.assert_called_once_with('easypass' + '\n')
def test_execute_command_unexpected_response(self):
ssh = mock.Mock(paramiko.SSHClient)
stdin, stdout, stderr = self._mock_ssh_channel_files(paramiko.Channel)
stdout_read = self.mock_object(stdout.channel, 'recv',
return_value='bad response')
self.mock_object(ssh, 'exec_command',
return_value=(stdin, stdout, stderr))
wait_on_stdout = self.mock_object(self.sshutil, '_wait_on_stdout')
self.assertRaises(exception.VolumeBackendAPIException,
self.sshutil.execute_command_with_prompt,
ssh, 'sudo ls', 'Password:', 'easypass')
wait_on_stdout.assert_called_once_with(stdout,
netapp_api.SSHUtil.RECV_TIMEOUT)
stdout_read.assert_called_once_with(999)
def test_wait_on_stdout(self):
stdout = mock.Mock()
stdout.channel = mock.Mock(paramiko.Channel)
exit_status = self.mock_object(stdout.channel, 'exit_status_ready',
return_value=False)
self.sshutil._wait_on_stdout(stdout, 1)
exit_status.assert_any_call()
self.assertGreater(exit_status.call_count, 2)
def _mock_ssh_channel_files(self, channel):
stdin = mock.Mock()
stdin.channel = mock.Mock(channel)
stdout = mock.Mock()
stdout.channel = mock.Mock(channel)
stderr = mock.Mock()
stderr.channel = mock.Mock(channel)
return stdin, stdout, stderr
|
{
"content_hash": "0ae8d86e7158352b404d37a44001bf2e",
"timestamp": "",
"source": "github",
"line_count": 549,
"max_line_length": 79,
"avg_line_length": 41.697632058287795,
"alnum_prop": 0.5887209505504106,
"repo_name": "Datera/cinder",
"id": "537f8996c9b432c606cf685090a77a30fa548312",
"size": "23756",
"binary": false,
"copies": "1",
"ref": "refs/heads/datera_queens_backport",
"path": "cinder/tests/unit/volume/drivers/netapp/dataontap/client/test_api.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "15242306"
},
{
"name": "Shell",
"bytes": "8187"
}
],
"symlink_target": ""
}
|
from __future__ import division
from __future__ import print_function
from past.utils import old_div
import sys
import pmagpy.pmag as pmag
#
#
def main():
"""
NAME
measurements_normalize.py
DESCRIPTION
    takes a magic_measurements file and normalizes the moment by the specimen_weight and specimen_volume in the er_specimens table
SYNTAX
measurements_normalize.py [command line options]
OPTIONS
-f FILE: specify input file, default is: magic_measurements.txt
-fsp FILE: specify input specimen file, default is: er_specimens.txt
-F FILE: specify output measurements, default is to overwrite input file
"""
#
# initialize variables
#
#
#
dir_path='.'
if "-WD" in sys.argv:
ind=sys.argv.index("-WD")
dir_path=sys.argv[ind+1]
meas_file,spec_file= dir_path+"/magic_measurements.txt",dir_path+"/er_specimens.txt"
out_file=meas_file
MeasRecs,SpecRecs=[],[]
OutRecs=[]
if "-h" in sys.argv:
print(main.__doc__)
sys.exit()
if "-f" in sys.argv:
ind=sys.argv.index("-f")
meas_file=dir_path+'/'+sys.argv[ind+1]
if "-fsp" in sys.argv:
ind=sys.argv.index("-fsp")
spec_file=dir_path+'/'+sys.argv[ind+1]
if "-F" in sys.argv:
ind=sys.argv.index("-F")
out_file=dir_path+'/'+sys.argv[ind+1]
MeasRecs,file_type=pmag.magic_read(meas_file)
Specs,file_type=pmag.magic_read(spec_file)
for rec in MeasRecs:
if 'measurement_magn_moment' in list(rec.keys()) and rec['measurement_magn_moment'] != "":
for spec in Specs:
if spec['er_specimen_name']==rec['er_specimen_name']:
if 'specimen_weight' in list(spec.keys()) and spec['specimen_weight']!="":
rec['measurement_magn_mass']='%e'%(old_div(float(rec['measurement_magn_moment']),float(spec['specimen_weight'])))
if 'specimen_volume' in list(spec.keys()) and spec['specimen_volume']!="":
rec['measurement_magn_volume']='%e'%(old_div(float(rec['measurement_magn_moment']),float(spec['specimen_volume'])))
break
if 'measurement_magn_volume' not in list(rec.keys()): rec['measurement_magn_volume']=''
if 'measurement_magn_mass' not in list(rec.keys()): rec['measurement_magn_mass']=''
OutRecs.append(rec)
pmag.magic_write(out_file,OutRecs,"magic_measurements")
print("Data saved in ", out_file)
if __name__ == "__main__":
main()
|
{
"content_hash": "7378f2adc098c3f37d2aa85e8698d3a0",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 139,
"avg_line_length": 37.44117647058823,
"alnum_prop": 0.600942655145326,
"repo_name": "Caoimhinmg/PmagPy",
"id": "98178b0735e1c03af17b14be2446500ec6a3b9f7",
"size": "2568",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "programs/measurements_normalize.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "D",
"bytes": "5748"
},
{
"name": "HTML",
"bytes": "63859"
},
{
"name": "Inno Setup",
"bytes": "3675"
},
{
"name": "Jupyter Notebook",
"bytes": "14175459"
},
{
"name": "Python",
"bytes": "14896053"
},
{
"name": "Shell",
"bytes": "6986"
},
{
"name": "TeX",
"bytes": "3146"
}
],
"symlink_target": ""
}
|
from gii.core import *
from freejoy import Joystick, getJoystickCount
from MOAIRuntime import getAKU
##----------------------------------------------------------------##
class MOAIJoystickSensorGetter():
def __init__( self, joyId ):
joyName = 'joy-%d' % joyId
#button - keyboard sensor
self.buttonSensorName = joyName + '.button'
#axis
axisSensorNames = []
maxJoystickAxisCount = 6
for axisId in range( 1, maxJoystickAxisCount + 1 ):
axisSensorName = joyName + ( '.axis-%d' % axisId )
axisSensorNames.append( axisSensorName )
self.axisSensorNames = axisSensorNames
def getButtonSensor( self, inputDevice ):
return inputDevice.getSensor( self.buttonSensorName )
def getAxisSensor( self, inputDevice, axisId ):
name = self.axisSensorNames[ axisId ]
return inputDevice.getSensor( name )
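# Illustrative example: for joyId = 1 the button sensor is named
# 'joy-1.button' and the first axis sensor 'joy-1.axis-1'.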
##----------------------------------------------------------------##
class MOAIJoystickHook( EditorModule ):
name = 'joystick_hook'
dependency = [ 'moai' ]
def __init__( self ):
self.joysticks = []
self.joystickSensorGetters = []
for i in range( 0, 8 ):
j = Joystick( i )
j.setButtonListener( self.onButtonEvent )
j.setAxisListener( self.onAxisEvent )
j.setContext( i )
self.joysticks.append( j )
getter = MOAIJoystickSensorGetter( i + 1 )
self.joystickSensorGetters.append( getter )
self.joystickCount = 0
self.inputDevice = None
def onLoad( self ):
self.refreshJoysticks()
runtime = self.getModule( 'moai' )
def setInputDevice( self, inputDevice ):
self.inputDevice = inputDevice
def onUpdate( self ):
joysticks = self.joysticks
for i in range( 0, self.joystickCount ):
joysticks[ i ].poll()
def refreshJoysticks( self ):
self.joystickCount = getJoystickCount()
logging.info( 'found joysticks: %d' % self.joystickCount )
for j in self.joysticks:
j.flush()
def onButtonEvent( self, joystick, buttonId, down ):
jid = joystick.getContext()
inputDevice = self.inputDevice
if not inputDevice: return
getter = self.joystickSensorGetters[ jid ]
sensor = getter.getButtonSensor( inputDevice )
if sensor:
sensor.enqueueKeyEvent( buttonId, down )
def onAxisEvent( self, joystick, axisId, value ):
jid = joystick.getContext()
inputDevice = self.inputDevice
if not inputDevice: return
getter = self.joystickSensorGetters[ jid ]
sensor = getter.getAxisSensor( inputDevice, axisId )
if sensor:
sensor.enqueueEvent( value )
# print 'joyAxis', axisId, value
|
{
"content_hash": "095e0ba846eef1aa07752a1b44d00f84",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 68,
"avg_line_length": 30.62962962962963,
"alnum_prop": 0.6727126158806933,
"repo_name": "tommo/gii",
"id": "52fe39990b602f3bc752e9a684ccd5f7be678ac7",
"size": "2481",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/gii/moai/MOAIJoystickHook.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "398"
},
{
"name": "C",
"bytes": "1118982"
},
{
"name": "C++",
"bytes": "743466"
},
{
"name": "CSS",
"bytes": "5956"
},
{
"name": "HTML",
"bytes": "126233"
},
{
"name": "JavaScript",
"bytes": "129855"
},
{
"name": "Lua",
"bytes": "1290198"
},
{
"name": "Makefile",
"bytes": "652"
},
{
"name": "Objective-C",
"bytes": "28896"
},
{
"name": "Objective-C++",
"bytes": "129214"
},
{
"name": "Python",
"bytes": "2676186"
},
{
"name": "Shell",
"bytes": "11215"
}
],
"symlink_target": ""
}
|
import mock
import datetime as dt
from nose.tools import * # noqa (PEP8 asserts)
import pytest
from osf_tests.factories import (
ProjectFactory,
UserFactory,
RegistrationFactory,
NodeFactory,
CollectionFactory,
)
from osf.models import NodeRelation
from tests.base import OsfTestCase, get_default_metaschema
from framework.auth import Auth
from website.project.views.node import _view_project, _serialize_node_search, _get_children, _get_readable_descendants
from website.views import serialize_node_summary
from website.profile import utils
from website import filters, settings
from website.util import permissions
pytestmark = pytest.mark.django_db
class TestUserSerializers(OsfTestCase):
def test_serialize_user(self):
master = UserFactory()
user = UserFactory()
master.merge_user(user)
d = utils.serialize_user(user)
assert_equal(d['id'], user._primary_key)
assert_equal(d['url'], user.url)
assert_equal(d.get('username', None), None)
assert_equal(d['fullname'], user.fullname)
assert_equal(d['registered'], user.is_registered)
assert_equal(d['absolute_url'], user.absolute_url)
assert_equal(d['date_registered'], user.date_registered.strftime('%Y-%m-%d'))
assert_equal(d['active'], user.is_active)
def test_serialize_user_merged(self):
master = UserFactory()
user = UserFactory()
master.merge_user(user)
d = utils.serialize_user(user, full=True)
assert_true(d['is_merged'])
assert_equal(d['merged_by']['url'], user.merged_by.url)
assert_equal(d['merged_by']['absolute_url'], user.merged_by.absolute_url)
def test_serialize_user_full(self):
user = UserFactory()
ProjectFactory(creator=user, is_public=False)
NodeFactory(creator=user)
ProjectFactory(creator=user, is_public=True)
CollectionFactory(creator=user)
d = utils.serialize_user(user, full=True, include_node_counts=True)
profile_image_url = filters.profile_image_url(settings.PROFILE_IMAGE_PROVIDER,
user,
use_ssl=True,
size=settings.PROFILE_IMAGE_LARGE)
assert_equal(d['id'], user._primary_key)
assert_equal(d['url'], user.url)
assert_equal(d.get('username'), None)
assert_equal(d['fullname'], user.fullname)
assert_equal(d['registered'], user.is_registered)
assert_equal(d['profile_image_url'], profile_image_url)
assert_equal(d['absolute_url'], user.absolute_url)
assert_equal(d['date_registered'], user.date_registered.strftime('%Y-%m-%d'))
projects = [
node
for node in user.contributed
if node.category == 'project'
and not node.is_registration
and not node.is_deleted
]
public_projects = [p for p in projects if p.is_public]
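        # Of the nodes created above, only the two projects count toward
        # number_projects: the component and collection are excluded, and
        # exactly one of the projects is public.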
assert_equal(d['number_projects'], len(projects))
assert_equal(d['number_public_projects'], len(public_projects))
class TestNodeSerializers(OsfTestCase):
# Regression test for #489
# https://github.com/CenterForOpenScience/openscienceframework.org/issues/489
def test_serialize_node_summary_private_node_should_include_id_and_primary_boolean_reg_and_fork(self):
user = UserFactory()
# user cannot see this node
node = ProjectFactory(is_public=False)
result = serialize_node_summary(
node, auth=Auth(user),
primary=True,
)
# serialized result should have id and primary
assert_equal(result['id'], node._primary_key)
        assert_true(result['primary'])
assert_equal(result['is_registration'], node.is_registration)
assert_equal(result['is_fork'], node.is_fork)
# https://github.com/CenterForOpenScience/openscienceframework.org/issues/668
def test_serialize_node_summary_for_registration_uses_correct_date_format(self):
reg = RegistrationFactory()
res = serialize_node_summary(reg, auth=Auth(reg.creator))
assert_equal(res['registered_date'],
reg.registered_date.strftime('%Y-%m-%d %H:%M UTC'))
# https://github.com/CenterForOpenScience/openscienceframework.org/issues/858
def test_serialize_node_summary_private_registration_should_include_is_registration(self):
user = UserFactory()
# non-contributor cannot see private registration of public project
node = ProjectFactory(is_public=True)
reg = RegistrationFactory(project=node, user=node.creator)
res = serialize_node_summary(reg, auth=Auth(user))
# serialized result should have is_registration
assert_true(res['is_registration'])
# https://openscience.atlassian.net/browse/OSF-4618
def test_get_children_only_returns_child_nodes_with_admin_permissions(self):
user = UserFactory()
admin_project = ProjectFactory()
admin_project.add_contributor(user, auth=Auth(admin_project.creator),
permissions=permissions.expand_permissions(permissions.ADMIN))
admin_project.save()
admin_component = NodeFactory(parent=admin_project)
admin_component.add_contributor(user, auth=Auth(admin_component.creator),
permissions=permissions.expand_permissions(permissions.ADMIN))
admin_component.save()
read_and_write = NodeFactory(parent=admin_project)
read_and_write.add_contributor(user, auth=Auth(read_and_write.creator),
permissions=permissions.expand_permissions(permissions.WRITE))
read_and_write.save()
read_only = NodeFactory(parent=admin_project)
read_only.add_contributor(user, auth=Auth(read_only.creator),
permissions=permissions.expand_permissions(permissions.READ))
read_only.save()
non_contributor = NodeFactory(parent=admin_project)
components = _get_children(admin_project, Auth(user))
assert_equal(len(components), 1)
def test_serialize_node_summary_private_fork_should_include_is_fork(self):
user = UserFactory()
# non-contributor cannot see private fork of public project
node = ProjectFactory(is_public=True)
consolidated_auth = Auth(user=node.creator)
fork = node.fork_node(consolidated_auth)
res = serialize_node_summary(
fork, auth=Auth(user),
primary=True,
)
# serialized result should have is_fork
assert_true(res['is_fork'])
def test_serialize_node_summary_private_fork_private_project_should_include_is_fork(self):
# contributor on a private project
user = UserFactory()
node = ProjectFactory(is_public=False)
node.add_contributor(user)
# contributor cannot see private fork of this project
consolidated_auth = Auth(user=node.creator)
fork = node.fork_node(consolidated_auth)
res = serialize_node_summary(
fork, auth=Auth(user),
primary=True,
)
# serialized result should have is_fork
assert_false(res['can_view'])
assert_true(res['is_fork'])
def test_serialize_node_summary_child_exists(self):
user = UserFactory()
parent_node = ProjectFactory(creator=user)
linked_node = ProjectFactory(creator=user)
result = _view_project(parent_node, Auth(user))
assert_equal(result['node']['child_exists'], False)
parent_node.add_node_link(linked_node, Auth(user), save=True)
result = _view_project(parent_node, Auth(user))
assert_equal(result['node']['child_exists'], False)
child_component = NodeFactory(creator=user, parent=parent_node)
result = _view_project(parent_node, Auth(user))
assert_equal(result['node']['child_exists'], True)
def test_serialize_node_search_returns_only_visible_contributors(self):
node = NodeFactory()
non_visible_contributor = UserFactory()
node.add_contributor(non_visible_contributor, visible=False)
serialized_node = _serialize_node_search(node)
assert_equal(serialized_node['firstAuthor'], node.visible_contributors[0].family_name)
assert_equal(len(node.visible_contributors), 1)
assert_false(serialized_node['etal'])
class TestViewProject(OsfTestCase):
def setUp(self):
super(TestViewProject, self).setUp()
self.user = UserFactory()
self.node = ProjectFactory(creator=self.user)
def test_view_project_pending_registration_for_admin_contributor_does_contain_cancel_link(self):
pending_reg = RegistrationFactory(project=self.node, archive=True)
assert_true(pending_reg.is_pending_registration)
result = _view_project(pending_reg, Auth(self.user))
assert_not_equal(result['node']['disapproval_link'], '')
assert_in('/?token=', result['node']['disapproval_link'])
pending_reg.delete()
def test_view_project_pending_registration_for_write_contributor_does_not_contain_cancel_link(self):
write_user = UserFactory()
self.node.add_contributor(write_user, permissions=permissions.WRITE,
auth=Auth(self.user), save=True)
pending_reg = RegistrationFactory(project=self.node, archive=True)
assert_true(pending_reg.is_pending_registration)
result = _view_project(pending_reg, Auth(write_user))
assert_equal(result['node']['disapproval_link'], '')
pending_reg.delete()
def test_view_project_child_exists(self):
linked_node = ProjectFactory(creator=self.user)
result = _view_project(self.node, Auth(self.user))
assert_equal(result['node']['child_exists'], False)
self.node.add_node_link(linked_node, Auth(self.user), save=True)
result = _view_project(self.node, Auth(self.user))
assert_equal(result['node']['child_exists'], False)
child_component = NodeFactory(creator=self.user, parent=self.node)
result = _view_project(self.node, Auth(self.user))
assert_equal(result['node']['child_exists'], True)
class TestViewProjectEmbeds(OsfTestCase):
def setUp(self):
super(TestViewProjectEmbeds, self).setUp()
self.user = UserFactory()
self.project = ProjectFactory(creator=self.user)
def test_view_project_embed_forks_excludes_registrations(self):
project = ProjectFactory()
fork = project.fork_node(Auth(project.creator))
reg = RegistrationFactory(project=fork)
res = _view_project(project, auth=Auth(project.creator), embed_forks=True)
assert_in('forks', res['node'])
assert_equal(len(res['node']['forks']), 1)
assert_equal(res['node']['forks'][0]['id'], fork._id)
# Regression test
def test_view_project_embed_registrations_sorted_by_registered_date_descending(self):
# register a project several times, with various registered_dates
registrations = []
for days_ago in (21, 3, 2, 8, 13, 5, 1):
registration = RegistrationFactory(project=self.project)
reg_date = registration.registered_date - dt.timedelta(days_ago)
registration.registered_date = reg_date
registration.save()
registrations.append(registration)
registrations.sort(key=lambda r: r.registered_date, reverse=True)
expected = [r._id for r in registrations]
data = _view_project(node=self.project, auth=Auth(self.project.creator), embed_registrations=True)
actual = [n['id'] for n in data['node']['registrations']]
assert_equal(actual, expected)
def test_view_project_embed_descendants(self):
child = NodeFactory(parent=self.project, creator=self.user)
res = _view_project(self.project, auth=Auth(self.project.creator), embed_descendants=True)
assert_in('descendants', res['node'])
assert_equal(len(res['node']['descendants']), 1)
assert_equal(res['node']['descendants'][0]['id'], child._id)
class TestGetReadableDescendants(OsfTestCase):
def setUp(self):
OsfTestCase.setUp(self)
self.user = UserFactory()
def test__get_readable_descendants(self):
project = ProjectFactory(creator=self.user)
child = NodeFactory(parent=project, creator=self.user)
nodes, all_readable = _get_readable_descendants(auth=Auth(project.creator), node=project)
assert_equal(nodes[0]._id, child._id)
assert_true(all_readable)
def test__get_readable_descendants_includes_pointers(self):
project = ProjectFactory(creator=self.user)
pointed = ProjectFactory()
node_relation = project.add_pointer(pointed, auth=Auth(self.user))
project.save()
nodes, all_readable = _get_readable_descendants(auth=Auth(project.creator), node=project)
assert_equal(len(nodes), 1)
assert_equal(nodes[0].title, pointed.title)
assert_equal(nodes[0]._id, pointed._id)
assert_true(all_readable)
def test__get_readable_descendants_masked_by_permissions(self):
# Users should be able to see through components they do not have
# permissions to.
# Users should not be able to see through links to nodes they do not
# have permissions to.
#
# 1(AB)
# / | \
# * | \
# / | \
# 2(A) 4(B) 7(A)
# | | | \
# | | | \
# 3(AB) 5(B) 8(AB) 9(B)
# |
# |
# 6(A)
#
#
userA = UserFactory(fullname='User A')
userB = UserFactory(fullname='User B')
project1 = ProjectFactory(creator=self.user, title='One')
project1.add_contributor(userA, auth=Auth(self.user), permissions=['read'])
project1.add_contributor(userB, auth=Auth(self.user), permissions=['read'])
component2 = ProjectFactory(creator=self.user, title='Two')
component2.add_contributor(userA, auth=Auth(self.user), permissions=['read'])
component3 = ProjectFactory(creator=self.user, title='Three')
component3.add_contributor(userA, auth=Auth(self.user), permissions=['read'])
component3.add_contributor(userB, auth=Auth(self.user), permissions=['read'])
component4 = ProjectFactory(creator=self.user, title='Four')
component4.add_contributor(userB, auth=Auth(self.user), permissions=['read'])
component5 = ProjectFactory(creator=self.user, title='Five')
component5.add_contributor(userB, auth=Auth(self.user), permissions=['read'])
component6 = ProjectFactory(creator=self.user, title='Six')
component6.add_contributor(userA, auth=Auth(self.user), permissions=['read'])
component7 = ProjectFactory(creator=self.user, title='Seven')
component7.add_contributor(userA, auth=Auth(self.user), permissions=['read'])
component8 = ProjectFactory(creator=self.user, title='Eight')
component8.add_contributor(userA, auth=Auth(self.user), permissions=['read'])
component8.add_contributor(userB, auth=Auth(self.user), permissions=['read'])
component9 = ProjectFactory(creator=self.user, title='Nine')
component9.add_contributor(userB, auth=Auth(self.user), permissions=['read'])
project1.add_pointer(component2, Auth(self.user))
NodeRelation.objects.create(parent=project1, child=component4)
NodeRelation.objects.create(parent=project1, child=component7)
NodeRelation.objects.create(parent=component2, child=component3)
NodeRelation.objects.create(parent=component4, child=component5)
NodeRelation.objects.create(parent=component5, child=component6)
NodeRelation.objects.create(parent=component7, child=component8)
NodeRelation.objects.create(parent=component7, child=component9)
nodes, all_readable = _get_readable_descendants(auth=Auth(userA), node=project1)
assert_equal(len(nodes), 3)
assert_false(all_readable)
for node in nodes:
assert_in(node.title, ['Two', 'Six', 'Seven'])
nodes, all_readable = _get_readable_descendants(auth=Auth(userB), node=project1)
assert_equal(len(nodes), 3)
assert_false(all_readable)
for node in nodes:
assert_in(node.title, ['Four', 'Eight', 'Nine'])
class TestNodeLogSerializers(OsfTestCase):
def test_serialize_node_for_logs(self):
node = NodeFactory()
d = node.serialize()
assert_equal(d['id'], node._primary_key)
assert_equal(d['category'], node.category_display)
assert_equal(d['node_type'], node.project_or_component)
assert_equal(d['url'], node.url)
assert_equal(d['title'], node.title)
assert_equal(d['api_url'], node.api_url)
assert_equal(d['is_public'], node.is_public)
assert_equal(d['is_registration'], node.is_registration)
class TestAddContributorJson(OsfTestCase):
def setUp(self):
super(TestAddContributorJson, self).setUp()
self.user = UserFactory()
self.profile = self.user.profile_url
self.user_id = self.user._primary_key
self.fullname = self.user.fullname
self.username = self.user.username
self.jobs = [{
'institution': 'School of Lover Boys',
'department': 'Fancy Patter',
'title': 'Lover Boy',
'start': None,
'end': None,
}]
self.schools = [{
'degree': 'Vibing',
'institution': 'Queens University',
'department': '',
'location': '',
'start': None,
'end': None,
}]
def test_add_contributor_json(self):
# User with no employment or education info listed
user_info = utils.add_contributor_json(self.user)
assert_equal(user_info['fullname'], self.fullname)
assert_equal(user_info['email'], self.username)
assert_equal(user_info['id'], self.user_id)
assert_equal(user_info['employment'], None)
assert_equal(user_info['education'], None)
assert_equal(user_info['n_projects_in_common'], 0)
assert_equal(user_info['registered'], True)
assert_equal(user_info['active'], True)
assert_in('secure.gravatar.com', user_info['profile_image_url'])
assert_equal(user_info['profile_url'], self.profile)
def test_add_contributor_json_with_edu(self):
# Test user with only education information
self.user.schools = self.schools
user_info = utils.add_contributor_json(self.user)
assert_equal(user_info['fullname'], self.fullname)
assert_equal(user_info['email'], self.username)
assert_equal(user_info['id'], self.user_id)
assert_equal(user_info['employment'], None)
assert_equal(user_info['education'], self.user.schools[0]['institution'])
assert_equal(user_info['n_projects_in_common'], 0)
assert_equal(user_info['registered'], True)
assert_equal(user_info['active'], True)
assert_in('secure.gravatar.com', user_info['profile_image_url'])
assert_equal(user_info['profile_url'], self.profile)
def test_add_contributor_json_with_job(self):
# Test user with only employment information
self.user.jobs = self.jobs
user_info = utils.add_contributor_json(self.user)
assert_equal(user_info['fullname'], self.fullname)
assert_equal(user_info['email'], self.username)
assert_equal(user_info['id'], self.user_id)
assert_equal(user_info['employment'], self.user.jobs[0]['institution'])
assert_equal(user_info['education'], None)
assert_equal(user_info['n_projects_in_common'], 0)
assert_equal(user_info['registered'], True)
assert_equal(user_info['active'], True)
assert_in('secure.gravatar.com', user_info['profile_image_url'])
assert_equal(user_info['profile_url'], self.profile)
def test_add_contributor_json_with_job_and_edu(self):
# User with both employment and education information
self.user.jobs = self.jobs
self.user.schools = self.schools
user_info = utils.add_contributor_json(self.user)
assert_equal(user_info['fullname'], self.fullname)
assert_equal(user_info['email'], self.username)
assert_equal(user_info['id'], self.user_id)
assert_equal(user_info['employment'], self.user.jobs[0]['institution'])
assert_equal(user_info['education'], self.user.schools[0]['institution'])
assert_equal(user_info['n_projects_in_common'], 0)
assert_equal(user_info['registered'], True)
assert_equal(user_info['active'], True)
assert_in('secure.gravatar.com', user_info['profile_image_url'])
assert_equal(user_info['profile_url'], self.profile)
|
{
"content_hash": "385b722f0c965a2468ed59a4a6bf37c8",
"timestamp": "",
"source": "github",
"line_count": 490,
"max_line_length": 118,
"avg_line_length": 43.63469387755102,
"alnum_prop": 0.6373883354380057,
"repo_name": "laurenrevere/osf.io",
"id": "6ed8816479f02b9bff06df67ede416c77ba70361",
"size": "21406",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "tests/test_serializers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "110148"
},
{
"name": "HTML",
"bytes": "228999"
},
{
"name": "JavaScript",
"bytes": "1809805"
},
{
"name": "Mako",
"bytes": "642995"
},
{
"name": "Python",
"bytes": "7692214"
},
{
"name": "VCL",
"bytes": "13885"
}
],
"symlink_target": ""
}
|
"""Start and stop tsproxy."""
import logging
import os
import re
import subprocess
import sys
from telemetry.core import util
from telemetry.internal.util import atexit_with_log
import py_utils
_TSPROXY_PATH = os.path.join(
util.GetTelemetryThirdPartyDir(), 'tsproxy', 'tsproxy.py')
def ParseTsProxyPortFromOutput(output_line):
port_re = re.compile(
r'Started Socks5 proxy server on '
r'(?P<host>[^:]*):'
r'(?P<port>\d+)')
m = port_re.match(output_line.strip())
if m:
return int(m.group('port'))
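# Example (illustrative): the output line
#   'Started Socks5 proxy server on 127.0.0.1:1234'
# yields 1234; a line that does not match the pattern yields None.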
class TsProxyServer(object):
"""Start and Stop Tsproxy.
TsProxy provides basic latency, download and upload traffic shaping. This
class provides a programming API to the tsproxy script in
telemetry/third_party/tsproxy/tsproxy.py
"""
def __init__(self, host_ip=None, http_port=None, https_port=None):
"""Initialize TsProxyServer.
"""
self._proc = None
self._port = None
self._is_running = False
self._host_ip = host_ip
assert bool(http_port) == bool(https_port)
self._http_port = http_port
self._https_port = https_port
@property
def port(self):
return self._port
def StartServer(self, timeout=10):
"""Start TsProxy server and verify that it started.
"""
cmd_line = [sys.executable, _TSPROXY_PATH]
cmd_line.extend([
'--port=0']) # Use port 0 so tsproxy picks a random available port.
if self._host_ip:
cmd_line.append('--desthost=%s' % self._host_ip)
if self._http_port:
cmd_line.append(
'--mapports=443:%s,*:%s' % (self._https_port, self._http_port))
    logging.info('Tsproxy commandline: %r', cmd_line)
self._proc = subprocess.Popen(
cmd_line, stdout=subprocess.PIPE, stdin=subprocess.PIPE,
stderr=subprocess.PIPE, bufsize=1)
atexit_with_log.Register(self.StopServer)
try:
py_utils.WaitFor(self._IsStarted, timeout)
logging.info('TsProxy port: %s', self._port)
self._is_running = True
except py_utils.TimeoutException:
err = self.StopServer()
raise RuntimeError(
'Error starting tsproxy: %s' % err)
def _IsStarted(self):
assert not self._is_running
assert self._proc
if self._proc.poll() is not None:
return False
self._proc.stdout.flush()
self._port = ParseTsProxyPortFromOutput(
output_line=self._proc.stdout.readline())
    return self._port is not None
def _IssueCommand(self, command_string, timeout):
logging.info('Issuing command to ts_proxy_server: %s', command_string)
command_output = []
self._proc.stdin.write('%s\n' % command_string)
self._proc.stdin.flush()
self._proc.stdout.flush()
def CommandStatusIsRead():
command_output.append(self._proc.stdout.readline().strip())
return (
command_output[-1] == 'OK' or command_output[-1] == 'ERROR')
py_utils.WaitFor(CommandStatusIsRead, timeout)
    if 'OK' not in command_output:
raise RuntimeError('Failed to execute command %s:\n%s' %
(repr(command_string), '\n'.join(command_output)))
def UpdateOutboundPorts(self, http_port, https_port, timeout=5):
assert http_port and https_port
assert http_port != https_port
assert isinstance(http_port, int) and isinstance(https_port, int)
assert 1 <= http_port <= 65535
assert 1 <= https_port <= 65535
self._IssueCommand('set mapports 443:%i,*:%i' % (https_port, http_port),
timeout)
def UpdateTrafficSettings(
self, round_trip_latency_ms=0,
download_bandwidth_kbps=0, upload_bandwidth_kbps=0, timeout=5):
self._IssueCommand('set rtt %s' % round_trip_latency_ms, timeout)
self._IssueCommand('set inkbps %s' % download_bandwidth_kbps, timeout)
self._IssueCommand('set outkbps %s' % upload_bandwidth_kbps, timeout)
def StopServer(self):
"""Stop TsProxy Server."""
if not self._is_running:
logging.debug('Attempting to stop TsProxy server that is not running.')
return
if self._proc:
self._proc.terminate()
self._proc.wait()
err = self._proc.stderr.read()
self._proc = None
self._port = None
self._is_running = False
return err
def __enter__(self):
"""Add support for with-statement."""
self.StartServer()
return self
def __exit__(self, unused_exc_type, unused_exc_val, unused_exc_tb):
"""Add support for with-statement."""
self.StopServer()
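# Minimal usage sketch (values are illustrative): run tsproxy for the duration
# of a with-block and shape traffic to emulate a slow link.
#
#   with TsProxyServer() as server:
#       server.UpdateTrafficSettings(round_trip_latency_ms=150,
#                                    download_bandwidth_kbps=1600,
#                                    upload_bandwidth_kbps=768)
#       ...  # drive traffic through the SOCKS5 proxy at localhost:server.port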
|
{
"content_hash": "933ac54810e6778bc90b62a86db10cc1",
"timestamp": "",
"source": "github",
"line_count": 141,
"max_line_length": 77,
"avg_line_length": 31.54609929078014,
"alnum_prop": 0.6490557553956835,
"repo_name": "catapult-project/catapult-csm",
"id": "4d88238a59960018bc1a1fdf423c60f3cdf36f53",
"size": "4611",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "telemetry/telemetry/internal/util/ts_proxy_server.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "4902"
},
{
"name": "C++",
"bytes": "43728"
},
{
"name": "CSS",
"bytes": "24873"
},
{
"name": "Go",
"bytes": "80325"
},
{
"name": "HTML",
"bytes": "11817766"
},
{
"name": "JavaScript",
"bytes": "518002"
},
{
"name": "Makefile",
"bytes": "1588"
},
{
"name": "Python",
"bytes": "6207634"
},
{
"name": "Shell",
"bytes": "2558"
}
],
"symlink_target": ""
}
|
"""Utilities to help convert mp4s to fmp4s."""
import io
def find_box(segment: io.BytesIO, target_type: bytes, box_start: int = 0) -> int:
"""Find location of first box (or sub_box if box_start provided) of given type."""
if box_start == 0:
box_end = segment.seek(0, io.SEEK_END)
segment.seek(0)
index = 0
else:
segment.seek(box_start)
box_end = box_start + int.from_bytes(segment.read(4), byteorder="big")
index = box_start + 8
while 1:
if index > box_end - 8: # End of box, not found
break
segment.seek(index)
box_header = segment.read(8)
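        # An ISO BMFF box begins with a 4-byte big-endian size followed by a
        # 4-byte type tag, so box_header[0:4] is the size and [4:8] the type.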
if box_header[4:8] == target_type:
yield index
segment.seek(index)
index += int.from_bytes(box_header[0:4], byteorder="big")
def get_init(segment: io.BytesIO) -> bytes:
"""Get init section from fragmented mp4."""
moof_location = next(find_box(segment, b"moof"))
segment.seek(0)
return segment.read(moof_location)
def get_m4s(segment: io.BytesIO, sequence: int) -> bytes:
"""Get m4s section from fragmented mp4."""
moof_location = next(find_box(segment, b"moof"))
mfra_location = next(find_box(segment, b"mfra"))
segment.seek(moof_location)
return segment.read(mfra_location - moof_location)
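# Minimal usage sketch (file name hypothetical): split a fragmented mp4 into
# its init section and a media segment, e.g. for HLS-style delivery.
#
#   with open("recording.mp4", "rb") as f:
#       segment = io.BytesIO(f.read())
#   init = get_init(segment)   # bytes up to the first moof box
#   m4s = get_m4s(segment, 1)  # moof..mfra payload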
|
{
"content_hash": "6096a4b66d057921d032b7edc97a43b2",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 86,
"avg_line_length": 34.60526315789474,
"alnum_prop": 0.6152091254752852,
"repo_name": "titilambert/home-assistant",
"id": "00603807215a601f078753a9542988c6955a0720",
"size": "1315",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "homeassistant/components/stream/fmp4utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1488"
},
{
"name": "Python",
"bytes": "25849092"
},
{
"name": "Shell",
"bytes": "4410"
}
],
"symlink_target": ""
}
|
"""Tests for tensorflow.ops.gradients."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import warnings
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import function
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_ops
from tensorflow.python.framework import test_util
from tensorflow.python.framework.constant_op import constant
from tensorflow.python.ops import array_grad # pylint: disable=unused-import
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_grad # pylint: disable=unused-import
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_grad # pylint: disable=unused-import
from tensorflow.python.ops import data_flow_ops # pylint: disable=unused-import
from tensorflow.python.ops import functional_ops # pylint: disable=unused-import
from tensorflow.python.ops import gradients
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_grad # pylint: disable=unused-import
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_grad # pylint: disable=unused-import
from tensorflow.python.ops import state_grad # pylint: disable=unused-import
from tensorflow.python.ops import tensor_array_grad # pylint: disable=unused-import
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops.nn_ops import bias_add
from tensorflow.python.platform import googletest
def _OpsBetween(graph, to_ops, from_ops):
"""Build the list of operations between two lists of Operations.
Args:
graph: a Graph.
to_ops: list of Operations.
from_ops: list of Operations.
Returns:
The list of operations between "from_ops" and "to_ops", sorted by
decreasing operation id. This list contains all elements of to_ops.
TODO(touts): Think about returning an empty list if from_ops are not
reachable from to_ops. Presently it returns to_ops in that case.
"""
# List of booleans, indexed by operation id, indicating if
# an op is reached from the output of "input_ops".
reached_ops = [False] * (graph._last_id + 1)
# We only care to reach up to "output_ops" so we mark the
# output ops as reached to avoid recursing past them.
for op in to_ops:
reached_ops[op._id] = True
gradients_impl._MarkReachedOps(from_ops, reached_ops)
between_ops = gradients_impl._GatherInputs(to_ops, reached_ops)
between_ops.sort(key=lambda x: -x._id)
return between_ops
class GradientsTest(test_util.TensorFlowTestCase):
def _OpNames(self, op_list):
return ["%s/%d" % (str(op.name), op._id) for op in op_list]
def _assertOpListEqual(self, ops1, ops2):
self.assertEquals(self._OpNames(ops1), self._OpNames(ops2))
def testOpsBetweenSimple(self):
with ops.Graph().as_default() as g:
t1 = constant(1.0)
t2 = constant(2.0)
t3 = array_ops.stack([t1, t2])
# Full graph
self._assertOpListEqual([t3.op, t2.op, t1.op],
_OpsBetween(g, [t3.op], [t1.op, t2.op]))
# Only t1, t3.
self._assertOpListEqual([t3.op, t1.op], _OpsBetween(g, [t3.op], [t1.op]))
def testOpsBetweenUnreachable(self):
with ops.Graph().as_default() as g:
t1 = constant(1.0)
t2 = constant(2.0)
_ = array_ops.stack([t1, t2])
t4 = constant(1.0)
t5 = constant(2.0)
t6 = array_ops.stack([t4, t5])
# Elements of to_ops are always listed.
self._assertOpListEqual([t6.op], _OpsBetween(g, [t6.op], [t1.op]))
def testOpsBetweenCut(self):
with ops.Graph().as_default() as g:
t1 = constant(1.0)
t2 = constant(2.0)
t3 = array_ops.stack([t1, t2])
t4 = constant([1.0])
t5 = array_ops.concat([t4, t3], 0)
t6 = constant([2.0])
t7 = array_ops.concat([t5, t6], 0)
self._assertOpListEqual([t7.op, t5.op, t4.op],
_OpsBetween(g, [t7.op], [t4.op]))
def testOpsBetweenCycle(self):
with ops.Graph().as_default() as g:
t1 = constant(1.0)
t2 = constant(2.0)
t3 = array_ops.stack([t1, t2])
t4 = array_ops.concat([t3, t3, t3], 0)
t5 = constant([1.0])
t6 = array_ops.concat([t4, t5], 0)
t7 = array_ops.concat([t6, t3], 0)
self._assertOpListEqual([t6.op, t4.op, t3.op],
_OpsBetween(g, [t6.op], [t3.op]))
self._assertOpListEqual([t7.op, t6.op, t5.op, t4.op, t3.op, t1.op],
_OpsBetween(g, [t7.op], [t1.op, t5.op]))
self._assertOpListEqual([t6.op, t5.op, t4.op, t3.op, t2.op],
_OpsBetween(g, [t6.op], [t2.op, t5.op]))
def testGradients(self):
with ops.Graph().as_default():
inp = constant(1.0, shape=[32, 100], name="in")
w = constant(1.0, shape=[100, 10], name="w")
b = constant(1.0, shape=[10], name="b")
xw = math_ops.matmul(inp, w, name="xw")
h = bias_add(xw, b, name="h")
w_grad = gradients.gradients(h, w)[0]
self.assertEquals("MatMul", w_grad.op.type)
self.assertEquals(w_grad.op._original_op, xw.op)
self.assertTrue(w_grad.op.get_attr("transpose_a"))
self.assertFalse(w_grad.op.get_attr("transpose_b"))
def testUnusedOutput(self):
with ops.Graph().as_default():
w = constant(1.0, shape=[2, 2])
x = constant(1.0, shape=[2, 2])
wx = math_ops.matmul(w, x)
split_wx = array_ops.split(value=wx, num_or_size_splits=2, axis=0)
c = math_ops.reduce_sum(split_wx[1])
gw = gradients.gradients(c, [w])[0]
self.assertEquals("MatMul", gw.op.type)
def testColocateGradients(self):
with ops.Graph().as_default() as g:
w = constant(1.0, shape=[1, 1])
x = constant(1.0, shape=[1, 2])
with g.device("/gpu:0"):
wx = math_ops.matmul(w, x)
gw = gradients.gradients(wx, [w], colocate_gradients_with_ops=True)[0]
self.assertEqual(gw.op.colocation_groups(), wx.op.colocation_groups())
def testColocateGradientsWithAggregation(self):
with ops.Graph().as_default() as g:
with g.device("/gpu:1"):
w = constant(1.0, shape=[1, 1])
x = constant(1.0, shape=[1, 2])
y = constant(1.0, shape=[1, 2])
wx = math_ops.matmul(w, x)
wy = math_ops.matmul(w, y)
with g.device("/gpu:0"):
z = wx + wy
gw1 = gradients.gradients(z, [w], colocate_gradients_with_ops=True)[0]
self.assertEqual(gw1.op.colocation_groups(), wx.op.colocation_groups())
gw2 = gradients.gradients(z, [w], colocate_gradients_with_ops=False)[0]
self.assertTrue(wx.op.colocation_groups() != gw2.op.colocation_groups())
def testColocateGradientsWithAggregationInMultipleDevices(self):
with ops.Graph().as_default() as g:
with g.device("/gpu:1"):
w = constant(1.0, shape=[1, 1])
x = constant(1.0, shape=[1, 2])
y = constant(1.0, shape=[1, 2])
with g.device("/task:1"):
wx = math_ops.matmul(w, x)
with g.device("/task:2"):
wy = math_ops.matmul(w, y)
with g.device("/gpu:0"):
z = wx + wy
gw1 = gradients.gradients(z, [w], colocate_gradients_with_ops=True)[0]
self.assertEqual(gw1.op.colocation_groups(), w.op.colocation_groups())
gw2 = gradients.gradients(z, [w], colocate_gradients_with_ops=False)[0]
self.assertTrue(w.op.colocation_groups() != gw2.op.colocation_groups())
def testBoundaryStop(self):
# Test that we don't differentiate 'x'. The gradient function for 'x' is
# set explicitly to None so we will get an exception if the gradient code
# tries to differentiate 'x'.
with ops.Graph().as_default():
c = constant(1.0)
x = array_ops.identity(c)
y = x + 1.0
z = y + 1
grads = gradients.gradients(z, [x])
self.assertTrue(all(x is not None for x in grads))
def testBoundaryContinue(self):
# Test that we differentiate both 'x' and 'y' correctly when x is a
# predecessor of y.
with self.test_session():
x = constant(1.0)
y = x * 2.0
z = y * 3.0
grads = gradients.gradients(z, [x, y])
self.assertTrue(all(x is not None for x in grads))
self.assertEqual(6.0, grads[0].eval())
def testAggregationMethodAccumulateN(self):
with self.test_session():
x = constant(1.0)
y = x * 2.0
z = y + y + y + y + y + y + y + y + y + y
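      # z sums y ten times, so dz/dy = 10 and, since y = 2x, dz/dx = 20,
      # matching the 20.0 and 10.0 checks below.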
grads = gradients.gradients(
z, [x, y],
aggregation_method=gradients.AggregationMethod.
EXPERIMENTAL_ACCUMULATE_N)
self.assertTrue(all(x is not None for x in grads))
self.assertEqual(20.0, grads[0].eval())
self.assertEqual(10.0, grads[1].eval())
def testAggregationMethodAddN(self):
with self.test_session():
x = constant(1.0)
y = x * 2.0
z = y + y + y + y + y + y + y + y + y + y
grads = gradients.gradients(
z, [x, y], aggregation_method=gradients.AggregationMethod.ADD_N)
self.assertTrue(all(x is not None for x in grads))
self.assertEqual(20.0, grads[0].eval())
self.assertEqual(10.0, grads[1].eval())
def testAggregationMethodTree(self):
with self.test_session():
x = constant(1.0)
y = x * 2.0
z = y + y + y + y + y + y + y + y + y + y
grads = gradients.gradients(
z, [x, y],
aggregation_method=gradients.AggregationMethod.EXPERIMENTAL_TREE)
self.assertTrue(all(x is not None for x in grads))
self.assertEqual(20.0, grads[0].eval())
self.assertEqual(10.0, grads[1].eval())
def testNoGradientForStringOutputs(self):
with ops.Graph().as_default():
def _TestOpGrad(_, float_grad, string_grad):
"""Gradient function for TestStringOutput."""
self.assertEquals(float_grad.dtype, dtypes.float32)
self.assertFalse(string_grad)
return float_grad
ops.RegisterGradient("TestStringOutput")(_TestOpGrad)
c = constant(1.0)
x, _ = test_ops.test_string_output(c)
z = x * 2.0
w = z * 3.0
grads = gradients.gradients(z, [c])
self.assertTrue(isinstance(grads[0], ops.Tensor))
grads = gradients.gradients(w, [c])
self.assertTrue(isinstance(grads[0], ops.Tensor))
def testSingletonIndexedSlices(self):
with ops.Graph().as_default():
x = array_ops.placeholder(dtypes.float32)
y = array_ops.identity(x)
dy = ops.IndexedSlices(
array_ops.placeholder(dtypes.float32),
array_ops.placeholder(dtypes.int32))
dx, = gradients.gradients(y, x, grad_ys=dy)
# The gradient of tf.identity should pass the value through unchanged.
# A previous version of the code did this only for tf.Tensor, not
# tf.IndexedSlices.
self.assertEqual(dx, dy)
def testNonDifferentiableSwitchInWhileLoop(self):
with ops.Graph().as_default():
v = array_ops.placeholder(dtypes.float32, [])
def _Step(i, a, ta):
a += math_ops.cast(v, dtypes.int32)
return (i + 1, a, ta.write(i, a))
n = 4
i, _, ta = control_flow_ops.while_loop(
lambda i, *_: i < n,
_Step, [0, 0, tensor_array_ops.TensorArray(
dtypes.int32, size=n)])
target = ta.read(i - 1)
grad, = gradients.gradients(target, v)
self.assertIsNone(grad)
class FunctionGradientsTest(test_util.TensorFlowTestCase):
@classmethod
def XSquarePlusB(cls, x, b):
return x * x + b
@classmethod
def XSquarePlusBGradient(cls, x, b, g):
# Perturb gradients (multiply by 2), so we can test that this was called.
g *= 2.0
return g * 2.0 * x, g
@classmethod
def _PythonGradient(cls, op, grad):
# Perturb gradients (multiply by 3), so we can test that this was called.
grad *= 3.0
return grad * op.inputs[0] * 2.0, grad
@classmethod
def _GetFunc(cls, **kwargs):
return function.Defun(dtypes.float32, dtypes.float32, **
kwargs)(cls.XSquarePlusB)
def _GetFuncGradients(self, f, x_value, b_value):
x = constant_op.constant(x_value, name="x")
b = constant_op.constant(b_value, name="b")
y = f(x, b)
grads = gradients.gradients(y, [x, b])
with self.test_session() as sess:
return sess.run(grads)
def testFunctionGradientsBasic(self):
g = ops.Graph()
with g.as_default():
f = self._GetFunc()
# Get gradients (should add SymbolicGradient node for function).
grads = self._GetFuncGradients(f, [2.0], [1.0])
self.assertAllEqual([4.0], grads[0])
self.assertAllEqual([1.0], grads[1])
def testFunctionGradientsComposition(self):
with ops.Graph().as_default():
f = self._GetFunc()
x = constant_op.constant([2.0], name="x")
b1 = constant_op.constant([1.0], name="b1")
b2 = constant_op.constant([1.0], name="b2")
y = f(f(x, b1), b2)
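      # With f(x, b) = x*x + b: y = (x^2 + b1)^2 + b2, so at x = 2, b1 = 1,
      # dy/dx = 2*(x^2 + b1)*2x = 40 and dy/db1 = 2*(x^2 + b1) = 10.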
# Build gradient graph (should add SymbolicGradient node for function).
grads = gradients.gradients(y, [x, b1])
with self.test_session() as sess:
self.assertAllEqual([40.0], sess.run(grads)[0])
self.assertAllEqual([10.0], sess.run(grads)[1])
def testFunctionGradientsWithGradFunc(self):
g = ops.Graph()
with g.as_default():
grad_func = function.Defun(dtypes.float32, dtypes.float32,
dtypes.float32)(self.XSquarePlusBGradient)
f = self._GetFunc(grad_func=grad_func)
# Get gradients (should add SymbolicGradient node for function, which
# uses the grad_func above, which multiplies all gradients by 2).
grads = self._GetFuncGradients(f, [2.0], [1.0])
self.assertAllEqual([4.0 * 2], grads[0])
self.assertAllEqual([1.0 * 2], grads[1])
def testFunctionGradientWithRegistration(self):
g = ops.Graph()
with g.as_default():
f = self._GetFunc(python_grad_func=self._PythonGradient)
# Get gradients, using the python gradient function. It multiplies the
# gradients by 3.
grads = self._GetFuncGradients(f, [2.0], [1.0])
self.assertAllEqual([4.0 * 3], grads[0])
self.assertAllEqual([1.0 * 3], grads[1])
def testFunctionGradientWithGradFuncAndRegistration(self):
g = ops.Graph()
with g.as_default():
grad_func = function.Defun(dtypes.float32, dtypes.float32,
dtypes.float32)(self.XSquarePlusBGradient)
with self.assertRaisesRegexp(ValueError, "Gradient defined twice"):
f = self._GetFunc(
grad_func=grad_func, python_grad_func=self._PythonGradient)
f.add_to_graph(ops.Graph())
class StopGradientTest(test_util.TensorFlowTestCase):
def testStopGradient(self):
with ops.Graph().as_default():
inp = constant(1.0, shape=[100, 32], name="in")
out = array_ops.stop_gradient(inp)
igrad = gradients.gradients(out, inp)[0]
assert igrad is None
class PreventGradientTest(test_util.TensorFlowTestCase):
def testPreventGradient(self):
with ops.Graph().as_default():
inp = constant(1.0, shape=[100, 32], name="in")
out = array_ops.prevent_gradient(inp)
with self.assertRaisesRegexp(LookupError, "explicitly disabled"):
_ = gradients.gradients(out, inp)
class HessianVectorProductTest(test_util.TensorFlowTestCase):
def testHessianVectorProduct(self):
# Manually compute the Hessian explicitly for a low-dimensional problem
# and check that HessianVectorProduct matches multiplication by the
# explicit Hessian.
# Specifically, the Hessian of f(x) = x^T A x is
# H = A + A^T.
# We expect HessianVectorProduct(f(x), x, v) to be H v.
m = 4
rng = np.random.RandomState([1, 2, 3])
mat_value = rng.randn(m, m).astype("float32")
v_value = rng.randn(m, 1).astype("float32")
x_value = rng.randn(m, 1).astype("float32")
hess_value = mat_value + mat_value.T
hess_v_value = np.dot(hess_value, v_value)
for use_gpu in [False, True]:
with self.test_session(use_gpu=use_gpu):
mat = constant_op.constant(mat_value)
v = constant_op.constant(v_value)
x = constant_op.constant(x_value)
mat_x = math_ops.matmul(mat, x, name="Ax")
x_mat_x = math_ops.matmul(array_ops.transpose(x), mat_x, name="xAx")
hess_v = gradients_impl._hessian_vector_product(x_mat_x, [x], [v])[0]
hess_v_actual = hess_v.eval()
self.assertAllClose(hess_v_value, hess_v_actual)
class HessianTest(test_util.TensorFlowTestCase):
def testHessian1D(self):
# Manually compute the Hessian explicitly for a low-dimensional problem
# and check that `hessian` matches. Specifically, the Hessian of
# f(x) = x^T A x is H = A + A^T.
m = 4
rng = np.random.RandomState([1, 2, 3])
mat_value = rng.randn(m, m).astype("float32")
x_value = rng.randn(m).astype("float32")
hess_value = mat_value + mat_value.T
with self.test_session(use_gpu=True):
mat = constant_op.constant(mat_value)
x = constant_op.constant(x_value)
x_mat_x = math_ops.reduce_sum(x[:, None] * mat * x[None, :])
hess = gradients.hessians(x_mat_x, x)[0]
hess_actual = hess.eval()
self.assertAllClose(hess_value, hess_actual)
def testHessian1D_multi(self):
# Test the computation of the hessian with respect to multiple tensors
m = 4
n = 3
rng = np.random.RandomState([1, 2, 3])
mat_values = [rng.randn(m, m).astype("float32") for _ in range(n)]
x_values = [rng.randn(m).astype("float32") for _ in range(n)]
hess_values = [mat_value + mat_value.T for mat_value in mat_values]
with self.test_session(use_gpu=True):
mats = [constant_op.constant(mat_value) for mat_value in mat_values]
xs = [constant_op.constant(x_value) for x_value in x_values]
xs_mats_xs = [
math_ops.reduce_sum(x[:, None] * mat * x[None, :])
for x, mat in zip(xs, mats)
]
hessians = gradients.hessians(xs_mats_xs, xs)
hessians_actual = [hess.eval() for hess in hessians]
for hess_value, hess_actual in zip(hess_values, hessians_actual):
self.assertAllClose(hess_value, hess_actual)
def testHessianInvalidDimension(self):
for shape in [(10, 10), None]:
with self.test_session(use_gpu=True):
x = array_ops.placeholder(dtypes.float32, shape)
# Expect a ValueError because the dimensions are wrong
with self.assertRaises(ValueError):
gradients.hessians(x, x)
class IndexedSlicesToTensorTest(test_util.TensorFlowTestCase):
def testIndexedSlicesToTensor(self):
with self.test_session():
np_val = np.random.rand(4, 4, 4, 4).astype(np.float32)
c = constant_op.constant(np_val)
c_sparse = math_ops._as_indexed_slices(c)
self.assertAllEqual(np_val.shape, c_sparse.dense_shape.eval())
c_dense = math_ops.multiply(c_sparse, 1.0)
self.assertAllClose(np_val, c_dense.eval())
def testIndexedSlicesToTensorList(self):
with self.test_session():
numpy_list = []
dense_list = []
sparse_list = []
for _ in range(3):
np_val = np.random.rand(4, 4, 4, 4).astype(np.float32)
c = constant_op.constant(np_val)
c_sparse = math_ops._as_indexed_slices(c)
numpy_list.append(np_val)
dense_list.append(c)
sparse_list.append(c_sparse)
packed_dense = array_ops.stack(dense_list)
packed_sparse = array_ops.stack(sparse_list)
self.assertAllClose(packed_dense.eval(), packed_sparse.eval())
def testInt64Indices(self):
with self.test_session():
np_val = np.random.rand(4, 4, 4, 4).astype(np.float32)
c = constant_op.constant(np_val)
c_sparse = math_ops._as_indexed_slices(c)
c_sparse = ops.IndexedSlices(
c_sparse.values,
math_ops.cast(c_sparse.indices, dtypes.int64), c_sparse.dense_shape)
self.assertAllEqual(np_val.shape, c_sparse.dense_shape.eval())
c_dense = math_ops.multiply(c_sparse, 1.0)
self.assertAllClose(np_val, c_dense.eval())
def testWarnings(self):
# Smaller than the threshold: no warning.
c_sparse = ops.IndexedSlices(
array_ops.placeholder(dtypes.float32),
array_ops.placeholder(dtypes.int32), constant([4, 4, 4, 4]))
with warnings.catch_warnings(record=True) as w:
math_ops.multiply(c_sparse, 1.0)
self.assertEqual(0, len(w))
# Greater than or equal to the threshold: warning.
c_sparse = ops.IndexedSlices(
array_ops.placeholder(dtypes.float32),
array_ops.placeholder(dtypes.int32), constant([100, 100, 100, 100]))
with warnings.catch_warnings(record=True) as w:
math_ops.multiply(c_sparse, 1.0)
self.assertEqual(1, len(w))
self.assertTrue(
"with 100000000 elements. This may consume a large amount of memory." in
str(w[0].message))
# Unknown dense shape: warning.
c_sparse = ops.IndexedSlices(
array_ops.placeholder(dtypes.float32),
array_ops.placeholder(dtypes.int32),
array_ops.placeholder(dtypes.int32))
with warnings.catch_warnings(record=True) as w:
math_ops.multiply(c_sparse, 1.0)
self.assertEqual(1, len(w))
self.assertTrue(
"of unknown shape. This may consume a large amount of memory." in
str(w[0].message))
class OnlyRealGradientsTest(test_util.TensorFlowTestCase):
def testRealOnly(self):
x = constant_op.constant(7+3j, dtype=dtypes.complex64)
y = math_ops.square(x)
with self.assertRaisesRegexp(
TypeError,
r"Gradients of complex tensors must set grad_ys "
r"\(y\.dtype = tf\.complex64\)"):
gradients.gradients(y, x)
if __name__ == "__main__":
googletest.main()
|
{
"content_hash": "43a257df028f71910cffa288239d7296",
"timestamp": "",
"source": "github",
"line_count": 571,
"max_line_length": 84,
"avg_line_length": 38.08581436077058,
"alnum_prop": 0.6381569871706442,
"repo_name": "code-sauce/tensorflow",
"id": "20a5139a2418fd0b1662efa82a4ae2f6b9739dad",
"size": "22436",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow/python/ops/gradients_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "6963"
},
{
"name": "C",
"bytes": "171436"
},
{
"name": "C++",
"bytes": "20787826"
},
{
"name": "CMake",
"bytes": "120470"
},
{
"name": "CSS",
"bytes": "774"
},
{
"name": "GCC Machine Description",
"bytes": "2"
},
{
"name": "Go",
"bytes": "763825"
},
{
"name": "HTML",
"bytes": "554051"
},
{
"name": "Java",
"bytes": "271826"
},
{
"name": "JavaScript",
"bytes": "13406"
},
{
"name": "Jupyter Notebook",
"bytes": "1833840"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "32953"
},
{
"name": "Objective-C",
"bytes": "7056"
},
{
"name": "Objective-C++",
"bytes": "64592"
},
{
"name": "Protocol Buffer",
"bytes": "189498"
},
{
"name": "Python",
"bytes": "17280938"
},
{
"name": "Shell",
"bytes": "332465"
},
{
"name": "TypeScript",
"bytes": "772208"
}
],
"symlink_target": ""
}
|
from test import initialize, frobnicate, exacerbate
# To keep test results tidy, don't put anything we want results for before line 20
def bad1():
frobnicate()
def bad2():
exacerbate()
def good():
exacerbate()
bad2()
bad1()
initialize()
good()
|
{
"content_hash": "fd2d1e26c2e7c2b0b23ac490b29dd393",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 81,
"avg_line_length": 8.5625,
"alnum_prop": 0.6605839416058394,
"repo_name": "github/codeql",
"id": "5268cae9c9a53b26c4b0a8d4840dd88a716639a5",
"size": "361",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "python/ql/test/library-tests/state_tracking/global.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ASP.NET",
"bytes": "3739"
},
{
"name": "Batchfile",
"bytes": "3534"
},
{
"name": "C",
"bytes": "410440"
},
{
"name": "C#",
"bytes": "21146000"
},
{
"name": "C++",
"bytes": "1352639"
},
{
"name": "CMake",
"bytes": "1809"
},
{
"name": "CodeQL",
"bytes": "32583145"
},
{
"name": "Dockerfile",
"bytes": "496"
},
{
"name": "EJS",
"bytes": "1478"
},
{
"name": "Emacs Lisp",
"bytes": "3445"
},
{
"name": "Go",
"bytes": "697562"
},
{
"name": "HTML",
"bytes": "58008"
},
{
"name": "Handlebars",
"bytes": "1000"
},
{
"name": "Java",
"bytes": "5417683"
},
{
"name": "JavaScript",
"bytes": "2432320"
},
{
"name": "Kotlin",
"bytes": "12163740"
},
{
"name": "Lua",
"bytes": "13113"
},
{
"name": "Makefile",
"bytes": "8631"
},
{
"name": "Mustache",
"bytes": "17025"
},
{
"name": "Nunjucks",
"bytes": "923"
},
{
"name": "Perl",
"bytes": "1941"
},
{
"name": "PowerShell",
"bytes": "1295"
},
{
"name": "Python",
"bytes": "1649035"
},
{
"name": "RAML",
"bytes": "2825"
},
{
"name": "Ruby",
"bytes": "299268"
},
{
"name": "Rust",
"bytes": "234024"
},
{
"name": "Shell",
"bytes": "23973"
},
{
"name": "Smalltalk",
"bytes": "23"
},
{
"name": "Starlark",
"bytes": "27062"
},
{
"name": "Swift",
"bytes": "204309"
},
{
"name": "Thrift",
"bytes": "3020"
},
{
"name": "TypeScript",
"bytes": "219623"
},
{
"name": "Vim Script",
"bytes": "1949"
},
{
"name": "Vue",
"bytes": "2881"
}
],
"symlink_target": ""
}
|
"""
Asset Management - Events: Create and update functions.
"""
__author__ = 'Edna Donoughe'
from ooiservices.app.uframe.uframe_tools import (uframe_get_asset_by_uid, get_uframe_event, uframe_put_event,
uframe_postto, uframe_create_cruise, uframe_create_calibration)
from ooiservices.app.uframe.common_tools import (get_event_types, get_supported_event_types, get_event_class)
from ooiservices.app.uframe.common_tools import convert_status_value_for_display
from ooiservices.app.uframe.events_validate_fields import (events_validate_all_required_fields_are_provided,
events_validate_user_required_fields_are_provided)
# Create event.
def create_event_type(request_data):
""" Create a new event. Return new event on success, or raise exception on error.
Response on success:
{
"message" : "Element created successfully.",
"id" : 14501,
"statusCode" : "CREATED"
}
"""
action = 'create'
try:
# Verify minimum required fields to proceed with create (event_type and uid)
# Required field: event_type
if 'eventType' not in request_data:
message = 'No eventType in request data to create event.'
raise Exception(message)
event_type = request_data['eventType']
if event_type not in get_event_types():
message = 'The event type provided %s is invalid.' % event_type
raise Exception(message)
# If event type create/update not yet supported, raise exception.
if event_type not in get_supported_event_types():
message = 'Event type %s \'%s\' is not supported.' % (event_type, action)
raise Exception(message)
# Required field: assetUid
uid = None
if event_type != 'CRUISE_INFO':
if 'assetUid' not in request_data:
message = 'No assetUid in request data to create event %s.' % event_type
raise Exception(message)
uid = request_data['assetUid']
if not uid:
message = 'The assetUid is empty or null, unable to create a %s event.' % event_type
raise Exception(message)
        # The event name is not provided by the UI; fill it with the event type unless this is a CALIBRATION event.
if event_type != 'CALIBRATION_DATA':
request_data['eventName'] = event_type
# Validate data fields to ensure required fields are provided for create.
data = events_validate_all_required_fields_are_provided(event_type, request_data, action=action)
events_validate_user_required_fields_are_provided(event_type, data, action=action)
# Add '@class' field to data; remove 'lastModifiedTimestamp' field; ensure eventId is set to -1.
# Get event class
event_class = get_event_class(event_type)
data['@class'] = event_class
if 'lastModifiedTimestamp' in data:
del data['lastModifiedTimestamp']
# Set eventId for create
data['eventId'] = -1
# Create event.
        id = perform_uframe_create_event(event_type, uid, data)
if id < 1:
message = 'Failed to create %s event for asset with uid %s' % (event_type, uid)
raise Exception(message)
# Get newly created event and return.
event = get_uframe_event(id)
# Post process event content for display.
event = post_process_event(event)
return event
except Exception as err:
message = str(err)
raise Exception(message)
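# Illustrative call (field values hypothetical): create_event_type checks
# eventType and, for non-cruise events, assetUid up front; the remaining
# required fields are enforced by the validation helpers.
#
#   request_data = {'eventType': 'ASSET_STATUS', 'assetUid': 'A00001', ...}
#   event = create_event_type(request_data)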
# Prepare event for display.
def post_process_event(event):
""" Process event from uframe before returning for display (in UI).
"""
try:
if not event:
message = 'The event provided for post processing is empty.'
raise Exception(message)
if '@class' in event:
del event['@class']
if 'eventType' in event:
if event['eventType'] == 'ASSET_STATUS':
event['status'] = convert_status_value_for_display(event['status'])
return event
except Exception as err:
message = 'Error post-processing event for display. %s' % str(err)
raise Exception(message)
# Update event.
def update_event_type(id, data):
""" Update an existing event, no success return event, on error raise exception.
"""
debug = False
action = 'update'
try:
# Verify minimum required fields to proceed with update (event_type and uid)
if 'eventId' not in data:
message = 'An event id must be provided in the request data.'
raise Exception(message)
# Required field: event_type
if 'eventType' not in data:
message = 'An event type must be provided in the request data.'
raise Exception(message)
# Get event type, verify if valid event type.
event_type = data['eventType']
if event_type not in get_event_types():
message = 'The event type provided %s is invalid.' % event_type
raise Exception(message)
# If event type create/update not yet supported, raise exception.
if event_type not in get_supported_event_types():
message = 'Event type %s \'%s\' is not supported.' % (event_type, action)
raise Exception(message)
        # The event name is not provided by the UI; fill it with the event type unless this is a CALIBRATION event.
if event_type != 'CALIBRATION_DATA':
data['eventName'] = event_type
# Validate data fields to ensure required fields are provided for update.
data = events_validate_all_required_fields_are_provided(event_type, data, action=action)
events_validate_user_required_fields_are_provided(event_type, data, action=action)
# Verify uid provided in data for all event types except CRUISE_INFO.
uid = None
if event_type != 'CRUISE_INFO' and event_type != 'DEPLOYMENT':
# Required field: assetUid
if 'assetUid' not in data:
message = 'No assetUid in request data to update event %s.' % event_type
raise Exception(message)
uid = data['assetUid']
if not uid or uid is None:
message = 'The assetUid provided is empty or null, unable to update event %s.' % event_type
raise Exception(message)
# Verify eventId provided and of type int.
# Required field: eventId
if 'eventId' not in data:
message = 'No eventId in request data to update event %s.' % event_type
raise Exception(message)
if not isinstance(data['eventId'], int):
message = 'The event id value (%r) must be an integer, it is type: %s' % \
(data['eventId'], str(type(data['eventId'])))
raise Exception(message)
if data['eventId'] != id:
message = 'The event id (\'%r\') provided in data is not equal to id (%d) in url.' % (data['eventId'], id)
raise Exception(message)
# Get event class and add @class field to data
event_class = get_event_class(event_type)
data['@class'] = event_class
# Update event in uframe
updated_id = uframe_put_event(event_type, id, data)
if updated_id <= 0:
message = 'Failed to update %s event in uframe for id %d.' % (event_type, id)
raise Exception(message)
        if updated_id != id:
            message = 'The event id returned from event update (%d) is not equal to original id (%d).' % (updated_id, id)
            raise Exception(message)
# Get updated event, return event
event = get_uframe_event(id)
        if debug: print('\n event: %r' % event)
if event['eventType'] == 'ASSET_STATUS':
event['status'] = convert_status_value_for_display(event['status'])
        if debug: print('\n event[status]: %r' % event['status'])
return event
except Exception as err:
message = str(err)
raise Exception(message)
def perform_uframe_create_event(event_type, uid, data):
""" Create event using uframe interface determined by event type.
"""
try:
if event_type != 'CRUISE_INFO':
if uid is None or not uid:
message = 'Unable to create %s event for asset with uid: \'%s\'.' % (event_type, uid)
raise Exception(message)
        # Create cruise_info event using /events/cruise POST
if event_type == 'CRUISE_INFO':
id = uframe_create_cruise(event_type, data)
# Create calibration_data event
elif event_type == 'CALIBRATION_DATA':
if not isinstance(data['eventId'], int):
message = 'The event id value (%r) must be an integer, it is type: %s' % \
(data['eventId'], str(type(data['eventId'])))
raise Exception(message)
id = create_calibration_data_event(event_type, uid, data)
# Create event using /events/postto/uid POST
else:
if event_type == 'DEPLOYMENT':
message = 'Create event type DEPLOYMENT is not supported through the events create/update interface.'
raise Exception(message)
id = uframe_postto(event_type, uid, data)
if id is None or id <= 0:
message = 'Failed to create and retrieve event from uframe for asset uid: \'%s\'. ' % uid
raise Exception(message)
return id
except Exception as err:
message = str(err)
raise Exception(message)
def create_calibration_data_event(event_type, uid, data):
success_codes = [201, 204]
try:
# create calibration data using /assets/cal POST
event_name = None
if 'eventName' in data:
event_name = data['eventName']
if calibration_data_exists(uid, event_name):
message = 'Calibration data event name \'%s\' exists for asset with uid \'%s\'.' % (event_name, uid)
raise Exception(message)
status_code = uframe_create_calibration(event_type, uid, data)
if status_code not in success_codes:
message = 'Failed to create calibration data for asset uid \'%s\', event name \'%s\'.' % (uid, event_name)
raise Exception(message)
# Get eventId for calibration data event where eventName is event_name and asset uid is uid.
id, _ = get_calibration_event_id(uid, event_name)
return id
except Exception as err:
message = str(err)
raise Exception(message)
def get_calibration_event_id(uid, event_name):
"""
"calibration" : [ {
"@class" : ".XCalibration",
"name" : "CC_a1",
"calData" : [ {
"@class" : ".XCalibrationData",
"values" : [ -1.493703E-4 ],
"dimensions" : [ 1 ],
"cardinality" : 0,
"comments" : "Test entry",
"eventId" : 31534,
"assetUid" : "A01682",
"eventType" : "CALIBRATION_DATA",
"eventName" : "CC_a1",
"eventStartTime" : 1443614400000,
"eventStopTime" : null,
"notes" : null,
"dataSource" : "API:createCalibration:2016-08-31T22:37:22.096Z",
"lastModifiedTimestamp" : 1472683042096
} ]
} ],
"""
id = None
last_modified = None
try:
asset = uframe_get_asset_by_uid(uid)
calibrations = asset['calibration']
for cal in calibrations:
if 'name' in cal:
if cal['name'] == event_name:
# Get eventId
if 'calData' in cal:
for item in cal['calData']:
if 'eventId' in item:
id = item['eventId']
last_modified = item['lastModifiedTimestamp']
break
if id is None:
message = 'Failed to locate calibration name \'%s\' in asset with uid %s.' % (event_name, uid)
raise Exception(message)
return id, last_modified
except Exception as err:
message = str(err)
raise Exception(message)
def calibration_data_exists(uid, event_name):
""" Determine if calibration data contains event name. Return True or False.
"""
result = False
try:
try:
event_id, _ = get_calibration_event_id(uid, event_name)
except:
event_id = 0
if event_id > 0:
result = True
return result
except Exception as err:
message = str(err)
raise Exception(message)
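# Example (hypothetical usage sketch; the uid and payload below are
# illustrative, not real uframe records):
#   data = {'eventName': 'CC_a1', 'eventType': 'CALIBRATION_DATA',
#           'eventId': -1, 'assetUid': 'A01682'}
#   event_id = perform_uframe_create_event('CALIBRATION_DATA', 'A01682', data)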
|
{
"content_hash": "d662980b79c1d48a53d5d1878b71aa89",
"timestamp": "",
"source": "github",
"line_count": 317,
"max_line_length": 121,
"avg_line_length": 40.27444794952682,
"alnum_prop": 0.5856505052087413,
"repo_name": "asascience-open/ooi-ui-services",
"id": "b33d269ae1fce1bb5f1014af1b8af225fb03afbd",
"size": "12767",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ooiservices/app/uframe/events_create_update.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "3233848"
},
{
"name": "TSQL",
"bytes": "5336"
}
],
"symlink_target": ""
}
|
num = input('Enter number: ')
# Display the output
print('The binary equivalent is {0}'.format(bin(int(num))[2:]))
|
{
"content_hash": "9dee17261ba430b992299770aaf690a6",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 63,
"avg_line_length": 28.25,
"alnum_prop": 0.6814159292035398,
"repo_name": "HarendraSingh22/Python-Guide-for-Beginners",
"id": "8cbe92b3be95dbe88ef455a19303ef222fe79789",
"size": "132",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Code/binary.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "6480"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('basicviz', '0003_auto_20160717_1943'),
]
operations = [
migrations.AddField(
model_name='document',
name='metadata',
field=models.CharField(max_length=1024, null=True),
preserve_default=True,
),
]
|
{
"content_hash": "5133a7c4f2f3bc90da646582d90cfeae",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 63,
"avg_line_length": 22.57894736842105,
"alnum_prop": 0.5944055944055944,
"repo_name": "sdrogers/ms2ldaviz",
"id": "cd77e6fbd8394f12ae210fde89ddd5e79fb63e21",
"size": "453",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ms2ldaviz/basicviz/migrations/0004_document_metadata.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "155389"
},
{
"name": "Dockerfile",
"bytes": "324"
},
{
"name": "HTML",
"bytes": "281089"
},
{
"name": "JavaScript",
"bytes": "564464"
},
{
"name": "Jupyter Notebook",
"bytes": "22354299"
},
{
"name": "Python",
"bytes": "897444"
},
{
"name": "Shell",
"bytes": "561"
}
],
"symlink_target": ""
}
|
"""Traceback utilities."""
import dis
import keyword
import opcode
import os
import sys
import tokenize
from pysec.core import Object
from pysec.io import fd
from pysec.xsplit import xlines
NORESULT = object()
SCOPE_BUILTIN = 'builtin'
SCOPE_GLOBAL = 'global'
SCOPE_LOCAL = 'local'
def getvar(name, frame=None):
"""Search a name in local scope, global scope, and in builtins"""
if frame is None:
frame = sys._getframe().f_back
val = frame.f_locals.get(name, NORESULT)
if val is not NORESULT:
return SCOPE_LOCAL, val
val = frame.f_globals.get(name, NORESULT)
if val is not NORESULT:
return SCOPE_GLOBAL, val
builtins = frame.f_globals.get('__builtins__', NORESULT)
if builtins is not NORESULT:
        if isinstance(builtins, dict):
val = builtins.get(name, NORESULT)
if val is not NORESULT:
return SCOPE_BUILTIN, val
else:
val = getattr(builtins, name, NORESULT)
if val is not NORESULT:
return SCOPE_BUILTIN, val
return None, None
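# Example (sketch; the values shown are illustrative):
#   import os
#   scope, val = getvar('os')        # -> ('global', <module 'os'>)
#   scope, val = getvar('missing')   # -> (None, None)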
class StringReadline(Object):
def __init__(self, text):
self.lines = xlines(text, eol='\n', keep_eol=1)
def readline(self):
try:
return self.lines.next()
except StopIteration:
return ''
NOVAL = object()
def linevars(code, frame=None):
if frame is None:
frame = sys._getframe().f_back
last = None
parent = None
prefix = ''
value = NOVAL
for tok_type, token, start, end, line in tokenize.generate_tokens(StringReadline(code).readline):
if tok_type == tokenize.NEWLINE:
break
elif tok_type == tokenize.NAME and token not in keyword.kwlist:
if last == '.':
if parent is not NOVAL:
value = getattr(parent, token, NOVAL)
yield prefix + token, prefix, value
else:
where, value = getvar(token, frame)
yield token, where, value
elif token == '.':
prefix = '%s%s.' % (prefix, last)
parent = value
else:
parent = None
prefix = ''
last = token
class Hook(Object):
def __init__(self, formatter, out=sys.stderr):
self.out = out
self.formatter = formatter
def __call__(self, exc_type, exc_val, exc_tb):
self.handle((exc_type, exc_val, exc_tb))
def handle(self, info=None):
self.out.write(self.formatter(*(info or sys.exc_info())))
self.out.flush()
def set_excepthook(fmt, out=sys.stderr):
sys.excepthook = Hook(fmt, out)
def reset_excepthook():
sys.excepthook = sys.__excepthook__
# Utilities to format traceback
# inspired by dis.disassemble() in Python 2.7
def disassemble(co):
code = co.co_code
labels = dis.findlabels(code)
linestarts = dict(dis.findlinestarts(co))
n = len(code)
i = 0
extended_arg = 0
free = None
lineno = None
while i < n:
c = code[i]
op = ord(c)
lineno = linestarts.get(i, lineno)
is_label = i in labels
ist = i
i += 1
if op >= opcode.HAVE_ARGUMENT:
oparg = ord(code[i]) + ord(code[i + 1]) * 256 + extended_arg
extended_arg = 0
i += 2
if op == opcode.EXTENDED_ARG:
extended_arg = oparg * 65536L
if op in opcode.hasconst:
arg = co.co_consts[oparg]
elif op in opcode.hasname:
arg = co.co_names[oparg]
elif op in opcode.hasjrel:
arg = i + oparg
elif op in opcode.haslocal:
arg = co.co_varnames[oparg]
elif op in opcode.hascompare:
arg = opcode.cmp_op[oparg]
elif op in opcode.hasfree:
if free is None:
free = co.co_cellvars + co.co_freevars
arg = free[oparg]
else:
arg = NOVAL
else:
arg = NOVAL
yield ist, lineno, is_label, opcode.opname[op], arg
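# Example (sketch, Python 2; the printed layout is illustrative):
#   def f(x):
#       return x + 1
#   for offset, lineno, is_label, opname, arg in disassemble(f.func_code):
#       print offset, opname, arg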
def short_tb(exc_type, exc_value, exc_tb):
traceback = []
while exc_tb:
traceback.append('{%r, %r, %r}' % (exc_tb.tb_frame.f_code.co_filename,
exc_tb.tb_frame.f_code.co_name,
exc_tb.tb_lineno))
exc_tb = exc_tb.tb_next
return 'Traceback: %s\nError: %s %r\n' % (' -> '.join(traceback), exc_type.__name__, str(exc_value))
def long_tb(exc_type, exc_value, exc_tb, max_length=80):
traceback = ['Traceback (most recent call last):']
lvl = 0
while exc_tb:
path = os.path.abspath(exc_tb.tb_frame.f_code.co_filename)
lineno = exc_tb.tb_lineno - 1
traceback.append('[%d]' % lvl)
traceback.append(' Where: %r:%d %r' % (path, lineno, exc_tb.tb_frame.f_code.co_name))
with fd.File.open(path, fd.FO_READEX) as src:
line = src.get_line(lineno)
traceback.append(' Line: %r' % line.strip())
traceback.append(' Variables:')
for token, where, val in linevars(line.strip(), exc_tb.tb_frame):
if where is None:
val = '<undefined>'
val = repr(val)
traceback.append(' %r: %s' % (token, '%s...' % val[:max_length] if len(val) > max_length else val))
exc_tb = exc_tb.tb_next
lvl += 1
return '%s\n[ERROR]\n%s: %r\n' % ('\n'.join(traceback), exc_type.__name__, str(exc_value))
def deep_tb(exc_type, exc_value, exc_tb):
traceback = ['=== Traceback (most recent call last) ===']
lvl = 0
prev = None
while exc_tb:
path = os.path.abspath(exc_tb.tb_frame.f_code.co_filename)
lineno = exc_tb.tb_lineno
traceback.append('[%d]' % lvl)
traceback.append(' File: %r' % path)
traceback.append(' Function: %r' % exc_tb.tb_frame.f_code.co_name)
with fd.File.open(path, fd.FO_READEX) as src:
line = src.get_line(lineno - 1)
traceback.append(' Line: (%d) %r' % (lineno, line.strip()))
traceback.append(' Variables:')
for token, where, val in linevars(line.strip(), exc_tb.tb_frame):
if where is None:
val = '<undefined>'
traceback.append(' %r: %r' % (token, val))
traceback.append(' Code:')
for ist, lineno, label, op, arg in disassemble(exc_tb.tb_frame.f_code):
prefix = ' >> ' if ist == exc_tb.tb_lasti else ' '
postfix = ' << %s' % exc_type.__name__ if ist == exc_tb.tb_lasti else ''
if lineno == exc_tb.tb_lineno:
if arg is NOVAL:
traceback.append(' %s%s%s' % (prefix, op, postfix))
else:
traceback.append(' %s%s %r%s' % (prefix, op, arg, postfix))
prev = exc_tb
exc_tb = exc_tb.tb_next
lvl += 1
traceback.append('[ERROR]')
traceback.append('%s: %r' % (exc_type.__name__, str(exc_value)))
traceback.append('=========================================\n')
return '\n'.join(traceback)
|
{
"content_hash": "2da13a86ee72f379bfd8a65cee91edac",
"timestamp": "",
"source": "github",
"line_count": 218,
"max_line_length": 114,
"avg_line_length": 32.944954128440365,
"alnum_prop": 0.5367585630743525,
"repo_name": "chencoyote/owasp-pysec",
"id": "8147e878e0615130148f7bbe273cece140dd3923",
"size": "7932",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pysec/tb.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
}
|
from nova import db
from nova import exception
from nova import objects
from nova.objects import base as obj_base
from nova.objects import fields
from nova.openstack.common import timeutils
from nova import utils
FIXED_IP_OPTIONAL_ATTRS = ['instance', 'network']
class FixedIP(obj_base.NovaPersistentObject, obj_base.NovaObject):
# Version 1.0: Initial version
# Version 1.1: Added virtual_interface field
# Version 1.2: Instance version 1.14
# Version 1.3: Instance 1.15
# Version 1.4: Added default_route field
VERSION = '1.4'
fields = {
'id': fields.IntegerField(),
'address': fields.IPV4AndV6AddressField(),
'network_id': fields.IntegerField(nullable=True),
'virtual_interface_id': fields.IntegerField(nullable=True),
'instance_uuid': fields.UUIDField(nullable=True),
'allocated': fields.BooleanField(),
'leased': fields.BooleanField(),
'reserved': fields.BooleanField(),
'host': fields.StringField(nullable=True),
'default_route': fields.BooleanField(),
'instance': fields.ObjectField('Instance', nullable=True),
'network': fields.ObjectField('Network', nullable=True),
'virtual_interface': fields.ObjectField('VirtualInterface',
nullable=True),
}
def obj_make_compatible(self, primitive, target_version):
target_version = utils.convert_version_to_tuple(target_version)
if target_version < (1, 4) and 'default_route' in primitive:
del primitive['default_route']
if target_version < (1, 3) and 'instance' in primitive:
self.instance.obj_make_compatible(
primitive['instance']['nova_object.data'], '1.14')
primitive['instance']['nova_object.version'] = '1.14'
if target_version < (1, 2) and 'instance' in primitive:
self.instance.obj_make_compatible(
primitive['instance']['nova_object.data'], '1.13')
primitive['instance']['nova_object.version'] = '1.13'
@property
def floating_ips(self):
return objects.FloatingIPList.get_by_fixed_ip_id(self._context,
self.id)
@staticmethod
def _from_db_object(context, fixedip, db_fixedip, expected_attrs=None):
if expected_attrs is None:
expected_attrs = []
for field in fixedip.fields:
if field in ('virtual_interface', 'default_route'):
# NOTE(danms): These fields are only set when doing a
# FixedIPList.get_by_network() because it's a relatively
# special-case thing, so skip them here
continue
if field not in FIXED_IP_OPTIONAL_ATTRS:
fixedip[field] = db_fixedip[field]
# NOTE(danms): Instance could be deleted, and thus None
if 'instance' in expected_attrs:
fixedip.instance = objects.Instance._from_db_object(
context,
objects.Instance(context),
db_fixedip['instance']) if db_fixedip['instance'] else None
if 'network' in expected_attrs:
fixedip.network = objects.Network._from_db_object(
context, objects.Network(context), db_fixedip['network'])
fixedip._context = context
fixedip.obj_reset_changes()
return fixedip
@obj_base.remotable_classmethod
def get_by_id(cls, context, id, expected_attrs=None):
if expected_attrs is None:
expected_attrs = []
get_network = 'network' in expected_attrs
db_fixedip = db.fixed_ip_get(context, id, get_network=get_network)
return cls._from_db_object(context, cls(context), db_fixedip,
expected_attrs)
@obj_base.remotable_classmethod
def get_by_address(cls, context, address, expected_attrs=None):
if expected_attrs is None:
expected_attrs = []
db_fixedip = db.fixed_ip_get_by_address(context, str(address),
columns_to_join=expected_attrs)
return cls._from_db_object(context, cls(context), db_fixedip,
expected_attrs)
@obj_base.remotable_classmethod
def get_by_floating_address(cls, context, address):
db_fixedip = db.fixed_ip_get_by_floating_address(context, str(address))
if db_fixedip is not None:
return cls._from_db_object(context, cls(context), db_fixedip)
@obj_base.remotable_classmethod
def get_by_network_and_host(cls, context, network_id, host):
db_fixedip = db.fixed_ip_get_by_network_host(context, network_id, host)
return cls._from_db_object(context, cls(context), db_fixedip)
@obj_base.remotable_classmethod
def associate(cls, context, address, instance_uuid, network_id=None,
reserved=False):
db_fixedip = db.fixed_ip_associate(context, address, instance_uuid,
network_id=network_id,
reserved=reserved)
return cls._from_db_object(context, cls(context), db_fixedip)
@obj_base.remotable_classmethod
def associate_pool(cls, context, network_id, instance_uuid=None,
host=None):
db_fixedip = db.fixed_ip_associate_pool(context, network_id,
instance_uuid=instance_uuid,
host=host)
return cls._from_db_object(context, cls(context), db_fixedip)
@obj_base.remotable_classmethod
def disassociate_by_address(cls, context, address):
db.fixed_ip_disassociate(context, address)
@obj_base.remotable_classmethod
def _disassociate_all_by_timeout(cls, context, host, time_str):
time = timeutils.parse_isotime(time_str)
return db.fixed_ip_disassociate_all_by_timeout(context, host, time)
@classmethod
def disassociate_all_by_timeout(cls, context, host, time):
return cls._disassociate_all_by_timeout(context, host,
timeutils.isotime(time))
@obj_base.remotable
def create(self, context):
updates = self.obj_get_changes()
if 'id' in updates:
raise exception.ObjectActionError(action='create',
reason='already created')
if 'address' in updates:
updates['address'] = str(updates['address'])
db_fixedip = db.fixed_ip_create(context, updates)
self._from_db_object(context, self, db_fixedip)
@obj_base.remotable
def save(self, context):
updates = self.obj_get_changes()
if 'address' in updates:
raise exception.ObjectActionError(action='save',
reason='address is not mutable')
db.fixed_ip_update(context, str(self.address), updates)
self.obj_reset_changes()
@obj_base.remotable
def disassociate(self, context):
db.fixed_ip_disassociate(context, str(self.address))
self.instance_uuid = None
self.instance = None
self.obj_reset_changes(['instance_uuid', 'instance'])
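# Example (hypothetical sketch; needs a real RequestContext and a populated
# database, so the address below is illustrative):
#   fip = FixedIP.get_by_address(context, '10.0.0.5',
#                                expected_attrs=['network'])
#   fip.disassociate(context)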
class FixedIPList(obj_base.ObjectListBase, obj_base.NovaObject):
# Version 1.0: Initial version
# Version 1.1: Added get_by_network()
# Version 1.2: FixedIP <= version 1.2
# Version 1.3: FixedIP <= version 1.3
# Version 1.4: FixedIP <= version 1.4
VERSION = '1.4'
fields = {
'objects': fields.ListOfObjectsField('FixedIP'),
}
child_versions = {
'1.0': '1.0',
'1.1': '1.1',
'1.2': '1.2',
'1.3': '1.3',
'1.4': '1.4',
}
@obj_base.remotable_classmethod
def get_all(cls, context):
db_fixedips = db.fixed_ip_get_all(context)
return obj_base.obj_make_list(context, cls(context),
objects.FixedIP, db_fixedips)
@obj_base.remotable_classmethod
def get_by_instance_uuid(cls, context, instance_uuid):
db_fixedips = db.fixed_ip_get_by_instance(context, instance_uuid)
return obj_base.obj_make_list(context, cls(context),
objects.FixedIP, db_fixedips)
@obj_base.remotable_classmethod
def get_by_host(cls, context, host):
db_fixedips = db.fixed_ip_get_by_host(context, host)
return obj_base.obj_make_list(context, cls(context),
objects.FixedIP, db_fixedips)
@obj_base.remotable_classmethod
def get_by_virtual_interface_id(cls, context, vif_id):
db_fixedips = db.fixed_ips_by_virtual_interface(context, vif_id)
return obj_base.obj_make_list(context, cls(context),
objects.FixedIP, db_fixedips)
@obj_base.remotable_classmethod
def get_by_network(cls, context, network, host=None):
ipinfo = db.network_get_associated_fixed_ips(context,
network['id'],
host=host)
if not ipinfo:
return []
fips = cls(context=context, objects=[])
for info in ipinfo:
inst = objects.Instance(context=context,
uuid=info['instance_uuid'],
hostname=info['instance_hostname'],
created_at=info['instance_created'],
updated_at=info['instance_updated'])
vif = objects.VirtualInterface(context=context,
id=info['vif_id'],
address=info['vif_address'])
fip = objects.FixedIP(context=context,
address=info['address'],
instance_uuid=info['instance_uuid'],
network_id=info['network_id'],
virtual_interface_id=info['vif_id'],
allocated=info['allocated'],
leased=info['leased'],
default_route=info['default_route'],
instance=inst,
virtual_interface=vif)
fips.objects.append(fip)
fips.obj_reset_changes()
return fips
@obj_base.remotable_classmethod
    def bulk_create(cls, context, fixed_ips):
ips = []
for fixedip in fixed_ips:
ip = obj_base.obj_to_primitive(fixedip)
if 'id' in ip:
raise exception.ObjectActionError(action='create',
reason='already created')
ips.append(ip)
db.fixed_ip_bulk_create(context, ips)
|
{
"content_hash": "10905b796530867653bb0698ed29a5b8",
"timestamp": "",
"source": "github",
"line_count": 253,
"max_line_length": 79,
"avg_line_length": 43.63241106719368,
"alnum_prop": 0.5633662469426578,
"repo_name": "nash-x/hws",
"id": "2482b635e5a59b8bbf0b0e3c5567c9fe3f606154",
"size": "11648",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "nova/objects/fixed_ip.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "1043"
},
{
"name": "PLpgSQL",
"bytes": "12782"
},
{
"name": "Python",
"bytes": "20443623"
},
{
"name": "Shell",
"bytes": "4643"
}
],
"symlink_target": ""
}
|
'''A domain specific language for timeseries analysis and manipulation.
Created using ply (http://www.dabeaz.com/ply/), a pure Python implementation
of the popular compiler construction tools lex and yacc.
'''
from ccy import todate
from ..conf import settings
from ..exc import ExpressionError, CouldNotParse
from ..api.timeseries import is_timeseries, ts_merge
from ..api.scatter import is_scatter
from ..data import providers
from .functions import function_registry
from .rules import parsefunc
def parse(timeseries_expression, method=None, functions=None, debug=False):
'''Function for parsing :ref:`timeseries expressions <dsl-script>`.
    If successful, it returns an instance of :class:`dynts.dsl.Expr` which
    can be used to populate timeseries or scatters once data is available.
Parsing is implemented using the ply_ module,
an implementation of lex and yacc parsing tools for Python.
:parameter expression: A :ref:`timeseries expressions <dsl-script>` string.
:parameter method: Not yet used.
:parameter functions: dictionary of functions to use when parsing.
If not provided the :data:`dynts.function_registry`
will be used.
Default ``None``.
:parameter debug: debug flag for ply_. Default ``False``.
For examples and usage check the :ref:`dsl documentation <dsl>`.
.. _ply: http://www.dabeaz.com/ply/
'''
if not parsefunc:
raise ExpressionError('Could not parse. No parser installed.')
functions = functions if functions is not None else function_registry
expr_str = str(timeseries_expression).lower()
return parsefunc(expr_str, functions, method, debug)
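# Example (sketch; the returned symbols are illustrative):
#   expr = parse('2*min(GS, window=30)')
#   expr.symbols()   # symbols required for evaluation, e.g. ['GS']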
def evaluate(expression, start=None, end=None, loader=None, logger=None,
backend=None, **kwargs):
'''Evaluate a timeseries ``expression`` into
    an instance of :class:`dynts.dsl.DSLResult` which can be used
to obtain timeseries and/or scatters.
This is probably the most used function of the library.
:parameter expression: A timeseries expression string or an instance
of :class:`dynts.dsl.Expr` obtained using the :func:`~.parse`
function.
:parameter start: Start date or ``None``.
    :parameter end: End date or ``None``. If not provided, today's value is used.
:parameter loader: Optional :class:`dynts.data.TimeSerieLoader`
class or instance to use.
Default ``None``.
:parameter logger: Optional python logging instance, used if you required
logging.
Default ``None``.
:parameter backend: :class:`dynts.TimeSeries` backend name or ``None``.
The ``expression`` is parsed and the :class:`~.Symbol` are sent to the
:class:`dynts.data.TimeSerieLoader` instance for retrieving
actual timeseries data.
It returns an instance of :class:`~.DSLResult`.
Typical usage::
>>> from dynts import api
>>> r = api.evaluate('min(GS,window=30)')
>>> r
min(GS,window=30)
>>> ts = r.ts()
'''
if isinstance(expression, str):
expression = parse(expression)
if not expression or expression.malformed():
raise CouldNotParse(expression)
symbols = expression.symbols()
start = start if not start else todate(start)
end = end if not end else todate(end)
data = providers.load(symbols, start, end, loader=loader,
logger=logger, backend=backend, **kwargs)
return DSLResult(expression, data, backend=backend)
class DSLResult:
'''Class holding the results of an interpreted expression.
Instances of this class are returned when invoking the
:func:`dynts.evaluate` high level function.
.. attribute:: expression
An instance of :class:`dynts.dsl.Expr` obtained when interpreting a
    timeseries expression string via :func:`dynts.parse`.
.. attribute:: data
data which is used to populate timeseries or scatters.
.. attribute:: backend
backend used when populating timeseries.
'''
    def __init__(self, expression, data, backend=None):
self.expression = expression
self.data = data
self.backend = backend or settings.backend
def __repr__(self):
return self.expression.__repr__()
def __str__(self):
return self.__repr__()
def unwind(self):
if not hasattr(self, '_ts'):
self._unwind()
return self
def ts(self):
'''The associated timeseries, if available.'''
self.unwind()
return self._ts
def xy(self):
'''The associated scatters, if available.'''
self.unwind()
return self._xy
def _unwind(self):
res = self.expression.unwind(self.data, self.backend)
self._ts = None
self._xy = None
if is_timeseries(res):
self._ts = res
        elif res and isinstance(res, list):
tss = []
xys = []
for v in res:
if is_timeseries(v):
tss.append(v)
elif is_scatter(v):
xys.append(v)
if tss:
self._ts = ts_merge(tss)
if xys:
self._xy = xys
elif is_scatter(res):
self._xy = res
def dump(self, format, **kwargs):
ts = self.ts()
xy = self.xy()
if is_timeseries(ts):
ts = ts.dump(format, **kwargs)
else:
ts = None
if xy:
if is_scatter(xy):
xy = [xy]
for el in xy:
ts = el.dump(format, container=ts, **kwargs)
return ts
|
{
"content_hash": "9fd9a67b6548afa0dc255aaa3aa9dba5",
"timestamp": "",
"source": "github",
"line_count": 170,
"max_line_length": 79,
"avg_line_length": 34.21764705882353,
"alnum_prop": 0.6064981949458483,
"repo_name": "quantmind/dynts",
"id": "d042e8b3876e0957b983cd822cce617b59dcaf43",
"size": "5817",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dynts/dsl/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "202792"
},
{
"name": "Visual Basic",
"bytes": "11474"
}
],
"symlink_target": ""
}
|
from django.test import TestCase, Client
from django.core.urlresolvers import reverse
# from django.core.exceptions import ValidationError
from django.utils import six
from django.utils.translation import ugettext as _
from django.contrib.auth.models import User
from deck.tests.test_unit import EVENT_DATA
from deck.models import Event
class JuryTest(TestCase):
fixtures = ['user.json', 'socialapp.json']
def setUp(self):
self.client = Client()
self.event_data = EVENT_DATA.copy()
self.client.login(username='admin', password='admin')
def test_event_invite_to_jury(self):
event = Event.objects.create(**self.event_data)
user = User.objects.get(username='user')
invite_url = reverse('event_invite_to_jury',
kwargs={'slug': event.slug})
response = self.client.post(invite_url,
{'email': user.email},
follow=True)
message = _(u'The "@%s" are successfully joined to the Jury.') % user
self.assertIn(six.text_type(message), six.text_type(response.content))
self.assertEquals(2, event.jury.users.count())
self.assertQuerysetEqual(event.jury.users.all(),
['<User: admin>', '<User: user>'],
ordered=False)
def test_event_invite_to_jury_an_already_joined_user(self):
event = Event.objects.create(**self.event_data)
email = {'email': 'admin@speakerfight.com'}
invite_url = reverse('event_invite_to_jury',
kwargs={'slug': event.slug})
response = self.client.post(invite_url, email, follow=True)
message = _(u'The "@admin" already is being part of this jury.')
self.assertIn(six.text_type(message), six.text_type(response.content))
self.assertEquals(1, event.jury.users.count())
self.assertQuerysetEqual(event.jury.users.all(),
['<User: admin>'],
ordered=False)
def test_event_invite_to_jury_a_not_speakerfight_user(self):
event = Event.objects.create(**self.event_data)
email = {'email': 'new_user@speakerfight.com'}
invite_url = reverse('event_invite_to_jury',
kwargs={'slug': event.slug})
response = self.client.post(invite_url, email, follow=True)
message = _(u'The "new_user@speakerfight.com" are not a Speakerfight '
u'user. For now, we just allow already joined users.')
self.assertIn(six.text_type(message), six.text_type(response.content))
self.assertEquals(1, event.jury.users.count())
self.assertQuerysetEqual(event.jury.users.all(),
['<User: admin>'],
ordered=False)
def test_event_remove_from_jury(self):
event = Event.objects.create(**self.event_data)
user = User.objects.get(username='user')
event.jury.users.add(user)
remove_from_jury = reverse('event_remove_from_jury',
kwargs={'slug': event.slug,
'user_pk': user.pk})
response = self.client.post(remove_from_jury,
{'email': user.email},
follow=True)
message = _(u'The "@user" was successfully removed from the Jury.')
self.assertIn(six.text_type(message), six.text_type(response.content))
self.assertEquals(1, event.jury.users.count())
self.assertQuerysetEqual(event.jury.users.all(),
['<User: admin>'],
ordered=False)
|
{
"content_hash": "289e2027c4182ce804231b51d95f50b7",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 78,
"avg_line_length": 46.82716049382716,
"alnum_prop": 0.5670972844713946,
"repo_name": "luanfonceca/speakerfight",
"id": "aabaee07dae9456e91b928a9eb64a313261d5879",
"size": "3793",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "jury/tests/test_functional.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "64338"
},
{
"name": "Dockerfile",
"bytes": "146"
},
{
"name": "HTML",
"bytes": "114755"
},
{
"name": "JavaScript",
"bytes": "65026"
},
{
"name": "Makefile",
"bytes": "836"
},
{
"name": "Python",
"bytes": "217175"
}
],
"symlink_target": ""
}
|
"""This module provides a bunch of helper functions to deal with user
registration stuff."""
from models import Event, Registration
from django.contrib.auth.models import User
from datetime import datetime, timedelta
def is_waitlisted(student, event):
"""Checks to see if a student is waitlisted for a given event."""
    if student is None:
        return False
    student_registrations = Registration.objects.filter(
        event=event,
        student=student,
        waitlist=True,
        cancelled=False)
    return student_registrations.exists()
def is_registered(student, event):
"""Checks to see if a student is registered for a given event."""
    if student is None:
        return False
    student_registrations = Registration.objects.filter(
        event=event,
        student=student,
        waitlist=False,
        cancelled=False)
    return student_registrations.exists()
def is_cancelled(student, event):
"""Checks to see if a student has cancelled for a given event."""
    if student is None:
        return False
    student_registrations = Registration.objects.filter(
        event=event,
        student=student,
        cancelled=False)
    return not student_registrations.exists()
def cancel_registration(student, event):
"""Cancels the users registration for an event."""
registration = Registration.objects.filter(
event=event,
student=student,
cancelled=False)[0]
    registration.date_cancelled = datetime.now()
    registration.cancelled = True
registration.save()
def promote_waitlistee(event):
"""Promotes a waitlisted student to registered for an event."""
registrations = Registration.objects.filter(
event=event,
waitlist=True,
cancelled=False)
if len(registrations) == 0:
return None
registration = registrations[0]
print "registration: "+str(registration)
registration.waitlist=False
registration.date_promoted=datetime.now()
registration.late_promotion=event.is_late_promotion
registration.save()
return registration.student
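# Example (hypothetical sketch; assumes Event and Registration rows exist):
#   cancel_registration(student, event)
#   promoted = promote_waitlistee(event)  # move the first waitlisted student up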
|
{
"content_hash": "3e7cc3a3ee5949793809242edf931587",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 69,
"avg_line_length": 24.416666666666668,
"alnum_prop": 0.7313505607020966,
"repo_name": "knowledgecommonsdc/kcdc3",
"id": "48b3d66103348a0ab92d076757864743fcc9b997",
"size": "2051",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kcdc3/apps/classes/helpers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "117417"
},
{
"name": "HTML",
"bytes": "169043"
},
{
"name": "JavaScript",
"bytes": "224601"
},
{
"name": "Pascal",
"bytes": "2537"
},
{
"name": "Perl",
"bytes": "41847"
},
{
"name": "Puppet",
"bytes": "57668"
},
{
"name": "Python",
"bytes": "731320"
},
{
"name": "Ruby",
"bytes": "323384"
},
{
"name": "Shell",
"bytes": "3935"
}
],
"symlink_target": ""
}
|
import sys
import subprocess
import ctypes
from unittest import TestCase
from . import glk_test_program
import zvm.glk as glk
class AnyGlkTestsMixIn:
"""Integration tests that can be used with any Glk library. Aside
from serving as integration tests, they also ensure that the Glk
specification is observed precisely, and as such they can be used
to validate the integrity of any Glk library in question.
This is a mix-in class, intended to be mixed-in with an existing
subclass of unittest.TestCase. Subclasses should be sure to set
up the glkLib attribute of the class to point to a Glk library, or
else lots of AttributeError exceptions will occur."""
def testGestaltCharInputProvidesValidInformation(self):
# This tests Section 2.3 of the Glk spec 0.7.0, ensuring that
# all characters and special key codes are reported to be
# typable or untypable for single-key input.
for char in range(0, 1000):
result = self.glkLib.glk_gestalt(glk.gestalt_CharInput, char)
assert result == glk.TRUE or result == glk.FALSE
keycodes = [ getattr(glk, keycode)
for keycode in list(glk.__dict__.keys())
if keycode.startswith("keycode_") ]
for keycode in keycodes:
result = self.glkLib.glk_gestalt(glk.gestalt_CharInput, keycode)
assert result == glk.TRUE or result == glk.FALSE
def testGestaltLineInputProvidesValidInformation(self):
# This tests Section 2.2 of the Glk spec 0.7.0, ensuring that
# all characters are reported to be either typable or
# untypable for line input.
for char in range(0, 1000):
result = self.glkLib.glk_gestalt(glk.gestalt_LineInput, char)
assert result == glk.TRUE or result == glk.FALSE
def testGestaltLineInputReportsAsciiCharsAreTypable(self):
# This tests Section 2.2 of the Glk spec 0.7.0, ensuring that
# the ASCII character set is guaranteed to be typable by the
# end-user for line input.
for char in range(32, 127):
result = self.glkLib.glk_gestalt(glk.gestalt_LineInput, char)
self.assertEqual(result, glk.TRUE)
def testGestaltLineInputReportsProperCharsAreUntypable(self):
# This tests Section 2.2 of the Glk spec 0.7.0, ensuring that
# certain characters are reported to be untypable by the
# end-user for line input.
untypableCharacters = (list(range(0, 32)) +
list(range(127, 160)) +
list(range(256, 1000)))
for char in untypableCharacters:
result = self.glkLib.glk_gestalt(glk.gestalt_LineInput, char)
self.assertEqual(result, glk.FALSE)
def testGestaltCharOutputReportsProperCharsAreUnprintable(self):
# This tests Section 2.1 of the Glk spec 0.7.0, ensuring that
# certain characters are reported to be unprintable.
unprintableCharacters = (list(range(0, 10)) +
list(range(11, 32)) +
list(range(127, 160)) +
list(range(256, 1000)))
for char in unprintableCharacters:
numGlyphs = glk.glui32()
result = self.glkLib.glk_gestalt_ext(
glk.gestalt_CharOutput,
char,
ctypes.pointer(numGlyphs),
1
)
self.assertEqual(result, glk.gestalt_CharOutput_CannotPrint)
def testGestaltCharOutputProvidesValidInformation(self):
# This tests Section 2.1 of the Glk spec 0.7.0, ensuring the
# integrity of the information provided by the CharOutput
# gestalt.
for char in range(0, 1000):
numGlyphs = glk.glui32()
result = self.glkLib.glk_gestalt_ext(
glk.gestalt_CharOutput,
char,
ctypes.pointer(numGlyphs),
1
)
# Ensure that the result is a valid value.
if result == glk.gestalt_CharOutput_CannotPrint:
pass
elif result == glk.gestalt_CharOutput_ExactPrint:
# Any character that can be printed exactly should
# only be one glyph long.
self.assertEqual(numGlyphs.value, 1)
elif result == glk.gestalt_CharOutput_ApproxPrint:
# Any character that can be printed approximately
# should be at least one glyph long.
assert numGlyphs.value >= 1
else:
raise AssertionError( "result is invalid." )
def testGestaltRandomCombinationsWork(self):
        # Section 1.7 of the Glk spec 0.7.0 says that calling
# glk_gestalt(x, y) is equivalent to calling
# glk_gestalt_ext(x, y, NULL, 0), and it also says that it's
# valid for x and y to be any value, so we will test both of
# these things here.
for x in range(0, 100):
for y in range(0, 100):
self.assertEqual(
self.glkLib.glk_gestalt(x, y),
self.glkLib.glk_gestalt_ext(x, y, glk.NULL, 0)
)
class CheapGlkTests(TestCase, AnyGlkTestsMixIn):
"""CheapGlk-specific tests."""
def setUp(self):
self.glkLib = glk_test_program.CheapGlkLibrary()
def testGestaltVersionWorks(self):
CHEAP_GLK_VERSION = 0x705
self.assertEqual(
self.glkLib.glk_gestalt(glk.gestalt_Version, 0),
CHEAP_GLK_VERSION
)
self.assertEqual(
self.glkLib.glk_gestalt_ext(glk.gestalt_Version, 0, glk.NULL, 0),
CHEAP_GLK_VERSION
)
def testGlkProgramWorks(self):
glk_program = subprocess.Popen(
args = [sys.executable, glk_test_program.__file__],
stdin = subprocess.PIPE,
stdout = subprocess.PIPE,
stderr = subprocess.PIPE,
universal_newlines = True
)
stdout, stderr = glk_program.communicate("quit\n")
self.assertEqual(glk_program.returncode, 0)
assert "Hello, world!" in stdout
assert "Goodbye, world!" in stdout
assert len(stderr) == 0
|
{
"content_hash": "1f5ffc0e512ecb971bf29256759414fb",
"timestamp": "",
"source": "github",
"line_count": 160,
"max_line_length": 77,
"avg_line_length": 39.73125,
"alnum_prop": 0.6009123800534844,
"repo_name": "sussman/zvm",
"id": "d8e41835066a3cb6e8e188c8aa72dc35f5ed92d9",
"size": "6522",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/glk_tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "1168037"
},
{
"name": "C",
"bytes": "291502"
},
{
"name": "Python",
"bytes": "184777"
}
],
"symlink_target": ""
}
|
import numpy as np
class LoLTypeInference:
"""
Given a list of lists this class infers the types of
each of the attributes.
"""
def __init__(self, cat_thresh=1000,
num_thresh = 0.25,
addr_thresh=0.25):
"""
cat_thresh parsing threshold for categorical attrs
num_thresh parsing threshold for numerical attributes
"""
self.cat_thresh = cat_thresh
self.num_thresh = num_thresh
self.addr_thresh = addr_thresh
def getDataTypes(self, data):
"""
Given data in a list of lists format this returns a list
of attribute types
"""
num_attributes = len(data[0])
type_array = []
for i in range(0, num_attributes):
#if self.__is_addr(data, i):
# type_array.append('address')
if self.__is_num(data, i):
type_array.append('numerical')
#print i, [d[i] for d in data]
elif self.__is_cat(data, i):
type_array.append('categorical')
else:
type_array.append('string')
return type_array
def __is_num(self, data, col):
"""
Internal method to determine whether data is numerical
"""
float_count = 0.0
for datum in data:
try:
float(datum[col].strip())
float_count = float_count + 1.0
            except Exception:
                pass
        return (float_count / len(data) > self.num_thresh)
def __is_cat(self, data, col):
"""
Internal method to determine whether data is categorical
defaults to number of distinct values is N/LogN
"""
counts = {}
for datum in data:
d = datum[col]
if d not in counts:
counts[d] = 0
counts[d] = counts[d] + 1
        total = len([k for k in counts if counts[k] > 1]) + 0.0
        return (total < self.cat_thresh)
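# Example (sketch; the expected output is illustrative):
#   rows = [['1.5', 'red'], ['2.0', 'blue'], ['3.25', 'red']]
#   print LoLTypeInference().getDataTypes(rows)   # ['numerical', 'categorical']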
|
{
"content_hash": "59ad2706b5119206a2610a2fb324ea46",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 59,
"avg_line_length": 20.2875,
"alnum_prop": 0.6383240911891559,
"repo_name": "sjyk/activedetect",
"id": "ad32a601aed6d5a56bec58c4a6ba78f0cb3834e6",
"size": "1623",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "activedetect/loaders/type_inference.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "OpenEdge ABL",
"bytes": "291603"
},
{
"name": "Python",
"bytes": "65632"
}
],
"symlink_target": ""
}
|
import numpy as np
from scipy.integrate import ode
from .common import validate_tol, validate_first_step, warn_extraneous
from .base import OdeSolver, DenseOutput
class LSODA(OdeSolver):
"""Adams/BDF method with automatic stiffness detection and switching.
This is a wrapper to the Fortran solver from ODEPACK [1]_. It switches
automatically between the nonstiff Adams method and the stiff BDF method.
The method was originally detailed in [2]_.
Parameters
----------
fun : callable
Right-hand side of the system. The calling signature is ``fun(t, y)``.
Here ``t`` is a scalar, and there are two options for the ndarray ``y``:
It can either have shape (n,); then ``fun`` must return array_like with
shape (n,). Alternatively it can have shape (n, k); then ``fun``
must return an array_like with shape (n, k), i.e. each column
corresponds to a single column in ``y``. The choice between the two
options is determined by `vectorized` argument (see below). The
vectorized implementation allows a faster approximation of the Jacobian
by finite differences (required for this solver).
t0 : float
Initial time.
y0 : array_like, shape (n,)
Initial state.
t_bound : float
Boundary time - the integration won't continue beyond it. It also
determines the direction of the integration.
first_step : float or None, optional
Initial step size. Default is ``None`` which means that the algorithm
should choose.
min_step : float, optional
Minimum allowed step size. Default is 0.0, i.e., the step size is not
bounded and determined solely by the solver.
max_step : float, optional
Maximum allowed step size. Default is np.inf, i.e., the step size is not
bounded and determined solely by the solver.
rtol, atol : float and array_like, optional
Relative and absolute tolerances. The solver keeps the local error
estimates less than ``atol + rtol * abs(y)``. Here `rtol` controls a
relative accuracy (number of correct digits). But if a component of `y`
is approximately below `atol`, the error only needs to fall within
the same `atol` threshold, and the number of correct digits is not
guaranteed. If components of y have different scales, it might be
beneficial to set different `atol` values for different components by
passing array_like with shape (n,) for `atol`. Default values are
1e-3 for `rtol` and 1e-6 for `atol`.
jac : None or callable, optional
Jacobian matrix of the right-hand side of the system with respect to
``y``. The Jacobian matrix has shape (n, n) and its element (i, j) is
equal to ``d f_i / d y_j``. The function will be called as
``jac(t, y)``. If None (default), the Jacobian will be
approximated by finite differences. It is generally recommended to
provide the Jacobian rather than relying on a finite-difference
approximation.
lband, uband : int or None
Parameters defining the bandwidth of the Jacobian,
i.e., ``jac[i, j] != 0 only for i - lband <= j <= i + uband``. Setting
these requires your jac routine to return the Jacobian in the packed format:
the returned array must have ``n`` columns and ``uband + lband + 1``
rows in which Jacobian diagonals are written. Specifically
``jac_packed[uband + i - j , j] = jac[i, j]``. The same format is used
in `scipy.linalg.solve_banded` (check for an illustration).
These parameters can be also used with ``jac=None`` to reduce the
number of Jacobian elements estimated by finite differences.
vectorized : bool, optional
Whether `fun` is implemented in a vectorized fashion. A vectorized
implementation offers no advantages for this solver. Default is False.
Attributes
----------
n : int
Number of equations.
status : string
Current status of the solver: 'running', 'finished' or 'failed'.
t_bound : float
Boundary time.
direction : float
Integration direction: +1 or -1.
t : float
Current time.
y : ndarray
Current state.
t_old : float
Previous time. None if no steps were made yet.
nfev : int
Number of evaluations of the right-hand side.
njev : int
Number of evaluations of the Jacobian.
References
----------
.. [1] A. C. Hindmarsh, "ODEPACK, A Systematized Collection of ODE
Solvers," IMACS Transactions on Scientific Computation, Vol 1.,
pp. 55-64, 1983.
.. [2] L. Petzold, "Automatic selection of methods for solving stiff and
nonstiff systems of ordinary differential equations", SIAM Journal
on Scientific and Statistical Computing, Vol. 4, No. 1, pp. 136-148,
1983.
"""
def __init__(self, fun, t0, y0, t_bound, first_step=None, min_step=0.0,
max_step=np.inf, rtol=1e-3, atol=1e-6, jac=None, lband=None,
uband=None, vectorized=False, **extraneous):
warn_extraneous(extraneous)
super(LSODA, self).__init__(fun, t0, y0, t_bound, vectorized)
if first_step is None:
first_step = 0 # LSODA value for automatic selection.
else:
first_step = validate_first_step(first_step, t0, t_bound)
first_step *= self.direction
if max_step == np.inf:
max_step = 0 # LSODA value for infinity.
elif max_step <= 0:
raise ValueError("`max_step` must be positive.")
if min_step < 0:
raise ValueError("`min_step` must be nonnegative.")
rtol, atol = validate_tol(rtol, atol, self.n)
solver = ode(self.fun, jac)
solver.set_integrator('lsoda', rtol=rtol, atol=atol, max_step=max_step,
min_step=min_step, first_step=first_step,
lband=lband, uband=uband)
solver.set_initial_value(y0, t0)
# Inject t_bound into rwork array as needed for itask=5.
solver._integrator.rwork[0] = self.t_bound
solver._integrator.call_args[4] = solver._integrator.rwork
self._lsoda_solver = solver
def _step_impl(self):
solver = self._lsoda_solver
integrator = solver._integrator
# From lsoda.step and lsoda.integrate itask=5 means take a single
# step and do not go past t_bound.
itask = integrator.call_args[2]
integrator.call_args[2] = 5
solver._y, solver.t = integrator.run(
solver.f, solver.jac or (lambda: None), solver._y, solver.t,
self.t_bound, solver.f_params, solver.jac_params)
integrator.call_args[2] = itask
if solver.successful():
self.t = solver.t
self.y = solver._y
# From LSODA Fortran source njev is equal to nlu.
self.njev = integrator.iwork[12]
self.nlu = integrator.iwork[12]
return True, None
else:
return False, 'Unexpected istate in LSODA.'
def _dense_output_impl(self):
iwork = self._lsoda_solver._integrator.iwork
rwork = self._lsoda_solver._integrator.rwork
order = iwork[14]
h = rwork[11]
yh = np.reshape(rwork[20:20 + (order + 1) * self.n],
(self.n, order + 1), order='F').copy()
return LsodaDenseOutput(self.t_old, self.t, h, order, yh)
class LsodaDenseOutput(DenseOutput):
def __init__(self, t_old, t, h, order, yh):
super(LsodaDenseOutput, self).__init__(t_old, t)
self.h = h
self.yh = yh
self.p = np.arange(order + 1)
def _call_impl(self, t):
if t.ndim == 0:
x = ((t - self.t) / self.h) ** self.p
else:
x = ((t - self.t) / self.h) ** self.p[:, None]
return np.dot(self.yh, x)
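# Example (sketch; uses only the public OdeSolver stepping interface):
#   solver = LSODA(lambda t, y: -0.5 * y, 0.0, np.array([1.0]), t_bound=10.0)
#   while solver.status == 'running':
#       solver.step()
#   # solver.y is now close to np.exp(-0.5 * 10.0)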
|
{
"content_hash": "9a1cf0aa5ef6a10904cb88a23530cb17",
"timestamp": "",
"source": "github",
"line_count": 188,
"max_line_length": 84,
"avg_line_length": 42.670212765957444,
"alnum_prop": 0.6166791323859386,
"repo_name": "pizzathief/scipy",
"id": "9b695c7b77e8e02667e7ba1c68e81cc717323bed",
"size": "8022",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "scipy/integrate/_ivp/lsoda.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "4395775"
},
{
"name": "C++",
"bytes": "649767"
},
{
"name": "Dockerfile",
"bytes": "1236"
},
{
"name": "Fortran",
"bytes": "5367672"
},
{
"name": "MATLAB",
"bytes": "4346"
},
{
"name": "Makefile",
"bytes": "778"
},
{
"name": "Python",
"bytes": "12449825"
},
{
"name": "Shell",
"bytes": "538"
},
{
"name": "TeX",
"bytes": "52106"
}
],
"symlink_target": ""
}
|
from setuptools import setup  # setuptools (not distutils) is needed for install_requires
VERSION = '0.9.5'
setup(
name = 'finder_colors',
py_modules = ['finder_colors'],
scripts = ['finder_colors.py'],
version = VERSION,
description = 'Get/Set the Colors set on files by OSX Finder',
long_description=open('README.rst','r').read(),
author = 'Daniel Fairhead',
author_email = 'danthedeckie@gmail.com',
url = 'https://github.com/danthedeckie/finder_colors',
download_url = 'https://github.com/danthedeckie/finder_colors/tarball/' + VERSION,
keywords = ['OSX', 'OS X', 'Finder', 'Colors', 'Utility', 'Colours'],
install_requires = ['xattr', ],
classifiers = ['Development Status :: 4 - Beta',
'License :: OSI Approved :: MIT License',
'Intended Audience :: Developers',
'Programming Language :: Python :: 2.7',
'Environment :: Console',
'Environment :: MacOS X',
'Intended Audience :: System Administrators',
'Operating System :: MacOS :: MacOS X',
'Topic :: Desktop Environment :: File Managers',
'Topic :: Software Development :: Libraries',
'Topic :: System :: Systems Administration',
'Topic :: Utilities',
],
)
|
{
"content_hash": "0580519a44e8cb62b92fc2e5549e4f3b",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 86,
"avg_line_length": 43.29032258064516,
"alnum_prop": 0.5476900149031296,
"repo_name": "danthedeckie/finder_colors",
"id": "91d2bee8cdc973e2d2dd1a89f6d388f01d32f743",
"size": "1342",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "6078"
}
],
"symlink_target": ""
}
|
from dataclasses import dataclass
from typing import List
@dataclass
class URLRedirect:
old_url: str
new_url: str
API_DOCUMENTATION_REDIRECTS: List[URLRedirect] = [
# Add URL redirects for REST API documentation here:
URLRedirect("/api/delete-stream", "/api/archive-stream"),
]
POLICY_DOCUMENTATION_REDIRECTS: List[URLRedirect] = [
# Add URL redirects for policy documentation here:
URLRedirect("/privacy/", "/policies/privacy"),
URLRedirect("/terms/", "/policies/terms"),
]
HELP_DOCUMENTATION_REDIRECTS: List[URLRedirect] = [
# Add URL redirects for help center documentation here:
URLRedirect("/help/recent-topics", "/help/recent-conversations"),
URLRedirect(
"/help/add-custom-profile-fields",
"/help/custom-profile-fields",
),
URLRedirect(
"/help/enable-enter-to-send",
"/help/mastering-the-compose-box#toggle-between-ctrl-enter-and-enter-to-send-a-message",
),
URLRedirect(
"/help/change-the-default-language-for-your-organization",
"/help/configure-organization-language",
),
URLRedirect("/help/delete-a-stream", "/help/archive-a-stream"),
URLRedirect("/help/change-the-topic-of-a-message", "/help/rename-a-topic"),
URLRedirect("/help/configure-missed-message-emails", "/help/email-notifications"),
URLRedirect("/help/add-an-alert-word", "/help/pm-mention-alert-notifications#alert-words"),
URLRedirect("/help/test-mobile-notifications", "/help/mobile-notifications"),
URLRedirect(
"/help/troubleshooting-desktop-notifications",
"/help/desktop-notifications#troubleshooting-desktop-notifications",
),
URLRedirect(
"/help/change-notification-sound", "/help/desktop-notifications#change-notification-sound"
),
URLRedirect("/help/configure-message-notification-emails", "/help/email-notifications"),
URLRedirect("/help/disable-new-login-emails", "/help/email-notifications#new-login-emails"),
# The `help/about-streams-and-topics` redirect is particularly important,
# because the old URL appears in links from Welcome Bot messages.
URLRedirect("/help/about-streams-and-topics", "/help/streams-and-topics"),
URLRedirect("/help/community-topic-edits", "/help/configure-who-can-edit-topics"),
URLRedirect(
"/help/only-allow-admins-to-add-emoji", "/help/custom-emoji#change-who-can-add-custom-emoji"
),
URLRedirect(
"/help/configure-who-can-add-custom-emoji",
"/help/custom-emoji#change-who-can-add-custom-emoji",
),
URLRedirect("/help/add-custom-emoji", "/help/custom-emoji"),
URLRedirect("/help/night-mode", "/help/dark-theme"),
URLRedirect("/help/web-public-streams", "/help/public-access-option"),
]
LANDING_PAGE_REDIRECTS = [
# Add URL redirects for corporate landing pages here.
URLRedirect("/new-user/", "/hello"),
URLRedirect("/developer-community/", "/development-community"),
URLRedirect("/for/companies/", "/for/business"),
URLRedirect("/for/working-groups-and-communities/", "/for/communities"),
]
DOCUMENTATION_REDIRECTS = (
API_DOCUMENTATION_REDIRECTS + POLICY_DOCUMENTATION_REDIRECTS + HELP_DOCUMENTATION_REDIRECTS
)
|
{
"content_hash": "5fcfbb8ec079709ac618b206e9b802ec",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 100,
"avg_line_length": 41.66233766233766,
"alnum_prop": 0.69856608478803,
"repo_name": "zulip/zulip",
"id": "868ced5bddb32ffd07170af9913ebe4e17b057de",
"size": "3208",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "zerver/lib/url_redirects.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "509211"
},
{
"name": "Dockerfile",
"bytes": "4219"
},
{
"name": "Emacs Lisp",
"bytes": "157"
},
{
"name": "HTML",
"bytes": "696430"
},
{
"name": "Handlebars",
"bytes": "384277"
},
{
"name": "JavaScript",
"bytes": "4098367"
},
{
"name": "Perl",
"bytes": "10163"
},
{
"name": "Puppet",
"bytes": "112433"
},
{
"name": "Python",
"bytes": "10336945"
},
{
"name": "Ruby",
"bytes": "3166"
},
{
"name": "Shell",
"bytes": "147162"
},
{
"name": "TypeScript",
"bytes": "286785"
}
],
"symlink_target": ""
}
|
import os
from setuptools import setup
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name="multipip",
version="0.1",
licence="Apache License (2.0)",
description="multipip and py2rpm",
long_description=read("README.rst"),
author="Alessio Ababilov",
author_email="ilovegnulinux@gmail.com",
url="https://github.com/aababilov/multipip",
scripts=[
"multipip",
"py2rpm",
],
packages=[],
py_modules=[],
install_requires=read("requirements.txt")
)
|
{
"content_hash": "89ad1cdba9ce53b662313e5f0467199b",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 70,
"avg_line_length": 22.44,
"alnum_prop": 0.6310160427807486,
"repo_name": "aababilov/multipip",
"id": "261bbcfed454fd187e473d4afa4178e284390b01",
"size": "624",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "29594"
}
],
"symlink_target": ""
}
|
import os
import sys
import datetime
from importlib import import_module
try:
from sphinx_astropy.conf.v1 import * # noqa
except ImportError:
print('ERROR: the documentation requires the sphinx-astropy package to be installed')
sys.exit(1)
# Get configuration information from setup.cfg
from configparser import ConfigParser
conf = ConfigParser()
conf.read([os.path.join(os.path.dirname(__file__), '..', 'setup.cfg')])
setup_cfg = dict(conf.items('metadata'))
# -- General configuration ----------------------------------------------------
# By default, highlight as Python 3.
highlight_language = 'python3'
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.2'
# To perform a Sphinx version check that needs to be more specific than
# major.minor, call `check_sphinx_version("x.y.z")` here.
# check_sphinx_version("1.2.1")
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns.append('_templates')
# This is added to the end of RST files - a good place to put substitutions to
# be used globally.
rst_epilog += """
"""
# -- Project information ------------------------------------------------------
# This does not *have* to match the package name, but typically does
project = setup_cfg['name']
author = setup_cfg['author']
copyright = '{0}, {1}'.format(
datetime.datetime.now().year, setup_cfg['author'])
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
import_module(setup_cfg['name'])
package = sys.modules[setup_cfg['name']]
# The short X.Y version.
version = package.__version__.split('-', 1)[0]
# The full version, including alpha/beta/rc tags.
release = package.__version__
# -- Options for HTML output --------------------------------------------------
# A NOTE ON HTML THEMES
# The global astropy configuration uses a custom theme, 'bootstrap-astropy',
# which is installed along with astropy. A different theme can be used or
# the options for this theme can be modified by overriding some of the
# variables set in the global configuration. The variables set in the
# global configuration are listed below, commented out.
# Add any paths that contain custom themes here, relative to this directory.
# To use a different custom theme, add the directory containing the theme.
#html_theme_path = []
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes. To override the custom theme, set this to the
# name of a builtin theme or the name of a custom theme in html_theme_path.
#html_theme = None
html_theme_options = {
'logotext1': 'grizli', # white, semi-bold
'logotext2': 'docs', # orange, light
'logotext3': '' # white, light
}
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = ''
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = os.path.join('_static', 'grizli.ico')
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = ''
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = '{0} v{1}'.format(project, release)
# Output file base name for HTML help builder.
htmlhelp_basename = project + 'doc'
# -- Options for LaTeX output -------------------------------------------------
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [('index', project + '.tex', project + u' Documentation',
author, 'manual')]
# -- Options for manual page output -------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [('index', project.lower(), project + u' Documentation',
[author], 1)]
# -- Options for the edit_on_github extension ---------------------------------
if eval(setup_cfg.get('edit_on_github')):
extensions += ['sphinx_astropy.ext.edit_on_github']
versionmod = import_module(setup_cfg['name'] + '.version')
edit_on_github_project = setup_cfg['github_project']
if versionmod.release:
edit_on_github_branch = "v" + versionmod.version
else:
edit_on_github_branch = "master"
edit_on_github_source_root = ""
edit_on_github_doc_root = "docs"
# -- Resolving issue number to links in changelog -----------------------------
github_issues_url = 'https://github.com/{0}/issues/'.format(setup_cfg['github_project'])
# -- Turn on nitpicky mode for sphinx (to warn about references not found) ----
#
# nitpicky = True
# nitpick_ignore = []
#
# Some warnings are impossible to suppress, and you can list specific references
# that should be ignored in a nitpick-exceptions file which should be inside
# the docs/ directory. The format of the file should be:
#
# <type> <class>
#
# for example:
#
# py:class astropy.io.votable.tree.Element
# py:class astropy.io.votable.tree.SimpleElement
# py:class astropy.io.votable.tree.SimpleElementWithContent
#
# Uncomment the following lines to enable the exceptions:
#
# for line in open('nitpick-exceptions'):
# if line.strip() == "" or line.startswith("#"):
# continue
# dtype, target = line.split(None, 1)
# target = target.strip()
# nitpick_ignore.append((dtype, six.u(target)))
|
{
"content_hash": "2d6a5e5b5e4f80603293d0ef07878dfd",
"timestamp": "",
"source": "github",
"line_count": 170,
"max_line_length": 89,
"avg_line_length": 34.45882352941177,
"alnum_prop": 0.6695117787640833,
"repo_name": "gbrammer/grizli",
"id": "69f790636b6e9572e7b9e504d853d9902f9a2165",
"size": "7344",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/conf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Cython",
"bytes": "20306"
},
{
"name": "Python",
"bytes": "2117532"
}
],
"symlink_target": ""
}
|
from cursor import Cursor
from character import Character
class Document:
def __init__(self):
self.characters = []
self.cursor = Cursor(self)
        self.filename = ''
def insert(self, character):
if not hasattr(character, 'character'):
character = Character(character)
self.characters.insert(self.cursor.position, character)
self.cursor.forward()
def delete(self):
del self.characters[self.cursor.position]
    def save(self):
        # the characters list holds Character objects, so write via the
        # string property rather than joining the objects directly
        with open(self.filename, 'w') as f:
            f.write(self.string)
@property
def string(self):
return ''.join((str(c) for c in self.characters))
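# A minimal usage sketch (assuming the companion cursor.Cursor and
# character.Character modules from the same chapter, whose Cursor exposes
# movement helpers such as home()):
#
#     doc = Document()
#     for ch in "hello":
#         doc.insert(ch)       # plain strings are wrapped in Character
#     doc.cursor.home()        # back to the start of the line
#     doc.insert('H')
#     doc.delete()             # drop the lowercase 'h' now under the cursor
#     print(doc.string)        # -> "Hello"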
|
{
"content_hash": "1b6c7f9d120f064f6fef7d27afdbb34f",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 63,
"avg_line_length": 27.88,
"alnum_prop": 0.6097560975609756,
"repo_name": "JMwill/wiki",
"id": "75ce2e8149147f88bb6c0294d4f3887c62ecc2a3",
"size": "697",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "notebook/book/python/Learn-OOP-with-Python/Chapter-5/Document/document.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "161247"
},
{
"name": "HTML",
"bytes": "249039"
},
{
"name": "JavaScript",
"bytes": "3940057"
},
{
"name": "Python",
"bytes": "105911"
},
{
"name": "QML",
"bytes": "2985"
},
{
"name": "Racket",
"bytes": "1296"
},
{
"name": "Ruby",
"bytes": "4126"
},
{
"name": "Scheme",
"bytes": "8261"
},
{
"name": "Shell",
"bytes": "4462"
},
{
"name": "Vim script",
"bytes": "12735"
},
{
"name": "Vue",
"bytes": "64287"
},
{
"name": "mupad",
"bytes": "3496"
}
],
"symlink_target": ""
}
|
import sys
import os
import csv
import itertools
import json
import urllib
SPREADSHEET_URL = 'https://docs.google.com/spreadsheet/pub?key=0Al9obkz_TwDLdEpqcEM5bVRSc3FXczF3Vl80Wk53eEE&output=csv'
class Vocabulary(object):
template = """\
var vocabulary = {texts!s};
"""
class Texts(list):
def __str__(self):
return json.dumps(self)
def __init__(self):
self.texts = Vocabulary.Texts()
def __str__(self):
return self.template.format(**self.__dict__)
def feed(self, field):
self.texts.append(field or None)
class Translation(Vocabulary):
template = """\
// {language} - {translators}
case '{code}':
return {texts!s};
"""
def __init__(self):
super(Translation, self).__init__()
self.language = None
self.code = None
self.translators = None
self.completeness = None
def feed(self, field):
if self.language is None:
self.language, self.code = (part.strip() for part in field.split('/'))
elif self.translators is None:
self.translators = field
elif self.completeness is None:
self.completeness = float(field[:-1])
else:
super(Translation, self).feed(field)
def main(output_dir):
sheet = urllib.urlopen(SPREADSHEET_URL)
vocabulary = Vocabulary()
translations = None
for ln, line in enumerate(csv.reader(sheet)):
fields = [field.decode('utf-8').strip() for field in line]
if translations is None:
translations = [Translation() for field in fields[1:]]
if ln > 2:
vocabulary.feed(fields[0])
for translation, field in itertools.izip(translations, fields[1:]):
translation.feed(field)
sheet.close()
with open(os.path.join(output_dir, 'vocabulary.js'), 'w') as vocabulary_file:
vocabulary_file.write(str(vocabulary))
for translation in translations:
with open(os.path.join(output_dir, translation.code + '.js'), 'w') as translation_file:
translation_file.write(str(translation))
if __name__ == '__main__':
main(os.path.abspath(sys.argv[1]) if len(sys.argv) == 2 else os.getcwd())
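# Sketch of the spreadsheet layout this script assumes (inferred from the
# feed() methods above; the real sheet may differ):
#
#   row 0:  ""          | "English / en"  | "Deutsch / de"  | ...
#   row 1:  ""          | translator(s)   | translator(s)   | ...
#   row 2:  ""          | "100%"          | "95%"           | ...
#   row 3+: source text | translation     | translation     | ...
#
# The first three rows are consumed by Translation.feed() as language/code,
# translators and completeness; the `ln > 2` guard in main() keeps them out
# of the base vocabulary.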
|
{
"content_hash": "3e74d15b1d4053fb53abd72157ffddc8",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 119,
"avg_line_length": 24.419753086419753,
"alnum_prop": 0.6890798786653185,
"repo_name": "eugenox/yays",
"id": "3e32da32b3aa28c7d21f2e1663f3ca217d3bb5eb",
"size": "2003",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "utility/translation.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "43749"
},
{
"name": "Makefile",
"bytes": "862"
},
{
"name": "Python",
"bytes": "5612"
},
{
"name": "VimL",
"bytes": "107"
}
],
"symlink_target": ""
}
|
"""DetCon/BYOL losses."""
import haiku as hk
import jax
import jax.numpy as jnp
from detcon.utils import helpers
def manual_cross_entropy(labels, logits, weight):
ce = - weight * jnp.sum(labels * jax.nn.log_softmax(logits), axis=-1)
return jnp.mean(ce)
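# Quick sanity check for manual_cross_entropy (a sketch; shapes are
# illustrative): with one-hot labels it reduces to the weighted negative
# log-probability of the true class.
#
#     labels = jnp.array([[0., 1.]])
#     logits = jnp.array([[0., 0.]])              # uniform distribution
#     manual_cross_entropy(labels, logits, 1.0)   # ~= log(2) ~= 0.693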
def byol_nce_detcon(pred1, pred2, target1, target2,
pind1, pind2, tind1, tind2,
temperature=0.1, use_replicator_loss=True,
local_negatives=True):
"""Compute the NCE scores from pairs of predictions and targets.
This implements the batched form of the loss described in
Section 3.1, Equation 3 in https://arxiv.org/pdf/2103.10957.pdf.
Args:
pred1 (jnp.array): the prediction from first view.
pred2 (jnp.array): the prediction from second view.
target1 (jnp.array): the projection from first view.
target2 (jnp.array): the projection from second view.
pind1 (jnp.array): mask indices for first view's prediction.
pind2 (jnp.array): mask indices for second view's prediction.
tind1 (jnp.array): mask indices for first view's projection.
tind2 (jnp.array): mask indices for second view's projection.
temperature (float): the temperature to use for the NCE loss.
use_replicator_loss (bool): use cross-replica samples.
    local_negatives (bool): whether to include local negatives.
Returns:
A single scalar loss for the XT-NCE objective.
"""
batch_size = pred1.shape[0]
num_rois = pred1.shape[1]
feature_dim = pred1.shape[-1]
infinity_proxy = 1e9 # Used for masks to proxy a very large number.
def make_same_obj(ind_0, ind_1):
same_obj = jnp.equal(ind_0.reshape([batch_size, num_rois, 1]),
ind_1.reshape([batch_size, 1, num_rois]))
return jnp.expand_dims(same_obj.astype("float32"), axis=2)
same_obj_aa = make_same_obj(pind1, tind1)
same_obj_ab = make_same_obj(pind1, tind2)
same_obj_ba = make_same_obj(pind2, tind1)
same_obj_bb = make_same_obj(pind2, tind2)
# L2 normalize the tensors to use for the cosine-similarity
pred1 = helpers.l2_normalize(pred1, axis=-1)
pred2 = helpers.l2_normalize(pred2, axis=-1)
target1 = helpers.l2_normalize(target1, axis=-1)
target2 = helpers.l2_normalize(target2, axis=-1)
if jax.device_count() > 1 and use_replicator_loss:
# Grab tensor across replicas and expand first dimension
target1_large = jax.lax.all_gather(target1, axis_name="i")
target2_large = jax.lax.all_gather(target2, axis_name="i")
# Fold into batch dimension
target1_large = target1_large.reshape(-1, num_rois, feature_dim)
target2_large = target2_large.reshape(-1, num_rois, feature_dim)
# Create the labels by using the current replica ID and offsetting.
replica_id = jax.lax.axis_index("i")
labels_idx = jnp.arange(batch_size) + replica_id * batch_size
labels_idx = labels_idx.astype(jnp.int32)
enlarged_batch_size = target1_large.shape[0]
labels_local = hk.one_hot(labels_idx, enlarged_batch_size)
labels_ext = hk.one_hot(labels_idx, enlarged_batch_size * 2)
else:
target1_large = target1
target2_large = target2
labels_local = hk.one_hot(jnp.arange(batch_size), batch_size)
labels_ext = hk.one_hot(jnp.arange(batch_size), batch_size * 2)
labels_local = jnp.expand_dims(jnp.expand_dims(labels_local, axis=2), axis=1)
labels_ext = jnp.expand_dims(jnp.expand_dims(labels_ext, axis=2), axis=1)
# Do our matmuls and mask out appropriately.
logits_aa = jnp.einsum("abk,uvk->abuv", pred1, target1_large) / temperature
logits_bb = jnp.einsum("abk,uvk->abuv", pred2, target2_large) / temperature
logits_ab = jnp.einsum("abk,uvk->abuv", pred1, target2_large) / temperature
logits_ba = jnp.einsum("abk,uvk->abuv", pred2, target1_large) / temperature
labels_aa = labels_local * same_obj_aa
labels_ab = labels_local * same_obj_ab
labels_ba = labels_local * same_obj_ba
labels_bb = labels_local * same_obj_bb
logits_aa = logits_aa - infinity_proxy * labels_local * same_obj_aa
logits_bb = logits_bb - infinity_proxy * labels_local * same_obj_bb
labels_aa = 0. * labels_aa
labels_bb = 0. * labels_bb
if not local_negatives:
logits_aa = logits_aa - infinity_proxy * labels_local * (1 - same_obj_aa)
logits_ab = logits_ab - infinity_proxy * labels_local * (1 - same_obj_ab)
logits_ba = logits_ba - infinity_proxy * labels_local * (1 - same_obj_ba)
logits_bb = logits_bb - infinity_proxy * labels_local * (1 - same_obj_bb)
labels_abaa = jnp.concatenate([labels_ab, labels_aa], axis=2)
labels_babb = jnp.concatenate([labels_ba, labels_bb], axis=2)
labels_0 = jnp.reshape(labels_abaa, [batch_size, num_rois, -1])
labels_1 = jnp.reshape(labels_babb, [batch_size, num_rois, -1])
num_positives_0 = jnp.sum(labels_0, axis=-1, keepdims=True)
num_positives_1 = jnp.sum(labels_1, axis=-1, keepdims=True)
labels_0 = labels_0 / jnp.maximum(num_positives_0, 1)
labels_1 = labels_1 / jnp.maximum(num_positives_1, 1)
obj_area_0 = jnp.sum(make_same_obj(pind1, pind1), axis=[2, 3])
obj_area_1 = jnp.sum(make_same_obj(pind2, pind2), axis=[2, 3])
weights_0 = jnp.greater(num_positives_0[..., 0], 1e-3).astype("float32")
weights_0 = weights_0 / obj_area_0
weights_1 = jnp.greater(num_positives_1[..., 0], 1e-3).astype("float32")
weights_1 = weights_1 / obj_area_1
logits_abaa = jnp.concatenate([logits_ab, logits_aa], axis=2)
logits_babb = jnp.concatenate([logits_ba, logits_bb], axis=2)
logits_abaa = jnp.reshape(logits_abaa, [batch_size, num_rois, -1])
logits_babb = jnp.reshape(logits_babb, [batch_size, num_rois, -1])
loss_a = manual_cross_entropy(labels_0, logits_abaa, weights_0)
loss_b = manual_cross_entropy(labels_1, logits_babb, weights_1)
loss = loss_a + loss_b
return loss
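# A minimal single-device smoke test (a sketch: shapes are illustrative, and
# the hk.one_hot fallback branch is exercised because jax.device_count() is 1
# on a plain CPU host; assumes the pinned haiku version provides hk.one_hot):
if __name__ == "__main__":
    import numpy as np
    rng = np.random.RandomState(0)
    b, m, d = 2, 4, 8  # batch size, masks per image, feature dim
    pred1, pred2, target1, target2 = (
        jnp.asarray(rng.randn(b, m, d), dtype=jnp.float32) for _ in range(4))
    pind1, pind2, tind1, tind2 = (
        jnp.asarray(rng.randint(0, 3, size=(b, m))) for _ in range(4))
    loss = byol_nce_detcon(pred1, pred2, target1, target2,
                           pind1, pind2, tind1, tind2)
    print("detcon loss:", float(loss))  # finite scalar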
|
{
"content_hash": "7e115d97adb82cf90e391b5de6796db6",
"timestamp": "",
"source": "github",
"line_count": 138,
"max_line_length": 79,
"avg_line_length": 41.869565217391305,
"alnum_prop": 0.6824160609207338,
"repo_name": "deepmind/detcon",
"id": "805c44c1849db3d7dc8e1e840969617526169348",
"size": "6452",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "utils/losses.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "130136"
},
{
"name": "Shell",
"bytes": "2059"
}
],
"symlink_target": ""
}
|
import abc
from typing import Awaitable, Callable, Dict, Optional, Sequence, Union
import google.api_core
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1, operations_v1
from google.api_core import retry as retries
import google.auth # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.longrunning import operations_pb2 # type: ignore
from google.oauth2 import service_account # type: ignore
import pkg_resources
from google.cloud.tpu_v2alpha1.types import cloud_tpu
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution(
"google-cloud-tpu",
).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
class TpuTransport(abc.ABC):
"""Abstract transport class for Tpu."""
AUTH_SCOPES = ("https://www.googleapis.com/auth/cloud-platform",)
DEFAULT_HOST: str = "tpu.googleapis.com"
def __init__(
self,
*,
host: str = DEFAULT_HOST,
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
api_audience: Optional[str] = None,
**kwargs,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is mutually exclusive with credentials.
scopes (Optional[Sequence[str]]): A list of scopes.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
                be used for service account credentials.
            api_audience (Optional[str]): The audience to use for GDCH
                credentials; defaults to the service host.
        """
scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES}
# Save the scopes.
self._scopes = scopes
# If no credentials are provided, then determine the appropriate
# defaults.
if credentials and credentials_file:
raise core_exceptions.DuplicateCredentialArgs(
"'credentials_file' and 'credentials' are mutually exclusive"
)
if credentials_file is not None:
credentials, _ = google.auth.load_credentials_from_file(
credentials_file, **scopes_kwargs, quota_project_id=quota_project_id
)
elif credentials is None:
credentials, _ = google.auth.default(
**scopes_kwargs, quota_project_id=quota_project_id
)
# Don't apply audience if the credentials file passed from user.
if hasattr(credentials, "with_gdch_audience"):
credentials = credentials.with_gdch_audience(
api_audience if api_audience else host
)
# If the credentials are service account credentials, then always try to use self signed JWT.
if (
always_use_jwt_access
and isinstance(credentials, service_account.Credentials)
and hasattr(service_account.Credentials, "with_always_use_jwt_access")
):
credentials = credentials.with_always_use_jwt_access(True)
# Save the credentials.
self._credentials = credentials
# Save the hostname. Default to port 443 (HTTPS) if none is specified.
if ":" not in host:
host += ":443"
self._host = host
def _prep_wrapped_messages(self, client_info):
# Precompute the wrapped methods.
self._wrapped_methods = {
self.list_nodes: gapic_v1.method.wrap_method(
self.list_nodes,
default_timeout=None,
client_info=client_info,
),
self.get_node: gapic_v1.method.wrap_method(
self.get_node,
default_timeout=None,
client_info=client_info,
),
self.create_node: gapic_v1.method.wrap_method(
self.create_node,
default_timeout=None,
client_info=client_info,
),
self.delete_node: gapic_v1.method.wrap_method(
self.delete_node,
default_timeout=None,
client_info=client_info,
),
self.stop_node: gapic_v1.method.wrap_method(
self.stop_node,
default_timeout=None,
client_info=client_info,
),
self.start_node: gapic_v1.method.wrap_method(
self.start_node,
default_timeout=None,
client_info=client_info,
),
self.update_node: gapic_v1.method.wrap_method(
self.update_node,
default_timeout=None,
client_info=client_info,
),
self.generate_service_identity: gapic_v1.method.wrap_method(
self.generate_service_identity,
default_timeout=None,
client_info=client_info,
),
self.list_accelerator_types: gapic_v1.method.wrap_method(
self.list_accelerator_types,
default_timeout=None,
client_info=client_info,
),
self.get_accelerator_type: gapic_v1.method.wrap_method(
self.get_accelerator_type,
default_timeout=None,
client_info=client_info,
),
self.list_runtime_versions: gapic_v1.method.wrap_method(
self.list_runtime_versions,
default_timeout=None,
client_info=client_info,
),
self.get_runtime_version: gapic_v1.method.wrap_method(
self.get_runtime_version,
default_timeout=None,
client_info=client_info,
),
self.get_guest_attributes: gapic_v1.method.wrap_method(
self.get_guest_attributes,
default_timeout=None,
client_info=client_info,
),
}
def close(self):
"""Closes resources associated with the transport.
.. warning::
Only call this method if the transport is NOT shared
with other clients - this may cause errors in other clients!
"""
raise NotImplementedError()
@property
def operations_client(self):
"""Return the client designed to process long-running operations."""
raise NotImplementedError()
@property
def list_nodes(
self,
) -> Callable[
[cloud_tpu.ListNodesRequest],
Union[cloud_tpu.ListNodesResponse, Awaitable[cloud_tpu.ListNodesResponse]],
]:
raise NotImplementedError()
@property
def get_node(
self,
) -> Callable[
[cloud_tpu.GetNodeRequest], Union[cloud_tpu.Node, Awaitable[cloud_tpu.Node]]
]:
raise NotImplementedError()
@property
def create_node(
self,
) -> Callable[
[cloud_tpu.CreateNodeRequest],
Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
]:
raise NotImplementedError()
@property
def delete_node(
self,
) -> Callable[
[cloud_tpu.DeleteNodeRequest],
Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
]:
raise NotImplementedError()
@property
def stop_node(
self,
) -> Callable[
[cloud_tpu.StopNodeRequest],
Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
]:
raise NotImplementedError()
@property
def start_node(
self,
) -> Callable[
[cloud_tpu.StartNodeRequest],
Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
]:
raise NotImplementedError()
@property
def update_node(
self,
) -> Callable[
[cloud_tpu.UpdateNodeRequest],
Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
]:
raise NotImplementedError()
@property
def generate_service_identity(
self,
) -> Callable[
[cloud_tpu.GenerateServiceIdentityRequest],
Union[
cloud_tpu.GenerateServiceIdentityResponse,
Awaitable[cloud_tpu.GenerateServiceIdentityResponse],
],
]:
raise NotImplementedError()
@property
def list_accelerator_types(
self,
) -> Callable[
[cloud_tpu.ListAcceleratorTypesRequest],
Union[
cloud_tpu.ListAcceleratorTypesResponse,
Awaitable[cloud_tpu.ListAcceleratorTypesResponse],
],
]:
raise NotImplementedError()
@property
def get_accelerator_type(
self,
) -> Callable[
[cloud_tpu.GetAcceleratorTypeRequest],
Union[cloud_tpu.AcceleratorType, Awaitable[cloud_tpu.AcceleratorType]],
]:
raise NotImplementedError()
@property
def list_runtime_versions(
self,
) -> Callable[
[cloud_tpu.ListRuntimeVersionsRequest],
Union[
cloud_tpu.ListRuntimeVersionsResponse,
Awaitable[cloud_tpu.ListRuntimeVersionsResponse],
],
]:
raise NotImplementedError()
@property
def get_runtime_version(
self,
) -> Callable[
[cloud_tpu.GetRuntimeVersionRequest],
Union[cloud_tpu.RuntimeVersion, Awaitable[cloud_tpu.RuntimeVersion]],
]:
raise NotImplementedError()
@property
def get_guest_attributes(
self,
) -> Callable[
[cloud_tpu.GetGuestAttributesRequest],
Union[
cloud_tpu.GetGuestAttributesResponse,
Awaitable[cloud_tpu.GetGuestAttributesResponse],
],
]:
raise NotImplementedError()
@property
def kind(self) -> str:
raise NotImplementedError()
__all__ = ("TpuTransport",)
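# A minimal subclass sketch (an illustration only: _StubTpuTransport, the
# stubbed list_nodes and the AnonymousCredentials are placeholders, not part
# of the real gRPC/REST transports that subclass this base):
#
#     from google.auth.credentials import AnonymousCredentials
#
#     class _StubTpuTransport(TpuTransport):
#         def close(self):
#             pass
#
#         @property
#         def list_nodes(self):
#             return lambda request: cloud_tpu.ListNodesResponse()
#
#     transport = _StubTpuTransport(credentials=AnonymousCredentials())
#     assert transport._host == "tpu.googleapis.com:443"  # default port added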
|
{
"content_hash": "8ae3c3a2055df4da2f27f04365a4ef25",
"timestamp": "",
"source": "github",
"line_count": 330,
"max_line_length": 101,
"avg_line_length": 33.75454545454546,
"alnum_prop": 0.592153694227489,
"repo_name": "googleapis/python-tpu",
"id": "827330e1e1ddfa3857cdf06d4c9d3cb0da7a1411",
"size": "11739",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "google/cloud/tpu_v2alpha1/services/tpu/transports/base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "828092"
},
{
"name": "Shell",
"bytes": "30651"
}
],
"symlink_target": ""
}
|
"""luigi bindings for Google Dataproc on Google Cloud"""
import os
import time
import logging
import luigi
logger = logging.getLogger('luigi-interface')
_dataproc_client = None
try:
import httplib2
import oauth2client.client
from googleapiclient import discovery
from googleapiclient.errors import HttpError
DEFAULT_CREDENTIALS = oauth2client.client.GoogleCredentials.get_application_default()
_dataproc_client = discovery.build('dataproc', 'v1', credentials=DEFAULT_CREDENTIALS, http=httplib2.Http())
except ImportError:
logger.warning("Loading Dataproc module without the python packages googleapiclient & oauth2client. \
This will crash at runtime if Dataproc functionality is used.")
def get_dataproc_client():
return _dataproc_client
def set_dataproc_client(client):
global _dataproc_client
_dataproc_client = client
class _DataprocBaseTask(luigi.Task):
gcloud_project_id = luigi.Parameter(significant=False, positional=False)
dataproc_cluster_name = luigi.Parameter(significant=False, positional=False)
dataproc_region = luigi.Parameter(default="global", significant=False, positional=False)
dataproc_client = get_dataproc_client()
class DataprocBaseTask(_DataprocBaseTask):
"""
Base task for running jobs in Dataproc. It is recommended to use one of the tasks specific to your job type.
Extend this class if you need fine grained control over what kind of job gets submitted to your Dataproc cluster.
"""
_job = None
_job_name = None
_job_id = None
def submit_job(self, job_config):
self._job = self.dataproc_client.projects().regions().jobs()\
.submit(projectId=self.gcloud_project_id, region=self.dataproc_region, body=job_config).execute()
self._job_id = self._job['reference']['jobId']
return self._job
def submit_spark_job(self, jars, main_class, job_args=[]):
job_config = {"job": {
"placement": {
"clusterName": self.dataproc_cluster_name
},
"sparkJob": {
"args": job_args,
"mainClass": main_class,
"jarFileUris": jars
}
}}
self.submit_job(job_config)
self._job_name = os.path.basename(self._job['sparkJob']['mainClass'])
logger.info("Submitted new dataproc job:{} id:{}".format(self._job_name, self._job_id))
return self._job
def submit_pyspark_job(self, job_file, extra_files=[], job_args=[]):
job_config = {"job": {
"placement": {
"clusterName": self.dataproc_cluster_name
},
"pysparkJob": {
"mainPythonFileUri": job_file,
"pythonFileUris": extra_files,
"args": job_args
}
}}
self.submit_job(job_config)
self._job_name = os.path.basename(self._job['pysparkJob']['mainPythonFileUri'])
logger.info("Submitted new dataproc job:{} id:{}".format(self._job_name, self._job_id))
return self._job
def wait_for_job(self):
if self._job is None:
raise Exception("You must submit a job before you can wait for it")
while True:
job_result = self.dataproc_client.projects().regions().jobs()\
.get(projectId=self.gcloud_project_id, region=self.dataproc_region, jobId=self._job_id).execute()
status = job_result['status']['state']
logger.info("Current dataproc status: {} job:{} id:{}".format(status, self._job_name, self._job_id))
if status == 'DONE':
break
if status == 'ERROR':
raise Exception(job_result['status']['details'])
time.sleep(5)
class DataprocSparkTask(DataprocBaseTask):
"""
    Runs a Spark job on your Dataproc cluster
"""
main_class = luigi.Parameter()
jars = luigi.Parameter(default="")
job_args = luigi.Parameter(default="")
def run(self):
self.submit_spark_job(main_class=self.main_class,
jars=self.jars.split(",") if self.jars else [],
job_args=self.job_args.split(",") if self.job_args else [])
self.wait_for_job()
class DataprocPysparkTask(DataprocBaseTask):
"""
    Runs a PySpark job on your Dataproc cluster
"""
job_file = luigi.Parameter()
extra_files = luigi.Parameter(default="")
job_args = luigi.Parameter(default="")
def run(self):
        self.submit_pyspark_job(job_file=self.job_file,
extra_files=self.extra_files.split(",") if self.extra_files else [],
job_args=self.job_args.split(",") if self.job_args else [])
self.wait_for_job()
class CreateDataprocClusterTask(_DataprocBaseTask):
""" Task for creating a Dataproc cluster. """
gcloud_zone = luigi.Parameter(default="europe-west1-c")
gcloud_network = luigi.Parameter(default="default")
master_node_type = luigi.Parameter(default="n1-standard-2")
master_disk_size = luigi.Parameter(default="100")
worker_node_type = luigi.Parameter(default="n1-standard-2")
worker_disk_size = luigi.Parameter(default="100")
worker_normal_count = luigi.Parameter(default="2")
worker_preemptible_count = luigi.Parameter(default="0")
def _get_cluster_status(self):
return self.dataproc_client.projects().regions().clusters()\
.get(projectId=self.gcloud_project_id, region=self.dataproc_region, clusterName=self.dataproc_cluster_name)\
.execute()
def complete(self):
try:
self._get_cluster_status()
return True # No (404) error so the cluster already exists
except HttpError as e:
if e.resp.status == 404:
return False # We got a 404 so the cluster doesn't exist yet
else:
raise e # Something's wrong ...
def run(self):
base_uri = "https://www.googleapis.com/compute/v1/projects/{}".format(self.gcloud_project_id)
cluster_conf = {
"clusterName": self.dataproc_cluster_name,
"projectId": self.gcloud_project_id,
"config": {
"configBucket": "",
"gceClusterConfig": {
"networkUri": base_uri + "/global/networks/" + self.gcloud_network,
"zoneUri": base_uri + "/zones/" + self.gcloud_zone,
"serviceAccountScopes": [
"https://www.googleapis.com/auth/cloud-platform"
]
},
"masterConfig": {
"numInstances": 1,
"machineTypeUri": base_uri + "/zones/" + self.gcloud_zone + "/machineTypes/" + self.master_node_type,
"diskConfig": {
"bootDiskSizeGb": self.master_disk_size,
"numLocalSsds": 0
}
},
"workerConfig": {
"numInstances": self.worker_normal_count,
"machineTypeUri": base_uri + "/zones/" + self.gcloud_zone + "/machineTypes/" + self.worker_node_type,
"diskConfig": {
"bootDiskSizeGb": self.worker_disk_size,
"numLocalSsds": 0
}
},
"secondaryWorkerConfig": {
"numInstances": self.worker_preemptible_count,
"isPreemptible": True
}
}
}
self.dataproc_client.projects().regions().clusters()\
.create(projectId=self.gcloud_project_id, region=self.dataproc_region, body=cluster_conf).execute()
while True:
time.sleep(10)
cluster_status = self._get_cluster_status()
status = cluster_status['status']['state']
logger.info("Creating new dataproc cluster: {} status: {}".format(self.dataproc_cluster_name, status))
if status == 'RUNNING':
break
if status == 'ERROR':
raise Exception(cluster_status['status']['details'])
class DeleteDataprocClusterTask(_DataprocBaseTask):
"""
Task for deleting a Dataproc cluster.
One of the uses for this class is to extend it and have it require a Dataproc task that does a calculation and have
that task extend the cluster creation task. This allows you to create chains where you create a cluster,
run your job and remove the cluster right away.
(Store your input and output files in gs://... instead of hdfs://... if you do this).
"""
def _get_cluster_status(self):
try:
return self.dataproc_client.projects().regions().clusters()\
.get(projectId=self.gcloud_project_id, region=self.dataproc_region,
clusterName=self.dataproc_cluster_name, fields="status")\
.execute()
except HttpError as e:
if e.resp.status == 404:
return None # We got a 404 so the cluster doesn't exist
else:
raise e
    def complete(self):
        return self._get_cluster_status() is None
def run(self):
self.dataproc_client.projects().regions().clusters()\
.delete(projectId=self.gcloud_project_id, region=self.dataproc_region, clusterName=self.dataproc_cluster_name).execute()
while True:
time.sleep(10)
status = self._get_cluster_status()
if status is None:
logger.info("Finished shutting down cluster: {}".format(self.dataproc_cluster_name))
break
logger.info("Shutting down cluster: {} current status: {}".format(self.dataproc_cluster_name, status['status']['state']))
|
{
"content_hash": "4f8a2cb83c2c87f2cbefafcfd2b30ad9",
"timestamp": "",
"source": "github",
"line_count": 247,
"max_line_length": 133,
"avg_line_length": 40.03238866396761,
"alnum_prop": 0.5893001618122977,
"repo_name": "casey-green/luigi",
"id": "a6af258b60cf3674e67d1b44f9ef941b57b573ee",
"size": "9888",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "luigi/contrib/dataproc.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "2162"
},
{
"name": "HTML",
"bytes": "30362"
},
{
"name": "JavaScript",
"bytes": "79970"
},
{
"name": "Python",
"bytes": "1304373"
},
{
"name": "Shell",
"bytes": "2627"
}
],
"symlink_target": ""
}
|
import string
import cStringIO
import leveldb
import io
import os
import time
import binascii
import shutil
import logging
from decimal import Decimal
from cache import Cache
from common import *
from lib.serialize import *
from lib.core import *
from lib.messages import msg_block, message_to_str, message_read
from lib.coredefs import COIN
from lib.scripteval import VerifySignature
from lib import utils
def tx_blk_cmp(a, b):
if a.dFeePerKB != b.dFeePerKB:
return int(a.dFeePerKB - b.dFeePerKB)
return int(a.dPriority - b.dPriority)
def block_value(height, fees):
subsidy = 50 * COIN
subsidy >>= (height / 210000)
return subsidy + fees
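# Worked example of the halving schedule above (heights illustrative; the
# Python 2 integer division makes height / 210000 the halving epoch):
#   block_value(0, 0)      == 50 * COIN
#   block_value(210000, 0) == 25 * COIN       (subsidy >> 1)
#   block_value(420000, 0) == 12.5 * COIN     (subsidy >> 2, exact in base units)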
class TxIdx(object):
def __init__(self, blkhash=0L, spentmask=0L):
self.blkhash = blkhash
self.spentmask = spentmask
class BlkMeta(object):
def __init__(self):
self.height = -1
self.work = 0L
def deserialize(self, s):
l = s.split()
if len(l) < 2:
raise RuntimeError
self.height = int(l[0])
self.work = long(l[1], 16)
def serialize(self):
r = str(self.height) + ' ' + hex(self.work)
return r
def __repr__(self):
return "BlkMeta(height %d, work %x)" % (self.height, self.work)
class HeightIdx(object):
def __init__(self):
self.blocks = []
def deserialize(self, s):
self.blocks = []
l = s.split()
for hashstr in l:
hash = long(hashstr, 16)
self.blocks.append(hash)
def serialize(self):
l = []
for blkhash in self.blocks:
l.append(hex(blkhash))
return ' '.join(l)
def __repr__(self):
return "HeightIdx(blocks=%s)" % (self.serialize(),)
class ChainDb(object):
def __init__(self, settings, datadir, mempool, wallet, netmagic, readonly=False, fast_dbm=False):
self.settings = settings
self.mempool = mempool
self.wallet = wallet
self.readonly = readonly
self.netmagic = netmagic
self.fast_dbm = fast_dbm
self.blk_cache = Cache(500)
self.orphans = {}
self.orphan_deps = {}
# setup logging
logging.basicConfig(level=logging.DEBUG)
self.logger = logging.getLogger(__name__)
# LevelDB to hold:
# tx:* transaction outputs
# misc:* state
# height:* list of blocks at height h
# blkmeta:* block metadata
# blocks:* block seek point in stream
self.blk_write = io.BufferedWriter(io.FileIO(datadir + '/blocks.dat','ab'))
self.blk_read = io.BufferedReader(io.FileIO(datadir + '/blocks.dat','rb'))
self.db = leveldb.LevelDB(datadir + '/leveldb')
try:
self.db.Get('misc:height')
except KeyError:
self.logger.info("INITIALIZING EMPTY BLOCKCHAIN DATABASE")
batch = leveldb.WriteBatch()
batch.Put('misc:height', str(-1))
batch.Put('misc:msg_start', self.netmagic.msg_start)
batch.Put('misc:tophash', ser_uint256(0L))
batch.Put('misc:total_work', hex(0L))
self.db.Write(batch)
try:
start = self.db.Get('misc:msg_start')
if start != self.netmagic.msg_start: raise KeyError
except KeyError:
self.logger.info("Database magic number mismatch. Data corruption or incorrect network?")
raise RuntimeError
def puttxidx(self, txhash, txidx, batch=None):
ser_txhash = ser_uint256(txhash)
try:
self.db.Get('tx:'+ser_txhash)
old_txidx = self.gettxidx(txhash)
self.logger.info("WARNING: overwriting duplicate TX %064x, height %d, oldblk %064x, \
oldspent %x, newblk %064x" % (txhash, self.getheight(), old_txidx.blkhash, old_txidx.spentmask, txidx.blkhash))
except KeyError:
pass
        # write directly to the DB when the caller did not supply a batch
        batch = self.db if batch is None else batch
batch.Put('tx:'+ser_txhash, hex(txidx.blkhash) + ' ' +
hex(txidx.spentmask))
return True
def getbalance(self, address):
balance = 0.0
txouts = self.listreceivedbyaddress(address)
for txout in txouts.itervalues():
balance = balance + txout['value']
return balance
"""
# scan the blocks for transactions to this address
chain_height = 10
for index in range(height, chain_height):
received, sent = scan_block(index, address)
        balance = balance + received - sent
if height < chain_height:
"""
def sendtoaddress(self, toaddress, amount):
tx = self.wallet.sendtoaddress(toaddress, amount)
self.mempool.add(tx)
def listreceivedbyaddress(self, address):
txouts = {}
end_height = self.getheight()
public_key_hash_hex = binascii.hexlify(utils.address_to_public_key_hash(address))
for height in xrange(end_height):
data = self.db.Get('height:' + str(height))
heightidx = HeightIdx()
heightidx.deserialize(data)
blkhash = heightidx.blocks[0]
block = self.getblock(blkhash)
for tx in block.vtx:
# if this transaction refers to this address in input, remove the previous transaction
for txin in tx.vin:
if not txin.scriptSig:
continue
script_key_hash_hex = binascii.hexlify(utils.scriptSig_to_public_key_hash(txin.scriptSig))
# print 'script_key_hash_hex: ', script_key_hash_hex
# print 'public_key_hash_hex: ', public_key_hash_hex
if script_key_hash_hex == public_key_hash_hex:
del txouts[txin.prevout.hash]
# if this transaction refers to this address in output, add this transaction
for n, txout in enumerate(tx.vout):
script_key_hash_hex = utils.output_script_to_public_key_hash(txout.scriptPubKey)
# print 'script_key_hash_hex: ', script_key_hash_hex
# print 'public_key_hash_hex: ', public_key_hash_hex
if script_key_hash_hex == public_key_hash_hex:
tx.calc_sha256()
txouts[tx.sha256] = {'txhash': tx.sha256, 'n': n, 'value': txout.nValue, \
'scriptPubKey': binascii.hexlify(txout.scriptPubKey)}
return txouts
def gettxidx(self, txhash):
ser_txhash = ser_uint256(txhash)
try:
ser_value = self.db.Get('tx:'+ser_txhash)
except KeyError:
return None
pos = string.find(ser_value, ' ')
txidx = TxIdx()
txidx.blkhash = long(ser_value[:pos], 16)
txidx.spentmask = long(ser_value[pos+1:], 16)
return txidx
def gettx(self, txhash):
txidx = self.gettxidx(txhash)
if txidx is None:
return None
block = self.getblock(txidx.blkhash)
for tx in block.vtx:
tx.calc_sha256()
if tx.sha256 == txhash:
return tx
self.logger.info("ERROR: Missing TX %064x in block %064x" % (txhash, txidx.blkhash))
return None
def haveblock(self, blkhash, checkorphans):
if self.blk_cache.exists(blkhash):
return True
if checkorphans and blkhash in self.orphans:
return True
ser_hash = ser_uint256(blkhash)
try:
self.db.Get('blocks:'+ser_hash)
return True
except KeyError:
return False
def have_prevblock(self, block):
if self.getheight() < 0 and block.sha256 == self.netmagic.block0:
return True
if self.haveblock(block.hashPrevBlock, False):
return True
return False
def getblock(self, blkhash):
block = self.blk_cache.get(blkhash)
if block is not None:
return block
ser_hash = ser_uint256(blkhash)
try:
# Lookup the block index, seek in the file
fpos = long(self.db.Get('blocks:'+ser_hash))
self.blk_read.seek(fpos)
# read and decode "block" msg
msg = message_read(self.netmagic, self.blk_read)
if msg is None:
return None
block = msg.block
except KeyError:
return None
self.blk_cache.put(blkhash, block)
return block
def spend_txout(self, txhash, n_idx, batch=None):
txidx = self.gettxidx(txhash)
if txidx is None:
return False
txidx.spentmask |= (1L << n_idx)
self.puttxidx(txhash, txidx, batch)
return True
def clear_txout(self, txhash, n_idx, batch=None):
txidx = self.gettxidx(txhash)
if txidx is None:
return False
txidx.spentmask &= ~(1L << n_idx)
self.puttxidx(txhash, txidx, batch)
return True
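# The spentmask is a plain bitfield over output indices; a sketch of the two
# operations above on an illustrative mask:
#   mask = 0b0101          # outputs 0 and 2 already spent
#   mask |= (1L << 1)      # spend_txout(..., n_idx=1)  -> 0b0111
#   mask &= ~(1L << 2)     # clear_txout(..., n_idx=2)  -> 0b0011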
def unique_outpts(self, block):
outpts = {}
txmap = {}
for tx in block.vtx:
            if tx.is_coinbase():
continue
txmap[tx.sha256] = tx
for txin in tx.vin:
v = (txin.prevout.hash, txin.prevout.n)
                if v in outpts:
return None
outpts[v] = False
return (outpts, txmap)
def txout_spent(self, txout):
txidx = self.gettxidx(txout.hash)
if txidx is None:
return None
if txout.n > 100000: # outpoint index sanity check
return None
if txidx.spentmask & (1L << txout.n):
return True
return False
def spent_outpts(self, block):
# list of outpoints this block wants to spend
l = self.unique_outpts(block)
if l is None:
return None
outpts = l[0]
txmap = l[1]
spendlist = {}
# pass 1: if outpoint in db, make sure it is unspent
for k in outpts.iterkeys():
outpt = COutPoint()
outpt.hash = k[0]
outpt.n = k[1]
rc = self.txout_spent(outpt)
if rc is None:
continue
if rc:
return None
outpts[k] = True # skip in pass 2
# pass 2: remaining outpoints must exist in this block
for k, v in outpts.iteritems():
if v:
continue
if k[0] not in txmap: # validate txout hash
return None
tx = txmap[k[0]] # validate txout index (n)
if k[1] >= len(tx.vout):
return None
# outpts[k] = True # not strictly necessary
return outpts.keys()
def tx_signed(self, tx, block, check_mempool):
tx.calc_sha256()
for i in xrange(len(tx.vin)):
txin = tx.vin[i]
# search database for dependent TX
txfrom = self.gettx(txin.prevout.hash)
# search block for dependent TX
if txfrom is None and block is not None:
for blktx in block.vtx:
blktx.calc_sha256()
if blktx.sha256 == txin.prevout.hash:
txfrom = blktx
break
# search mempool for dependent TX
if txfrom is None and check_mempool:
try:
txfrom = self.mempool.pool[txin.prevout.hash]
                except KeyError:
self.logger.info("TX %064x/%d no-dep %064x" %
(tx.sha256, i,
txin.prevout.hash))
return False
if txfrom is None:
self.logger.info("TX %064x/%d no-dep %064x" %
(tx.sha256, i,
txin.prevout.hash))
return False
if not VerifySignature(txfrom, tx, i, 0):
self.logger.info("TX %064x/%d sigfail" %
(tx.sha256, i))
return False
return True
def tx_is_orphan(self, tx):
if not tx.is_valid():
return None
for txin in tx.vin:
rc = self.txout_spent(txin.prevout)
if rc is None: # not found: orphan
try:
txfrom = self.mempool.pool[txin.prevout.hash]
                except KeyError:
return True
if txin.prevout.n >= len(txfrom.vout):
return None
if rc is True: # spent? strange
return None
return False
def connect_block(self, ser_hash, block, blkmeta):
# verify against checkpoint list
try:
chk_hash = self.netmagic.checkpoints[blkmeta.height]
if chk_hash != block.sha256:
self.logger.info("Block %064x does not match checkpoint hash %064x, height %d" % (
block.sha256, chk_hash, blkmeta.height))
return False
except KeyError:
pass
# check TX connectivity
outpts = self.spent_outpts(block)
if outpts is None:
self.logger.info("Unconnectable block %064x" % (block.sha256, ))
return False
# verify script signatures
if ('nosig' not in self.settings and
('forcesig' in self.settings or
blkmeta.height > self.netmagic.checkpoint_max)):
for tx in block.vtx:
tx.calc_sha256()
if tx.is_coinbase():
continue
if not self.tx_signed(tx, block, False):
self.logger.info("Invalid signature in block %064x" % (block.sha256, ))
return False
# update database pointers for best chain
batch = leveldb.WriteBatch()
batch.Put('misc:total_work', hex(blkmeta.work))
batch.Put('misc:height', str(blkmeta.height))
batch.Put('misc:tophash', ser_hash)
self.logger.info("ChainDb: height %d, block %064x" % (
blkmeta.height, block.sha256))
# all TX's in block are connectable; index
neverseen = 0
for tx in block.vtx:
tx.calc_sha256()
if not self.mempool.remove(tx.sha256):
neverseen += 1
txidx = TxIdx(block.sha256)
if not self.puttxidx(tx.sha256, txidx, batch):
self.logger.info("TxIndex failed %064x" % (tx.sha256,))
return False
self.logger.info("MemPool: blk.vtx.sz %d, neverseen %d, poolsz %d" % (len(block.vtx), neverseen, self.mempool.size()))
# mark deps as spent
for outpt in outpts:
self.spend_txout(outpt[0], outpt[1], batch)
self.db.Write(batch)
return True
def disconnect_block(self, block):
ser_prevhash = ser_uint256(block.hashPrevBlock)
prevmeta = BlkMeta()
prevmeta.deserialize(self.db.Get('blkmeta:'+ser_prevhash))
tup = self.unique_outpts(block)
if tup is None:
return False
outpts = tup[0]
# mark deps as unspent
batch = leveldb.WriteBatch()
for outpt in outpts:
self.clear_txout(outpt[0], outpt[1], batch)
# update tx index and memory pool
for tx in block.vtx:
tx.calc_sha256()
ser_hash = ser_uint256(tx.sha256)
try:
batch.Delete('tx:'+ser_hash)
except KeyError:
pass
if not tx.is_coinbase():
self.mempool.add(tx)
# update database pointers for best chain
batch.Put('misc:total_work', hex(prevmeta.work))
batch.Put('misc:height', str(prevmeta.height))
batch.Put('misc:tophash', ser_prevhash)
self.db.Write(batch)
self.logger.info("ChainDb(disconn): height %d, block %064x" % (
prevmeta.height, block.hashPrevBlock))
return True
def getblockmeta(self, blkhash):
ser_hash = ser_uint256(blkhash)
try:
meta = BlkMeta()
meta.deserialize(self.db.Get('blkmeta:'+ser_hash))
except KeyError:
return None
return meta
def getblockheight(self, blkhash):
meta = self.getblockmeta(blkhash)
if meta is None:
return -1
return meta.height
def reorganize(self, new_best_blkhash):
self.logger.info("REORGANIZE")
conn = []
disconn = []
old_best_blkhash = self.gettophash()
fork = old_best_blkhash
longer = new_best_blkhash
while fork != longer:
while (self.getblockheight(longer) >
self.getblockheight(fork)):
block = self.getblock(longer)
block.calc_sha256()
conn.append(block)
longer = block.hashPrevBlock
if longer == 0:
return False
if fork == longer:
break
block = self.getblock(fork)
block.calc_sha256()
disconn.append(block)
fork = block.hashPrevBlock
if fork == 0:
return False
self.logger.info("REORG disconnecting top hash %064x" % (old_best_blkhash,))
self.logger.info("REORG connecting new top hash %064x" % (new_best_blkhash,))
self.logger.info("REORG chain union point %064x" % (fork,))
self.logger.info("REORG disconnecting %d blocks, connecting %d blocks" % (len(disconn), len(conn)))
for block in disconn:
if not self.disconnect_block(block):
return False
for block in conn:
if not self.connect_block(ser_uint256(block.sha256),
block, self.getblockmeta(block.sha256)):
return False
self.logger.info("REORGANIZE DONE")
return True
def set_best_chain(self, ser_prevhash, ser_hash, block, blkmeta):
# the easy case, extending current best chain
if (blkmeta.height == 0 or
self.db.Get('misc:tophash') == ser_prevhash):
return self.connect_block(ser_hash, block, blkmeta)
# switching from current chain to another, stronger chain
return self.reorganize(block.sha256)
def putoneblock(self, block):
block.calc_sha256()
if not block.is_valid():
self.logger.info("Invalid block %064x" % (block.sha256, ))
return False
if not self.have_prevblock(block):
self.orphans[block.sha256] = True
self.orphan_deps[block.hashPrevBlock] = block
self.logger.info("Orphan block %064x (%d orphans)" % (block.sha256, len(self.orphan_deps)))
return False
top_height = self.getheight()
top_work = long(self.db.Get('misc:total_work'), 16)
# read metadata for previous block
prevmeta = BlkMeta()
if top_height >= 0:
ser_prevhash = ser_uint256(block.hashPrevBlock)
prevmeta.deserialize(self.db.Get('blkmeta:'+ser_prevhash))
else:
ser_prevhash = ''
batch = leveldb.WriteBatch()
# build network "block" msg, as canonical disk storage form
msg = msg_block()
msg.block = block
msg_data = message_to_str(self.netmagic, msg)
# write "block" msg to storage
fpos = self.blk_write.tell()
self.blk_write.write(msg_data)
self.blk_write.flush()
# add index entry
ser_hash = ser_uint256(block.sha256)
batch.Put('blocks:'+ser_hash, str(fpos))
# store metadata related to this block
blkmeta = BlkMeta()
blkmeta.height = prevmeta.height + 1
blkmeta.work = (prevmeta.work +
uint256_from_compact(block.nBits))
batch.Put('blkmeta:'+ser_hash, blkmeta.serialize())
# store list of blocks at this height
heightidx = HeightIdx()
heightstr = str(blkmeta.height)
try:
heightidx.deserialize(self.db.Get('height:'+heightstr))
except KeyError:
pass
heightidx.blocks.append(block.sha256)
batch.Put('height:'+heightstr, heightidx.serialize())
self.db.Write(batch)
# if chain is not best chain, proceed no further
if (blkmeta.work <= top_work):
self.logger.info("ChainDb: height %d (weak), block %064x" % (blkmeta.height, block.sha256))
return True
# update global chain pointers
if not self.set_best_chain(ser_prevhash, ser_hash,
block, blkmeta):
return False
return True
def putblock(self, block):
block.calc_sha256()
if self.haveblock(block.sha256, True):
self.logger.info("Duplicate block %064x submitted" % (block.sha256, ))
return False
if not self.putoneblock(block):
return False
blkhash = block.sha256
while blkhash in self.orphan_deps:
block = self.orphan_deps[blkhash]
if not self.putoneblock(block):
return True
del self.orphan_deps[blkhash]
del self.orphans[block.sha256]
blkhash = block.sha256
return True
    def locate(self, locator):
        for hash in locator.vHave:
            ser_hash = ser_uint256(hash)
            try:
                # look the hash up in the block metadata index; the original
                # referenced a nonexistent self.blkmeta attribute
                blkmeta = BlkMeta()
                blkmeta.deserialize(self.db.Get('blkmeta:'+ser_hash))
                return blkmeta
            except KeyError:
                continue
        return 0
def getheight(self):
return int(self.db.Get('misc:height'))
def gettophash(self):
return uint256_from_str(self.db.Get('misc:tophash'))
def loadfile(self, filename):
fd = os.open(filename, os.O_RDONLY)
self.logger.info("IMPORTING DATA FROM " + filename)
buf = ''
wanted = 4096
while True:
if wanted > 0:
if wanted < 4096:
wanted = 4096
s = os.read(fd, wanted)
if len(s) == 0:
break
buf += s
wanted = 0
buflen = len(buf)
startpos = string.find(buf, self.netmagic.msg_start)
if startpos < 0:
wanted = 8
continue
sizepos = startpos + 4
blkpos = startpos + 8
if blkpos > buflen:
wanted = 8
continue
blksize = struct.unpack("<i", buf[sizepos:blkpos])[0]
# print "blkpos: ", blkpos, "blksize: ", blksize, "buflen: ", buflen
if (blkpos + blksize) > buflen:
wanted = 8 + blksize
continue
ser_blk = buf[blkpos:blkpos+blksize]
buf = buf[blkpos+blksize:]
f = cStringIO.StringIO(ser_blk)
block = CBlock()
block.deserialize(f)
# print("adding the genesis block")
self.putblock(block)
def newblock_txs(self):
txlist = []
for tx in self.mempool.pool.itervalues():
# query finalized, non-coinbase mempool tx's
if tx.is_coinbase() or not tx.is_final():
continue
# iterate through inputs, calculate total input value
valid = True
nValueIn = 0
nValueOut = 0
dPriority = Decimal(0)
for tin in tx.vin:
in_tx = self.gettx(tin.prevout.hash)
if (in_tx is None or
tin.prevout.n >= len(in_tx.vout)):
valid = False
else:
v = in_tx.vout[tin.prevout.n].nValue
nValueIn += v
dPriority += Decimal(v * 1)
if not valid:
continue
# iterate through outputs, calculate total output value
for txout in tx.vout:
nValueOut += txout.nValue
# calculate fees paid, if any
tx.nFeesPaid = nValueIn - nValueOut
if tx.nFeesPaid < 0:
continue
# calculate fee-per-KB and priority
tx.ser_size = len(tx.serialize())
dPriority /= Decimal(tx.ser_size)
tx.dFeePerKB = (Decimal(tx.nFeesPaid) /
(Decimal(tx.ser_size) / Decimal(1000)))
if tx.dFeePerKB < Decimal(50000):
tx.dFeePerKB = Decimal(0)
tx.dPriority = dPriority
txlist.append(tx)
# sort list by fee-per-kb, then priority
sorted_txlist = sorted(txlist, cmp=tx_blk_cmp, reverse=True)
# build final list of transactions. thanks to sort
# order above, we add TX's to the block in the
# highest-fee-first order. free transactions are
# then appended in order of priority, until
# free_bytes is exhausted.
txlist = []
txlist_bytes = 0
free_bytes = 50000
while len(sorted_txlist) > 0:
tx = sorted_txlist.pop()
if txlist_bytes + tx.ser_size > (900 * 1000):
continue
if tx.dFeePerKB > 0:
txlist.append(tx)
txlist_bytes += tx.ser_size
elif free_bytes >= tx.ser_size:
txlist.append(tx)
txlist_bytes += tx.ser_size
free_bytes -= tx.ser_size
return txlist
def newblock(self):
tophash = self.gettophash()
# print "Tophash: ", tophash
prevblock = self.getblock(tophash)
if prevblock is None:
return None
# obtain list of candidate transactions for a new block
total_fees = 0
txlist = self.newblock_txs()
for tx in txlist:
total_fees += tx.nFeesPaid
# build coinbase
txin = CTxIn()
txin.prevout.set_null()
txin.coinbase = "COINBASE TX"
txout = CTxOut()
txout.nValue = block_value(self.getheight(), total_fees)
public_key, address = self.wallet.getnewaddress()
txout.scriptPubKey = utils.public_key_to_pay_to_pubkey(public_key)
coinbase = CTransaction()
coinbase.vin.append(txin)
coinbase.vout.append(txout)
# build block
block = CBlock()
block.hashPrevBlock = tophash
block.nTime = int(time.time())
block.nBits = prevblock.nBits # FIXME
block.vtx.append(coinbase)
block.vtx.extend(txlist)
block.hashMerkleRoot = block.calc_merkle()
return block
|
{
"content_hash": "57ad84c724569d8dda280c6f48dec398",
"timestamp": "",
"source": "github",
"line_count": 835,
"max_line_length": 126,
"avg_line_length": 32.18802395209581,
"alnum_prop": 0.5402016594113926,
"repo_name": "obulpathi/bitcoinpy",
"id": "f89cc1923ab31fb4382f3bc3f775521dc1ef8fe5",
"size": "27064",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bitcoinpy/chaindb.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "993"
},
{
"name": "Python",
"bytes": "301936"
}
],
"symlink_target": ""
}
|
'''
Salt module to manage RAID arrays with mdadm
'''
# Import python libs
import os
import logging
# Import salt libs
import salt.utils
from salt.exceptions import CommandExecutionError
# Set up logger
log = logging.getLogger(__name__)
# Define a function alias in order not to shadow built-in's
__func_alias__ = {
'list_': 'list'
}
# Define the module's virtual name
__virtualname__ = 'raid'
def __virtual__():
'''
mdadm provides raid functions for Linux
'''
if __grains__['kernel'] != 'Linux':
return False
if not salt.utils.which('mdadm'):
return False
return __virtualname__
def list_():
'''
List the RAID devices.
CLI Example:
.. code-block:: bash
salt '*' raid.list
'''
ret = {}
for line in (__salt__['cmd.run_stdout']
('mdadm --detail --scan').splitlines()):
if ' ' not in line:
continue
comps = line.split()
device = comps[1]
ret[device] = {"device": device}
for comp in comps[2:]:
key = comp.split('=')[0].lower()
value = comp.split('=')[1]
ret[device][key] = value
return ret
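# Example of the parsing above (an illustrative scan line, not output from
# any real host):
#   ARRAY /dev/md0 metadata=1.2 name=host:0 UUID=d8b8b4e5:...
# becomes:
#   {'/dev/md0': {'device': '/dev/md0', 'metadata': '1.2',
#                 'name': 'host:0', 'uuid': 'd8b8b4e5:...'}}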
def detail(device='/dev/md0'):
'''
Show detail for a specified RAID device
CLI Example:
.. code-block:: bash
salt '*' raid.detail '/dev/md0'
'''
ret = {}
ret['members'] = {}
# Lets make sure the device exists before running mdadm
if not os.path.exists(device):
msg = "Device {0} doesn't exist!"
raise CommandExecutionError(msg.format(device))
cmd = 'mdadm --detail {0}'.format(device)
for line in __salt__['cmd.run_stdout'](cmd).splitlines():
if line.startswith(device):
continue
if ' ' not in line:
continue
        if ':' not in line:
if '/dev/' in line:
comps = line.split()
state = comps[4:-1]
ret['members'][comps[0]] = {
'device': comps[-1],
'major': comps[1],
'minor': comps[2],
'number': comps[0],
'raiddevice': comps[3],
'state': ' '.join(state),
}
continue
comps = line.split(' : ')
comps[0] = comps[0].lower()
comps[0] = comps[0].strip()
comps[0] = comps[0].replace(' ', '_')
ret[comps[0]] = comps[1].strip()
return ret
def destroy(device):
'''
Destroy a RAID device.
WARNING This will zero the superblock of all members of the RAID array..
CLI Example:
.. code-block:: bash
salt '*' raid.destroy /dev/md0
'''
try:
details = detail(device)
except CommandExecutionError:
return False
stop_cmd = 'mdadm --stop {0}'.format(device)
zero_cmd = 'mdadm --zero-superblock {0}'
    if __salt__['cmd.retcode'](stop_cmd) == 0:
        # cmd.retcode returns 0 on success, so only zero the member
        # superblocks once the array has actually been stopped
        for member in details['members'].values():
            __salt__['cmd.retcode'](zero_cmd.format(member['device']))
    return __salt__['raid.list']().get(device) is None
def create(*args):
'''
Create a RAID device.
.. warning::
Use with CAUTION, as this function can be very destructive if not used
properly!
Use this module just as a regular mdadm command.
For more info, read the ``mdadm(8)`` manpage
NOTE: It takes time to create a RAID array. You can check the progress in
"resync_status:" field of the results from the following command:
.. code-block:: bash
salt '*' raid.detail /dev/md0
CLI Examples:
.. code-block:: bash
salt '*' raid.create /dev/md0 level=1 chunk=256 raid-devices=2 /dev/xvdd /dev/xvde test_mode=True
.. note:: Test mode
Adding ``test_mode=True`` as an argument will print out the mdadm
command that would have been run.
    :param args: The arguments you pass to this function.
:param arguments:
arguments['new_array']: The name of the new RAID array that will be created.
arguments['opt_val']: Option with Value. Example: raid-devices=2
arguments['opt_raw']: Option without Value. Example: force
arguments['disks_to_array']: The disks that will be added to the new raid.
:return:
test_mode=True:
Prints out the full command.
test_mode=False (Default):
Executes command on remote the host(s) and
Prints out the mdadm output.
'''
test_mode = False
    arguments = {'new_array': '', 'opt_val': {}, 'opt_raw': [], 'disks_to_array': []}
for arg in args:
if arg.startswith('test_mode'):
            # bool() of any non-empty string is True, so parse the value
            test_mode = arg.split('=')[-1].strip().lower() in ('true', '1', 'yes')
        elif arg.startswith('/dev/'):
            if arg.startswith('/dev/md'):
arguments['new_array'] = arg
else:
arguments['disks_to_array'].append(arg)
elif '=' in arg:
opt, val = arg.split('=')
arguments['opt_val'][opt] = val
elif str(arg) in ['readonly', 'run', 'force']:
arguments['opt_raw'].append(arg)
elif str(arg) in ['missing']:
arguments['disks_to_array'].append(arg)
else:
msg = "Invalid argument - {0} !"
raise CommandExecutionError(msg.format(arg))
cmd = "echo y | mdadm --create --verbose {new_array}{opts_raw}{opts_val} {disks_to_array}"
cmd = cmd.format(new_array=arguments['new_array'],
opts_raw=(' --' + ' --'.join(arguments['opt_raw'])
if len(arguments['opt_raw']) > 0
else ''),
opts_val=(' --' + ' --'.join(key + '=' + arguments['opt_val'][key] for key in arguments['opt_val'])
if len(arguments['opt_val']) > 0
else ''),
disks_to_array=' '.join(arguments['disks_to_array']))
    if test_mode:
        return cmd
    return __salt__['cmd.run'](cmd)
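# Sketch of what create() assembles in test mode (values illustrative, and
# the order of the --options may vary with dict iteration):
#   salt '*' raid.create /dev/md0 level=1 raid-devices=2 /dev/xvdd /dev/xvde test_mode=True
# returns the command string:
#   echo y | mdadm --create --verbose /dev/md0 --level=1 --raid-devices=2 /dev/xvdd /dev/xvde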
|
{
"content_hash": "7d4ba6e59050d953910e6d73f805de36",
"timestamp": "",
"source": "github",
"line_count": 215,
"max_line_length": 120,
"avg_line_length": 28.572093023255814,
"alnum_prop": 0.5349177926094743,
"repo_name": "victorywang80/Maintenance",
"id": "0001c89e56d810b53ac25ff48ed96a0ea653f5aa",
"size": "6167",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "saltstack/src/salt/modules/mdadm.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "160954"
},
{
"name": "JavaScript",
"bytes": "1"
},
{
"name": "Python",
"bytes": "4522522"
},
{
"name": "Scheme",
"bytes": "7488"
},
{
"name": "Shell",
"bytes": "14653"
}
],
"symlink_target": ""
}
|
from rest_framework.decorators import api_view
from rest_framework.response import Response
from rest_framework.exceptions import APIException
from django.db.models import Q
from frontend.models import Practice, PCT, STP, RegionalTeam, PCN
from . import view_utils as utils
from matrixstore.db import get_db, get_row_grouper
STATS_COLUMN_WHITELIST = (
"total_list_size",
"astro_pu_items",
"astro_pu_cost",
"nothing",
)
class KeysNotValid(APIException):
status_code = 400
default_detail = "The keys you provided are not supported"
@api_view(["GET"])
def org_details(request, format=None):
"""
Get list size and ASTRO-PU by month, for CCGs or practices.
"""
org_type = request.GET.get("org_type", None)
keys = utils.param_to_list(request.query_params.get("keys", []))
org_codes = utils.param_to_list(request.query_params.get("org", []))
if org_type is None:
org_type = "all_practices"
orgs = _get_orgs(org_type, org_codes)
data = _get_practice_stats_entries(keys, org_type, orgs)
return Response(list(data))
def _get_orgs(org_type, org_codes):
if org_type == "practice":
orgs = Practice.objects.order_by("code").only("code", "name")
if org_codes:
orgs = orgs.filter(Q(code__in=org_codes) | Q(ccg_id__in=org_codes))
elif org_type == "ccg":
orgs = PCT.objects.filter(org_type="CCG").order_by("code").only("code", "name")
if org_codes:
orgs = orgs.filter(code__in=org_codes)
elif org_type == "pcn":
orgs = PCN.objects.order_by("code").only("code", "name")
if org_codes:
orgs = orgs.filter(code__in=org_codes)
elif org_type == "stp":
orgs = STP.objects.order_by("ons_code").only("ons_code", "name")
if org_codes:
orgs = orgs.filter(ons_code__in=org_codes)
elif org_type == "regional_team":
orgs = RegionalTeam.objects.order_by("code").only("code", "name")
if org_codes:
orgs = orgs.filter(code__in=org_codes)
elif org_type == "all_practices":
orgs = []
else:
raise ValueError("Unknown org_type: {}".format(org_type))
return orgs
def _get_practice_stats_entries(keys, org_type, orgs):
db = get_db()
practice_stats = db.query(*_get_query_and_params(keys))
group_by_org = get_row_grouper(org_type)
practice_stats = [
(name, group_by_org.sum(matrix)) for (name, matrix) in practice_stats
]
# `group_by_org.offsets` maps each organisation's primary key to its row
# offset within the matrices. We pair each organisation with its row
# offset, ignoring those organisations which aren't in the mapping (which
# implies that we have no statistics for them)
org_offsets = [
(org, group_by_org.offsets[org.pk])
for org in orgs
if org.pk in group_by_org.offsets
]
# For the "all_practices" grouping we have no orgs and just a single row
if org_type == "all_practices":
org_offsets = [(None, 0)]
date_offsets = sorted(db.date_offsets.items())
# Yield entries for each organisation on each date
for date, col_offset in date_offsets:
for org, row_offset in org_offsets:
entry = {"date": date}
if org is not None:
entry["row_id"] = org.pk
entry["row_name"] = org.name
index = (row_offset, col_offset)
star_pu = {}
has_value = False
for name, matrix in practice_stats:
value = matrix[index]
if value != 0:
has_value = True
if name == "nothing":
value = 1
if isinstance(value, float):
value = round(value, 2)
if name.startswith("star_pu."):
star_pu[name[8:]] = value
else:
entry[name] = value
if star_pu:
entry["star_pu"] = star_pu
if has_value:
yield entry
def _get_query_and_params(keys):
params = []
for key in keys:
if key == "nothing":
pass
elif key in STATS_COLUMN_WHITELIST or key.startswith("star_pu."):
params.append(key)
else:
raise KeysNotValid("%s is not a valid key" % key)
if keys:
# `params` might be empty here because the only key supplied was
# "nothing", but that's fine: the empty IN clause won't match any rows,
# which is what we want
where = "name IN ({})".format(",".join("?" * len(params)))
else:
# If no keys are supplied we treat this as an implicit "select all"
where = "1=1"
query = "SELECT name, value FROM practice_statistic WHERE {}".format(where)
# The special "nothing" key always evaluates to 1, but to match the
# previous API we should only return these "nothing" entries where there
    # exist statistics for that organisation and date. So we use the
# total_list_size matrix, and only return entries where that has a non-zero
# value
if "nothing" in keys:
query += """
UNION ALL
SELECT "nothing" AS name, value
FROM practice_statistic
WHERE name="total_list_size"
"""
return query, params
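# Illustrative sketch (assumption, not part of the original module): for a
# hypothetical pair of keys, one whitelisted column plus one invented
# "star_pu."-prefixed key, the function above would produce:
#
#   _get_query_and_params(["total_list_size", "star_pu.oral_antibacterials_items"])
#   -> ("SELECT name, value FROM practice_statistic WHERE name IN (?,?)",
#       ["total_list_size", "star_pu.oral_antibacterials_items"])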
|
{
"content_hash": "0cb03ffc95f139e2207ed6e18906652c",
"timestamp": "",
"source": "github",
"line_count": 145,
"max_line_length": 87,
"avg_line_length": 37.00689655172414,
"alnum_prop": 0.5879612374207976,
"repo_name": "annapowellsmith/openpresc",
"id": "ff6ee903fd5f44759488a212060ec8af5d8bbec3",
"size": "5366",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "openprescribing/api/views_org_details.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "95907"
},
{
"name": "HTML",
"bytes": "68653"
},
{
"name": "JavaScript",
"bytes": "14332669"
},
{
"name": "Makefile",
"bytes": "103"
},
{
"name": "Python",
"bytes": "352287"
},
{
"name": "Shell",
"bytes": "3537"
}
],
"symlink_target": ""
}
|
from setuptools import setup
setup(
name='t4_lambda_s3select',
version='0.0.1',
py_modules=['index'],
)
|
{
"content_hash": "14212d11e3177817cc6b62fbb00b9b2b",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 30,
"avg_line_length": 16.714285714285715,
"alnum_prop": 0.6324786324786325,
"repo_name": "quiltdata/quilt-compiler",
"id": "cfe1d24af29363150c4d7374c38ea9b7a2b4b8ef",
"size": "117",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lambdas/s3select/setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "3366"
},
{
"name": "HTML",
"bytes": "22745"
},
{
"name": "JavaScript",
"bytes": "203575"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "PowerShell",
"bytes": "7195"
},
{
"name": "Python",
"bytes": "383040"
},
{
"name": "Shell",
"bytes": "1586"
}
],
"symlink_target": ""
}
|
from typing import Any, Optional
from cached_property import cached_property
from airflow.exceptions import AirflowException
from airflow.providers.amazon.aws.hooks.athena import AWSAthenaHook
from airflow.sensors.base_sensor_operator import BaseSensorOperator
from airflow.utils.decorators import apply_defaults
class AthenaSensor(BaseSensorOperator):
"""
Asks for the state of the Query until it reaches a failure state or success state.
If the query fails, the task will fail.
:param query_execution_id: query_execution_id to check the state of
:type query_execution_id: str
:param max_retries: Number of times to poll for query state before
returning the current state, defaults to None
:type max_retries: int
:param aws_conn_id: aws connection to use, defaults to 'aws_default'
:type aws_conn_id: str
:param sleep_time: Time in seconds to wait between two consecutive call to
check query status on athena, defaults to 10
:type sleep_time: int
"""
INTERMEDIATE_STATES = (
'QUEUED',
'RUNNING',
)
FAILURE_STATES = (
'FAILED',
'CANCELLED',
)
SUCCESS_STATES = ('SUCCEEDED',)
template_fields = ['query_execution_id']
template_ext = ()
ui_color = '#66c3ff'
@apply_defaults
def __init__(
self,
*,
query_execution_id: str,
max_retries: Optional[int] = None,
aws_conn_id: str = 'aws_default',
sleep_time: int = 10,
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
self.aws_conn_id = aws_conn_id
self.query_execution_id = query_execution_id
self.sleep_time = sleep_time
self.max_retries = max_retries
def poke(self, context: dict) -> bool:
state = self.hook.poll_query_status(self.query_execution_id, self.max_retries)
if state in self.FAILURE_STATES:
raise AirflowException('Athena sensor failed')
if state in self.INTERMEDIATE_STATES:
return False
return True
@cached_property
def hook(self) -> AWSAthenaHook:
"""Create and return an AWSAthenaHook"""
return AWSAthenaHook(self.aws_conn_id, self.sleep_time)
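# Usage sketch (assumption, not part of the original module): wiring the
# sensor into a DAG; the dag object, task_id and the XCom-templated
# query_execution_id below are hypothetical.
#
#   watch_query = AthenaSensor(
#       task_id='watch_athena_query',
#       query_execution_id="{{ ti.xcom_pull(task_ids='run_query') }}",
#       sleep_time=30,
#       dag=dag,
#   )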
|
{
"content_hash": "0466327d98fb2f227c7e45f5c06458bf",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 86,
"avg_line_length": 31.535211267605632,
"alnum_prop": 0.6507369361322018,
"repo_name": "mrkm4ntr/incubator-airflow",
"id": "40c028a41c4c43ddaba3b5c69fff1e9690516ef8",
"size": "3026",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "airflow/providers/amazon/aws/sensors/athena.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "22581"
},
{
"name": "Dockerfile",
"bytes": "31475"
},
{
"name": "HCL",
"bytes": "3786"
},
{
"name": "HTML",
"bytes": "221101"
},
{
"name": "JavaScript",
"bytes": "32643"
},
{
"name": "Jupyter Notebook",
"bytes": "2933"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "14407542"
},
{
"name": "Shell",
"bytes": "541811"
}
],
"symlink_target": ""
}
|
import sqlite3
import os
import simplejson as json
db = sqlite3.connect("../data/pbp.db")
c = db.cursor()
# Fetch all scoreboard paths
scoreboard_paths = []
for root, dirs, files in os.walk("..\\data\\scoreboards"):
for file in files:
if file.endswith("scoreboard.json"):
scoreboard_paths.append(os.path.join(root, file))
# For each scoreboard_path...
# 1. INSERT OR IGNORE INTO teams (team_id, team_alias, team_name, team_mascot, team_div, team_conf) VALUES ()
# 2. INSERT OR IGNORE INTO games (game_id, date, year, home_team_id, away_team_id, venue_name, venue_city, venue_state) VALUES ()
c.execute('DELETE FROM teams')
c.execute('DELETE FROM games')
for scoreboard_path in scoreboard_paths:
scoreboard = json.load(open(scoreboard_path))
for contest in range(len(scoreboard['contests'])):
home_team_id = scoreboard['contests'][contest]['homeTeam']['id']
home_team_alias = scoreboard['contests'][contest]['homeTeam']['abrv']
home_team_name = scoreboard['contests'][contest]['homeTeam']['name']
home_team_mascot = scoreboard['contests'][contest]['homeTeam']['mascot']
home_team_div = scoreboard['contests'][contest]['homeTeam']['division']
home_team_conf = scoreboard['contests'][contest]['homeTeam']['conference']
away_team_id = scoreboard['contests'][contest]['visitorTeam']['id']
away_team_alias = scoreboard['contests'][contest]['visitorTeam']['abrv']
away_team_name = scoreboard['contests'][contest]['visitorTeam']['name']
away_team_mascot = scoreboard['contests'][contest]['visitorTeam']['mascot']
away_team_div = scoreboard['contests'][contest]['visitorTeam']['division']
away_team_conf = scoreboard['contests'][contest]['visitorTeam']['conference']
game_id = scoreboard['contests'][contest]['id']
date = scoreboard['contests'][contest]['dateYearMonthDay']
year = scoreboard['contests'][contest]['seasonYear']
venue_name = scoreboard['contests'][contest]['venue']['venueName']
venue_city = scoreboard['contests'][contest]['venue']['venueCity']
venue_state = scoreboard['contests'][contest]['venue']['venueState']
c.execute('INSERT OR IGNORE INTO teams VALUES (?, ?, ?, ?, ?, ?)',
(home_team_id, home_team_alias, home_team_name, home_team_mascot, home_team_div, home_team_conf))
c.execute('INSERT OR IGNORE INTO teams VALUES (?, ?, ?, ?, ?, ?)',
(away_team_id, away_team_alias, away_team_name, away_team_mascot, away_team_div, away_team_conf))
c.execute('INSERT OR IGNORE INTO games VALUES (?, ?, ?, ?, ?, ?, ?, ?)',
(game_id, date, year, home_team_id, away_team_id, venue_name, venue_city, venue_state))
print("Inserted records into teams and games from", scoreboard_path)
db.commit()
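# Assumed schema sketch (not in the original script): the INSERT statements
# above bind 6 values for teams and 8 for games, implying tables along these
# lines; the column names are inferred from the bound variables.
#
#   CREATE TABLE teams (team_id INTEGER PRIMARY KEY, team_alias TEXT,
#                       team_name TEXT, team_mascot TEXT,
#                       team_div TEXT, team_conf TEXT);
#   CREATE TABLE games (game_id INTEGER PRIMARY KEY, date TEXT, year INTEGER,
#                       home_team_id INTEGER, away_team_id INTEGER,
#                       venue_name TEXT, venue_city TEXT, venue_state TEXT);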
|
{
"content_hash": "dd81fc63204f2ae99fdea71691dd17f7",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 109,
"avg_line_length": 49.03508771929825,
"alnum_prop": 0.6869409660107334,
"repo_name": "albertlyu/ncaab-pbp",
"id": "032f5e4bc55fca7a42fda0a4df9ea1d3d1792d82",
"size": "2795",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/sqlite_insert_games.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "23290"
}
],
"symlink_target": ""
}
|
"""
Algorithm:
Feature Weighting
Viewpoints Labels
Choose Examplars based on Scores
Normalizing Scores
Per Name
Incremental Version
"""
from __future__ import absolute_import, division, print_function
import utool as ut
import numpy as np
from ibeis.algo.hots.smk import smk_debug
from vtool import patch as ptool
from vtool import image as gtool
import six
import scipy.stats.mstats as spms
from os.path import join
from os.path import basename
import scipy.spatial.distance as spdist
from collections import namedtuple
(print, print_, printDBG, rrr, profile) = ut.inject(__name__, '[smk_plots]')
Metrics = namedtuple('Metrics', ('wx2_nMembers', 'wx2_pdist_stats', 'wx2_wdist_stats',))
def vizualize_vocabulary(ibs, invindex):
"""
cleaned up version of dump_word_patches. Makes idf scatter plots and dumps
the patches that contributed to each word.
CommandLine:
python -m ibeis.algo.hots.smk.smk_plots --test-vizualize_vocabulary
python -m ibeis.algo.hots.smk.smk_plots --test-vizualize_vocabulary --vf
Example:
>>> from ibeis.algo.hots.smk.smk_plots import * # NOQA
>>> from ibeis.algo.hots.smk import smk_debug
>>> from ibeis.algo.hots.smk import smk_repr
>>> #tup = smk_debug.testdata_raw_internals0(db='GZ_ALL', nWords=64000)
>>> #tup = smk_debug.testdata_raw_internals0(db='GZ_ALL', nWords=8000)
>>> tup = smk_debug.testdata_raw_internals0(db='PZ_Master0', nWords=64000)
>>> #tup = smk_debug.testdata_raw_internals0(db='PZ_Mothers', nWords=8000)
>>> ibs, annots_df, daids, qaids, invindex, qreq_ = tup
>>> smk_repr.compute_data_internals_(invindex, qreq_.qparams, delete_rawvecs=False)
>>> vizualize_vocabulary(ibs, invindex)
"""
invindex.idx2_wxs = np.array(invindex.idx2_wxs)
    print('[smk_plots] Visualizing vocabulary')
# DUMPING PART --- dumps patches to disk
figdir = ibs.get_fig_dir()
ut.ensuredir(figdir)
if ut.get_argflag('--vf'):
ut.view_directory(figdir)
# Compute Word Statistics
metrics = compute_word_metrics(invindex)
wx2_nMembers, wx2_pdist_stats, wx2_wdist_stats = metrics
#(wx2_pdist, wx2_wdist, wx2_nMembers, wx2_pdist_stats, wx2_wdist_stats) = metrics
#wx2_prad = {wx: pdist_stats['max'] for wx, pdist_stats in six.iteritems(wx2_pdist_stats) if 'max' in pdist_stats}
#wx2_wrad = {wx: wdist_stats['max'] for wx, wdist_stats in six.iteritems(wx2_wdist_stats) if 'max' in wdist_stats}
wx2_prad = {wx: stats['max'] for wx, stats in wx2_pdist_stats.items() if 'max' in stats}
wx2_wrad = {wx: stats['max'] for wx, stats in wx2_wdist_stats.items() if 'max' in stats}
#wx2_prad = get_metric(metrics, 'wx2_pdist_stats', 'max')
#wx2_wrad = get_metric(metrics, 'wx2_wdist_stats', 'max')
wx_sample1 = select_by_metric(wx2_nMembers)
wx_sample2 = select_by_metric(wx2_prad)
wx_sample3 = select_by_metric(wx2_wrad)
wx_sample = wx_sample1 + wx_sample2 + wx_sample3
overlap123 = len(wx_sample) - len(set(wx_sample))
print('overlap123 = %r' % overlap123)
wx_sample = set(wx_sample)
print('len(wx_sample) = %r' % len(wx_sample))
#make_scatterplots(ibs, figdir, invindex, metrics)
vocabdir = join(figdir, 'vocab_patches2')
wx2_dpath = get_word_dpaths(vocabdir, wx_sample, metrics)
make_wordfigures(ibs, metrics, invindex, figdir, wx_sample, wx2_dpath)
def metric_clamped_stat(metrics, wx_list, key):
"""
if key is a tuple it specifies a statdict and a chosen stat
else its just a key
"""
try:
if isinstance(key, tuple):
metrickey, statkey = key
wx2_statdict = metrics.__dict__[metrickey]
def wx2_metric(wx):
return wx2_statdict[wx][statkey] if wx in wx2_statdict and statkey in wx2_statdict[wx] else -1
stat_list = np.array([wx2_metric(wx) for wx in wx_list])
else:
wx2_metric = metrics.__dict__[key]
stat_list = np.array([wx2_metric[wx] for wx in wx_list])
stat_list = ut.negative_minclamp_inplace(stat_list)
except Exception as ex:
ut.printex(ex, keys=['key'])
return stat_list
def compute_word_metrics(invindex):
invindex.idx2_wxs = np.array(invindex.idx2_wxs)
wx2_idxs = invindex.wx2_idxs
idx2_dvec = invindex.idx2_dvec
words = invindex.words
wx2_pdist = {}
wx2_wdist = {}
wx2_nMembers = {}
wx2_pdist_stats = {}
wx2_wdist_stats = {}
wordidx_iter = ut.progiter(six.iteritems(wx2_idxs), lbl='Word Dists: ', num=len(wx2_idxs), freq=200)
for _item in wordidx_iter:
wx, idxs = _item
dvecs = idx2_dvec.take(idxs, axis=0)
word = words[wx:wx + 1]
wx2_pdist[wx] = spdist.pdist(dvecs) # pairwise dist between words
wx2_wdist[wx] = ut.euclidean_dist(dvecs, word) # dist to word center
wx2_nMembers[wx] = len(idxs)
for wx, pdist in ut.progiter(six.iteritems(wx2_pdist), lbl='Word pdist Stats: ', num=len(wx2_idxs), freq=2000):
wx2_pdist_stats[wx] = ut.get_stats(pdist)
for wx, wdist in ut.progiter(six.iteritems(wx2_wdist), lbl='Word wdist Stats: ', num=len(wx2_idxs), freq=2000):
wx2_wdist_stats[wx] = ut.get_stats(wdist)
ut.print_stats(wx2_nMembers.values(), 'word members')
metrics = Metrics(wx2_nMembers, wx2_pdist_stats, wx2_wdist_stats)
return metrics
#word_pdist = spdist.pdist(invindex.words)
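# Illustrative sketch (assumption, not in the original module): the Metrics
# namedtuple built above maps each word index to a member count and to
# ut.get_stats() dictionaries, along these lines:
#
#   metrics.wx2_nMembers[wx]    -> 42
#   metrics.wx2_pdist_stats[wx] -> {'max': 1.3, 'min': 0.1, 'mean': 0.7,
#                                   'std': 0.2, 'nMax': 1, 'nMin': 1,
#                                   'shape': (861,)}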
def draw_scatterplot(figdir, ibs, datax, datay, xlabel, ylabel, color, fnum=None):
from plottool import df2
datac = [color for _ in range(len(datax))]
assert len(datax) == len(datay), '%r %r' % (len(datax), len(datay))
df2.figure(fnum=fnum, doclf=True, docla=True)
df2.plt.scatter(datax, datay, c=datac, s=20, marker='o', alpha=.9)
ax = df2.gca()
title = '%s vs %s.\nnWords=%r. db=%r' % (xlabel, ylabel, len(datax), ibs.get_dbname())
df2.set_xlabel(xlabel)
df2.set_ylabel(ylabel)
ax.set_ylim(min(datay) - 1, max(datay) + 1)
ax.set_xlim(min(datax) - 1, max(datax) + 1)
df2.dark_background()
df2.set_figtitle(title)
figpath = join(figdir, title)
df2.save_figure(fnum, figpath)
def dump_word_patches(ibs, vocabdir, invindex, wx_sample, metrics):
"""
Dumps word member patches to disk
"""
wx2_dpath = get_word_dpaths(vocabdir, wx_sample, metrics)
# Write each patch from each annotation to disk
idx2_daid = invindex.idx2_daid
daids = invindex.daids
idx2_dfx = invindex.idx2_dfx
#maws_list = invindex.idx2_wxs[idxs]
# Loop over all annotations skipping the ones without any words in the sample
ax2_idxs = [np.where(idx2_daid == aid_)[0] for aid_ in ut.progiter(daids, 'Building Forward Index: ', freq=100)]
patchdump_iter = ut.progiter(zip(daids, ax2_idxs), freq=1,
lbl='Dumping Selected Patches: ', num=len(daids))
for aid, idxs in patchdump_iter:
wxs_list = invindex.idx2_wxs[idxs]
if len(set(ut.flatten(wxs_list)).intersection(set(wx_sample))) == 0:
# skip this annotation
continue
fx_list = idx2_dfx[idxs]
chip = ibs.get_annot_chips(aid)
chip_kpts = ibs.get_annot_kpts(aid)
nid = ibs.get_annot_name_rowids(aid)
patches, subkpts = ptool.get_warped_patches(chip, chip_kpts)
for fx, wxs, patch in zip(fx_list, wxs_list, patches):
assert len(wxs) == 1, 'did you multiassign the database? If so implement it here too'
for k, wx in enumerate(wxs):
if wx not in wx_sample:
continue
patch_fname = 'patch_nid=%04d_aid=%04d_fx=%04d_k=%d' % (nid, aid, fx, k)
fpath = join(wx2_dpath[wx], patch_fname)
#gtool.imwrite(fpath, patch, fallback=True)
gtool.imwrite_fallback(fpath, patch)
def get_word_dname(wx, metrics):
stats_ = metrics.wx2_wdist_stats[wx]
wname_clean = 'wx=%06d' % wx
stats1 = 'max={max},min={min},mean={mean},'.format(**stats_)
stats2 = 'std={std},nMaxMin=({nMax},{nMin}),shape={shape}'.format(**stats_)
fname_fmt = wname_clean + '_{stats1}{stats2}'
fmt_dict = dict(stats1=stats1, stats2=stats2)
word_dname = ut.long_fname_format(fname_fmt, fmt_dict, ['stats2', 'stats1'], max_len=250, hashlen=4)
return word_dname
def get_word_dpaths(vocabdir, wx_sample, metrics):
"""
Gets word folder names and ensure they exist
"""
ut.ensuredir(vocabdir)
wx2_dpath = {wx: join(vocabdir, get_word_dname(wx, metrics)) for wx in wx_sample}
iter_ = ut.progiter(six.itervalues(wx2_dpath), lbl='Ensuring word_dpath: ', freq=200)
for dpath in iter_:
ut.ensuredir(dpath)
return wx2_dpath
def select_by_metric(wx2_metric, per_quantile=20):
# sample a few words around the quantile points
metric_list = np.array(list(wx2_metric.values()))
wx_list = np.array(list(wx2_metric.keys()))
metric_quantiles = spms.mquantiles(metric_list)
metric_quantiles = np.array(metric_quantiles.tolist() + [metric_list.max(), metric_list.min()])
wx_interest = []
for scalar in metric_quantiles:
dist = (metric_list - scalar) ** 2
wx_quantile = wx_list[dist.argsort()[0:per_quantile]]
wx_interest.extend(wx_quantile.tolist())
overlap = len(wx_interest) - len(set(wx_interest))
if overlap > 0:
print('warning: overlap=%r' % overlap)
return wx_interest
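# Toy walkthrough (assumption, not in the original module): with
# wx2_metric = {wx: float(wx) for wx in range(100)} and per_quantile=2,
# mquantiles yields the 25/50/75% points, the max and min are appended, and
# the two word indices nearest each of those five scalars are kept, so about
# ten indices come back (any duplicates trigger the overlap warning above).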
def get_metric(metrics, tupkey, statkey=None):
wx2_metric = metrics.__dict__[tupkey]
if statkey is not None:
wx2_submetric = [stats_[statkey] for wx, stats_ in six.iteritems(wx2_metric) if statkey in stats_]
return wx2_submetric
return wx2_metric
#{wx: pdist_stats['max'] for wx, pdist_stats in six.iteritems(wx2_pdist_stats) if 'max' in pdist_stats}
#wx2_wrad = {wx: wdist_stats['max'] for wx, wdist_stats in six.iteritems(wx2_wdist_stats) if 'max' in wdist_stats}
def make_scatterplots(ibs, figdir, invindex, metrics):
from plottool import draw_func2 as df2
wx2_pdist_stats = metrics.wx2_pdist_stats
    wx2_wdist_stats = metrics.wx2_wdist_stats
wx2_nMembers = metrics.wx2_nMembers
def wx2_avepdist(wx):
return wx2_pdist_stats[wx]['mean'] if wx in wx2_pdist_stats and 'mean' in wx2_pdist_stats[wx] else -1
def wx2_avewdist(wx):
return wx2_wdist_stats[wx]['mean'] if wx in wx2_wdist_stats and 'mean' in wx2_wdist_stats[wx] else -1
wx2_idf = invindex.wx2_idf
# data
wx_list = list(wx2_idf.keys())
idf_list = [wx2_idf[wx] for wx in wx_list]
nPoints_list = [wx2_nMembers[wx] if wx in wx2_nMembers else -1 for wx in wx_list]
avepdist_list = [wx2_avepdist(wx) for wx in wx_list]
avewdist_list = [wx2_avewdist(wx) for wx in wx_list]
df2.reset()
draw_scatterplot(figdir, ibs, idf_list, avepdist_list, 'idf', 'mean(pdist)', df2.WHITE, fnum=1)
draw_scatterplot(figdir, ibs, idf_list, avewdist_list, 'idf', 'mean(wdist)', df2.PINK, fnum=3)
draw_scatterplot(figdir, ibs, nPoints_list, avepdist_list, 'nPointsInWord', 'mean(pdist)', df2.GREEN, fnum=2)
draw_scatterplot(figdir, ibs, avepdist_list, avewdist_list, 'mean(pdist)', 'mean(wdist)', df2.YELLOW, fnum=4)
draw_scatterplot(figdir, ibs, nPoints_list, avewdist_list, 'nPointsInWord', 'mean(wdist)', df2.ORANGE, fnum=5)
draw_scatterplot(figdir, ibs, idf_list, nPoints_list, 'idf', 'nPointsInWord', df2.LIGHT_BLUE, fnum=6)
#df2.present()
def make_wordfigures(ibs, metrics, invindex, figdir, wx_sample, wx2_dpath):
"""
Builds mosaics of patches assigned to words in sample
    outputs them to disk
"""
from plottool import draw_func2 as df2
import vtool as vt
import parse
vocabdir = join(figdir, 'vocab_patches2')
ut.ensuredir(vocabdir)
dump_word_patches(ibs, vocabdir, invindex, wx_sample, metrics)
# COLLECTING PART --- collects patches in word folders
#vocabdir
seldpath = vocabdir + '_selected'
ut.ensurepath(seldpath)
# stack for show
for wx, dpath in ut.progiter(six.iteritems(wx2_dpath), lbl='Dumping Word Images:', num=len(wx2_dpath), freq=1, backspace=False):
#df2.rrr()
fpath_list = ut.ls(dpath)
fname_list = [basename(fpath_) for fpath_ in fpath_list]
patch_list = [vt.imread(fpath_) for fpath_ in fpath_list]
# color each patch by nid
nid_list = [int(parse.parse('{}_nid={nid}_{}', fname)['nid']) for fname in fname_list]
nid_set = set(nid_list)
nid_list = np.array(nid_list)
if len(nid_list) == len(nid_set):
# no duplicate names
newpatch_list = patch_list
else:
# duplicate names. do coloring
sortx = nid_list.argsort()
patch_list = np.array(patch_list, dtype=object)[sortx]
fname_list = np.array(fname_list, dtype=object)[sortx]
nid_list = nid_list[sortx]
colors = (255 * np.array(df2.distinct_colors(len(nid_set)))).astype(np.int32)
color_dict = dict(zip(nid_set, colors))
wpad, hpad = 3, 3
newshape_list = [tuple((np.array(patch.shape) + (wpad * 2, hpad * 2, 0)).tolist()) for patch in patch_list]
color_list = [color_dict[nid_] for nid_ in nid_list]
newpatch_list = [np.zeros(shape) + color[None, None] for shape, color in zip(newshape_list, color_list)]
for patch, newpatch in zip(patch_list, newpatch_list):
newpatch[wpad:-wpad, hpad:-hpad, :] = patch
#img_list = patch_list
#bigpatch = vt.stack_image_recurse(patch_list)
#bigpatch = vt.stack_image_list(patch_list, vert=False)
bigpatch = vt.stack_square_images(newpatch_list)
bigpatch_fpath = join(seldpath, basename(dpath) + '_patches.png')
#
def _dictstr(dict_):
str_ = ut.dict_str(dict_, newlines=False)
str_ = str_.replace('\'', '').replace(': ', '=').strip('{},')
return str_
figtitle = '\n'.join([
'wx=%r' % wx,
'stat(pdist): %s' % _dictstr(metrics.wx2_pdist_stats[wx]),
'stat(wdist): %s' % _dictstr(metrics.wx2_wdist_stats[wx]),
])
df2.figure(fnum=1, doclf=True, docla=True)
fig, ax = df2.imshow(bigpatch, figtitle=figtitle)
#fig.show()
df2.set_figtitle(figtitle)
df2.adjust_subplots(top=.878, bottom=0)
df2.save_figure(1, bigpatch_fpath)
#gtool.imwrite(bigpatch_fpath, bigpatch)
def get_cached_vocabs():
import parse
# Parse some of the training data from fname
parse_str = '{}nC={num_cent},{}_DPTS(({num_dpts},{dim}){}'
smkdir = ut.get_app_resource_dir('smk')
fname_list = ut.glob(smkdir, 'akmeans*')
fpath_list = [join(smkdir, fname) for fname in fname_list]
result_list = [parse.parse(parse_str, fpath) for fpath in fpath_list]
nCent_list = [int(res['num_cent']) for res in result_list]
nDpts_list = [int(res['num_dpts']) for res in result_list]
key_list = zip(nCent_list, nDpts_list)
fpath_sorted = ut.sortedby(fpath_list, key_list, reverse=True)
return fpath_sorted
def view_vocabs():
"""
    looks in vocab cachedir and prints info / visualizes the vocabs using PCA
CommandLine:
python -m ibeis.algo.hots.smk.smk_plots --test-view_vocabs --show
Example:
>>> # DISABLE_DOCTEST
>>> from ibeis.algo.hots.smk.smk_plots import * # NOQA
>>> # build test data
>>> # execute function
>>> view_vocabs()
>>> ut.quit_if_noshow()
>>> ut.show_if_requested()
"""
from vtool import clustering2 as clustertool
import numpy as np
fpath_sorted = get_cached_vocabs()
num_pca_dims = 2 # 3
whiten = False
kwd = dict(num_pca_dims=num_pca_dims,
whiten=whiten,)
def view_vocab(fpath):
# QUANTIZED AND FLOATING POINT STATS
centroids = ut.load_cPkl(fpath)
        print('viewing vocab fpath=%r' % (fpath,))
smk_debug.vector_stats(centroids, 'centroids')
#centroids_float = centroids.astype(np.float64) / 255.0
centroids_float = centroids.astype(np.float64) / 512.0
smk_debug.vector_stats(centroids_float, 'centroids_float')
fig = clustertool.plot_centroids(centroids, centroids, labels='centroids',
fnum=1, prefix='centroid vecs\n', **kwd)
fig.show()
for count, fpath in enumerate(fpath_sorted):
if count > 0:
break
view_vocab(fpath)
def plot_chip_metric(ibs, aid, metric=None, fnum=1, lbl='', figtitle='', colortype='score',
darken=.5, cmap_='hot', reverse_cmap=False, **kwargs):
"""
Plots one annotation with one metric.
The word metric is used liberally.
Example:
>>> from ibeis.algo.hots.smk.smk_plots import * # NOQA
>>> from ibeis.algo.hots.smk import smk_debug
>>> from ibeis.algo.hots.smk import smk_plots
>>> from ibeis.algo.hots.smk import smk_repr
>>> #tup = smk_debug.testdata_raw_internals0(db='GZ_ALL', nWords=64000)
>>> #tup = smk_debug.testdata_raw_internals0(db='GZ_ALL', nWords=8000)
>>> #tup = smk_debug.testdata_raw_internals0(db='PZ_Master0', nWords=64000)
>>> tup = smk_debug.testdata_raw_internals0(db='PZ_Mothers', nWords=8000)
>>> ibs, annots_df, daids, qaids, invindex, qreq_ = tup
>>> smk_repr.compute_data_internals_(invindex, qreq_.qparams, delete_rawvecs=False)
>>> invindex.idx2_wxs = np.array(invindex.idx2_wxs)
>>> metric = None
>>> aid = 1
>>> fnum = 0
>>> lbl='test'
>>> colortype='score'
>>> kwargs = {'annote': False}
#>>> df2.rrr()
>>> smk_plots.plot_chip_metric(ibs, aid, metric, fnum, lbl, colortype, **kwargs)
>>> df2.present()
"""
import plottool.draw_func2 as df2
from ibeis.viz import viz_chip
df2.figure(fnum=fnum, doclf=True, docla=True)
if metric is not None:
if colortype == 'score':
colors = df2.scores_to_color(metric, cmap_=cmap_, reverse_cmap=reverse_cmap)
elif colortype == 'label':
colors = df2.label_to_colors(metric)
elif colortype == 'custom':
# Give ranks of -1 and -2 special meaning
val2_customcolor = {
-1: df2.UNKNOWN_PURP,
-2: df2.LIGHT_BLUE,
}
            # Inconsistent but visible colors
scale_max = .7
#consistent colors (needs to know highest K)
#maxval = np.array(metric).max()
#scale_max = .7 * (float(maxval) / 20.0)
colors = df2.scores_to_color(metric, cmap_=cmap_,
reverse_cmap=reverse_cmap,
scale_max=scale_max,
val2_customcolor=val2_customcolor)
else:
raise ValueError('no known colortype = %r' % (colortype,))
else:
colors = 'distinct'
viz_chip.show_chip(ibs, aid, color=colors, darken=darken,
ell_alpha=.8,
#ell_linewidth=4,
ell_linewidth=2,
**kwargs)
df2.set_figtitle(figtitle)
if metric is not None:
cb = df2.colorbar(metric, colors, custom=(colortype == 'custom'))
cb.set_label(lbl)
def get_qres_and_closest_valid_k(ibs, aid, K=4):
"""
Example:
>>> from ibeis.algo.hots.smk.smk_plots import * # NOQA
>>> import numpy as np
>>> from ibeis.algo.hots import query_request
>>> import ibeis
>>> ibs = ibeis.opendb('testdb1')
>>> aid = 2
"""
# FIXME: Put query_cfg into the qreq_ structure by itself.
# Don't change the IBEIS Structure
cfgdict = {
'pipeline_root': 'vsmany',
'with_metadata': True,
'K': K,
#'sv_on': False,
'sv_on': True,
#K=4
}
#ibs.cfg.query_cfg.pipeline_root = 'vsmany'
#ibs.cfg.query_cfg.with_metadata = True
qaid2_qres, qreq_ = ibs.query_chips([aid], ibs.get_valid_aids(), use_cache=False, return_request=True, cfgdict=cfgdict)
indexer = qreq_.indexer
qres = qaid2_qres[aid]
    # NOTE: this early return leaves the ground-truth rank analysis below
    # unreachable; callers receive (qres, None) and already guard against a
    # None closest-k list.
    return qres, None
(qfx2_idx, qfx2_dist) = qres.metadata['nns']
nid = ibs.get_annot_name_rowids(aid)
qfx2_aids = indexer.get_nn_aids(qfx2_idx)
qfx2_nids = ibs.get_annot_name_rowids(qfx2_aids)
    qfx2_notself = qfx2_aids != aid
    qfx2_correct = np.logical_and(qfx2_nids == nid, qfx2_notself)
# Mark the top ranked groundtruth
qfx2_valid_ks = [np.flatnonzero(ranks) for ranks in qfx2_correct]
NO_VALID_RANKS_CODE = -2
POSSIBLY_VALID_RANKS_CODE = -1
qfx2_closest_k = [ks[0] if len(ks) > 0 else NO_VALID_RANKS_CODE for ks in qfx2_valid_ks]
# Mark cases where it is not possible to know the groundtruth
    qfx2_isimpossible = np.logical_and(qfx2_nids < 0, qfx2_notself)
qfx2_possibly_impossible_ks = [np.flatnonzero(ranks) for ranks in qfx2_isimpossible]
# Mark as POSSIBLY_VALID_RANKS_CODE if there is no best k
#
def is_possible(k, pi_ks):
ERR_ON_THE_SIDE_OF_THE_IMPOSSIBLE = False
if len(pi_ks) == 0:
return False
elif k == NO_VALID_RANKS_CODE:
return True
elif ERR_ON_THE_SIDE_OF_THE_IMPOSSIBLE and pi_ks[0] < k:
return True
else:
return False
qfx2_closest_k2 = [POSSIBLY_VALID_RANKS_CODE if is_possible(k, pi_ks) else k
for pi_ks, k in zip(qfx2_possibly_impossible_ks, qfx2_closest_k)]
return qres, qfx2_closest_k2
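# Code legend (restating the logic above): in qfx2_closest_k2, a value
# k >= 0 is the rank of the nearest correct neighbour for that query
# feature, NO_VALID_RANKS_CODE (-2) means no correct neighbour was found,
# and POSSIBLY_VALID_RANKS_CODE (-1) marks features whose ground truth is
# unknowable because unnamed annotations ranked nearby; plot_chip_metric
# gives -1/-2 special colors via val2_customcolor.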
def viz_annot_with_metrics(ibs, invindex, aid, metrics,
metric_keys=['wx2_nMembers',
('wx2_pdist_stats', 'mean'),
('wx2_wdist_stats', 'mean')],
show_orig=True,
show_idf=True,
show_words=False,
show_analysis=True,
show_aveprecision=True,
qfx2_closest_k_list=None,
show_word_correct_assignments=False,
qres_list=None):
"""
Args:
ibs (IBEISController):
invindex (InvertedIndex): object for fast vocab lookup
aid (int):
metrics (namedtuple):
Example:
>>> from ibeis.algo.hots.smk.smk_plots import * # NOQA
>>> from ibeis.algo.hots.smk import smk_debug
>>> from ibeis.algo.hots.smk import smk_repr
>>> #tup = smk_debug.testdata_raw_internals0(db='GZ_ALL', nWords=64000)
>>> #tup = smk_debug.testdata_raw_internals0(db='GZ_ALL', nWords=8000)
>>> #tup = smk_debug.testdata_raw_internals0(db='PZ_Master0', nWords=64000)
>>> tup = smk_debug.testdata_raw_internals0(db='PZ_Mothers', nWords=8000)
>>> ibs, annots_df, daids, qaids, invindex, qreq_ = tup
>>> smk_repr.compute_data_internals_(invindex, qreq_.qparams, delete_rawvecs=False)
>>> invindex.idx2_wxs = np.array(invindex.idx2_wxs)
>>> metric_keys=['wx2_nMembers', ('wx2_pdist_stats', 'mean'), ('wx2_wdist_stats', 'mean')]
>>> metrics = compute_word_metrics(invindex)
>>> aid = 1
"""
#viz_chip.rrr()
#df2.rrr()
kpts = ibs.get_annot_kpts(aid)
if ut.VERBOSE:
ut.super_print(kpts)
if show_word_correct_assignments or show_idf:
# Get only the first assigned word
# FIXME: need to look at multi-assignment
_mask = invindex.idx2_daid == aid
fxs = invindex.idx2_dfx[_mask]
wxs = invindex.idx2_wxs[_mask].T[0].T
assert len(fxs) == len(kpts)
assert len(fxs) == len(wxs)
fnum = 1
dbname = ibs.get_dbname()
def _plot(metric, fnum=1, lbl='', annote=True, darken=.1, colortype='score', **kwargs):
        print('plotting fnum=%r' % fnum)
#lblaug = ' db=%r, nWords = %r' % (dbname, nWords)
lblaug = ' db=%r' % (dbname)
figtitle = lbl + lblaug
lbl = lbl
plot_chip_metric(ibs, aid, metric=metric, fnum=fnum, lbl=lbl, figtitle=figtitle,
annote=annote, darken=darken, colortype=colortype, **kwargs)
return fnum + 1
# Original Plot
if show_orig:
fnum = _plot(None, fnum=fnum, lbl='Orig Chip', annote=False, darken=None)
# IDF Plot
if show_idf:
idf_list = np.array(list(ut.dict_take_gen(invindex.wx2_idf, wxs)))
fnum = _plot(idf_list, fnum=fnum, lbl='IDF')
print('stats(idf_list) = ' + ut.get_stats_str(idf_list))
# Word Plot
if show_words:
fnum = _plot(wxs, fnum=fnum, lbl='Words', colortype='label')
# LNBNN Result Plots
if qfx2_closest_k_list is not None:
for qres, qfx2_closest_k in zip(qres_list, qfx2_closest_k_list):
print(' --- qres item ---')
if qres is not None:
from ibeis.algo.hots.hots_query_result import QueryResult
assert isinstance(qres, QueryResult)
if show_analysis:
qres.show_analysis(ibs=ibs, fnum=fnum, figtitle=qres.make_smaller_title())
fnum += 1
if show_aveprecision:
qres.show_precision_recall_curve(ibs=ibs, fnum=fnum)
fnum += 1
if qfx2_closest_k is not None:
# Plot ranked positions
qfx2_closest_k = np.array(qfx2_closest_k)
qfx2_closest_k_qeq0 = qfx2_closest_k[qfx2_closest_k >= 0]
qfx2_closest_k_lt0 = qfx2_closest_k[qfx2_closest_k < 0]
print('stats(qfx2_closest_k_qeq0) = ' + ut.get_stats_str(qfx2_closest_k_qeq0))
print('stats(qfx2_closest_k_lt0) = ' + ut.get_stats_str(qfx2_closest_k_lt0))
fnum = _plot(qfx2_closest_k, fnum=fnum, lbl='Correct Ranks ' + qres.make_smaller_title(), colortype='custom', reverse_cmap=True)
# Correct word assignment plots
if show_word_correct_assignments:
unique_wxs, unique_inverse = np.unique(wxs, return_inverse=True)
# Get the aids that belong to each word
_idxs_list = ut.dict_take(invindex.wx2_idxs, unique_wxs)
_aids_list = [invindex.idx2_daid.take(idxs) for idxs in _idxs_list]
# Check if this word will provide a correct assignment -
# two ground truth chip exist within the same word
gt_aids = np.array(ibs.get_annot_groundtruth(aid))
_hastp_list = np.array([len(np.intersect1d(aids, gt_aids)) > 0 for aids in _aids_list])
# Map back to the space of features
# mark each feature match as having a correct word mapping or not
hascorrectmatch = _hastp_list[unique_inverse]
hascorrectmatch_ = hascorrectmatch.astype(np.int32) * 3 - 2
lbl = 'Correct Words ' + qres.make_smaller_title() + '\n Yellow means the word contains a correct match in the word\'s invindex. Blue is the opposite.'
fnum = _plot(hascorrectmatch_, fnum=fnum, lbl=lbl, colortype='custom', reverse_cmap=False)
# Word Metric Plots
for count, metrickey in enumerate(metric_keys):
if isinstance(metrickey, tuple):
#lbl = repr(metrickey)
def fixstr(str_):
return str_.replace('wx2_', '').replace('_stats', '')
lbl = '%s(%s)' % (metrickey[1].upper(), fixstr(metrickey[0]))
else:
lbl = str(metrickey)
metric_list = metric_clamped_stat(metrics, wxs, metrickey)
fnum = _plot(metric_list, fnum=fnum, lbl=lbl)
def smk_plots_main():
"""
smk
python smk_plots.py --db PZ_MTEST --notoolbar
CommandLine:
python -m ibeis.algo.hots.smk.smk_plots --test-smk_plots_main
python -m ibeis.algo.hots.smk.smk_plots --test-smk_plots_main --db PZ_MTEST --notoolbar
Example:
>>> # DISABLE_DOCTEST
>>> from ibeis.algo.hots.smk.smk_plots import * # NOQA
>>> smk_plots_main()
"""
from ibeis.algo.hots.smk import smk_plots
import utool as ut
#from plottool import draw_func2 as df2
kwargs = {
#'db': 'GZ_ALL',
#'db': 'PZ_MTEST',
'db': ut.get_argval('--db', str, default='testdb1'),
'nWords': ut.get_argval('--nWords', int, default=8000),
'delete_rawvecs': False,
}
(ibs, annots_df, daids, qaids, invindex, qreq_) = smk_debug.testdata_internals_full(**kwargs)
kwargs = {}
aid = 3
#try:
# testdata = ('metrics',)
# metrics = ut.load_testdata(*testdata)
#except Exception as ex:
metrics = smk_plots.compute_word_metrics(invindex)
#ut.save_testdata(*testdata)
valid_aids = ibs.get_valid_aids()
# HACK
if ibs.get_dbname().startswith('GZ_'):
ibs.cfg.detect_cfg.species_text = 'zebra_grevys'
else:
ibs.cfg.detect_cfg.species_text = 'zebra_plains'
# Define the plots you want
startx = ut.get_argval(('--startx', '--x'), int, default=min(18, len(valid_aids) - 1))
for aid in ut.InteractiveIter(valid_aids, startx=startx):
#df2.rrr()
#smk_plots.rrr()
print('[smk_plot] visualizing annotation aid=%r' % (aid,))
kwargs = smk_plots.main_options()
qres_list = []
qfx2_closest_k_list = []
K_list = kwargs.pop('K_list')
for K in K_list:
qres, qfx2_closest_k = smk_plots.get_qres_and_closest_valid_k(ibs, aid, K=K)
qres_list.append(qres)
qfx2_closest_k_list.append(qfx2_closest_k)
smk_plots.viz_annot_with_metrics(ibs, invindex, aid, metrics,
qfx2_closest_k_list=qfx2_closest_k_list,
qres_list=qres_list, **kwargs)
smk_plots.present()
#return execstr
def present():
# In its own function for reloadableness
from plottool import draw_func2 as df2
return df2.present(max_rows=4, row_first=False)
def main_options():
metric_keys = [
#'wx2_nMembers',
#('wx2_pdist_stats', 'mean'),
#('wx2_wdist_stats', 'mean'),
]
kwargs = dict(
show_orig=False,
show_idf=False,
show_words=False,
show_analysis=True,
show_aveprecision=False,
show_word_correct_assignments=True,
metric_keys=metric_keys,
K_list=[2, 4, 10],
#K_list=[10, 20],
#K_list=[4, 10],
)
return kwargs
#if __name__ == '__main__':
# """
# >>> aid = 1
# """
# execstr = smk_plots_main()
# #exec(execstr)
if __name__ == '__main__':
"""
python -m ibeis.algo.hots.smk.smk_plots --test-view_vocabs --show
python -m ibeis.algo.hots.smk.smk_debug --test-main_smk_debug
python -m ibeis.algo.hots.smk.smk_plots --test-smk_plots_main --db PZ_MTEST --notoolbar
CommandLine:
python -m ibeis.algo.hots.smk.smk_plots
python -m ibeis.algo.hots.smk.smk_plots --allexamples
python -m ibeis.algo.hots.smk.smk_plots --allexamples --noface --nosrc
"""
import multiprocessing
multiprocessing.freeze_support() # for win32
import utool as ut # NOQA
ut.doctest_funcs()
|
{
"content_hash": "cec460f689210520d0d487ac94878d9f",
"timestamp": "",
"source": "github",
"line_count": 781,
"max_line_length": 159,
"avg_line_length": 40.27016645326505,
"alnum_prop": 0.6014117198181298,
"repo_name": "SU-ECE-17-7/ibeis",
"id": "7cb9978371bd76bab1c7731dbf84dd232503dc09",
"size": "31497",
"binary": false,
"copies": "1",
"ref": "refs/heads/next",
"path": "ibeis/algo/hots/smk/smk_plots.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CMake",
"bytes": "331"
},
{
"name": "CSS",
"bytes": "26792"
},
{
"name": "HTML",
"bytes": "33762203"
},
{
"name": "Inno Setup",
"bytes": "1585"
},
{
"name": "JavaScript",
"bytes": "227454"
},
{
"name": "Jupyter Notebook",
"bytes": "66346367"
},
{
"name": "Python",
"bytes": "6112508"
},
{
"name": "Shell",
"bytes": "58211"
}
],
"symlink_target": ""
}
|
import subprocess
import sys
from setuptools import setup
try:
cmd = (
"g++ -std=c++11 -O3 scib/knn_graph/knn_graph.cpp -o scib/knn_graph/knn_graph.o"
)
sys.stdout.write("Compile knn_graph C++ code for LISI metric...\n")
sys.stdout.flush()
subprocess.check_output(
cmd, stderr=subprocess.STDOUT, shell=True, universal_newlines=True
)
except subprocess.CalledProcessError as exc:
sys.stdout.write(
f"Failed to compile knn_graph for LISI - skipping...\n{exc.returncode}\n{exc.output}"
)
sys.stdout.flush()
setup()
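# Equivalent manual step (assumption, not part of the original setup.py):
# the same binary can be built by hand before `pip install .` with
#
#   g++ -std=c++11 -O3 scib/knn_graph/knn_graph.cpp -o scib/knn_graph/knn_graph.o
#
# The except branch above means a missing compiler only disables the LISI
# metric instead of failing the whole install.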
|
{
"content_hash": "4f810386a076e59e63a4016e080fe07b",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 93,
"avg_line_length": 27.38095238095238,
"alnum_prop": 0.6660869565217391,
"repo_name": "theislab/scib",
"id": "45677ce07b5dbdf6ee8ee7d6adcfcffcf7dff79a",
"size": "575",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "14207"
},
{
"name": "Makefile",
"bytes": "214"
},
{
"name": "Python",
"bytes": "172789"
}
],
"symlink_target": ""
}
|
import sys
from typing import Any, Callable, Dict, Optional, TypeVar
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._private_link_resources_operations import build_list_by_storage_account_request
if sys.version_info >= (3, 8):
from typing import Literal # pylint: disable=no-name-in-module, ungrouped-imports
else:
from typing_extensions import Literal # type: ignore # pylint: disable=ungrouped-imports
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class PrivateLinkResourcesOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.storage.v2020_08_01_preview.aio.StorageManagementClient`'s
:attr:`private_link_resources` attribute.
"""
models = _models
def __init__(self, *args, **kwargs) -> None:
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
@distributed_trace_async
async def list_by_storage_account(
self, resource_group_name: str, account_name: str, **kwargs: Any
) -> _models.PrivateLinkResourceListResult:
"""Gets the private link resources that need to be created for a storage account.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive. Required.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only. Required.
:type account_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PrivateLinkResourceListResult or the result of cls(response)
:rtype: ~azure.mgmt.storage.v2020_08_01_preview.models.PrivateLinkResourceListResult
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop(
"api_version", _params.pop("api-version", "2020-08-01-preview")
) # type: Literal["2020-08-01-preview"]
cls = kwargs.pop("cls", None) # type: ClsType[_models.PrivateLinkResourceListResult]
request = build_list_by_storage_account_request(
resource_group_name=resource_group_name,
account_name=account_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.list_by_storage_account.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize("PrivateLinkResourceListResult", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list_by_storage_account.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/privateLinkResources"} # type: ignore
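# Usage sketch (assumption, not part of the generated file): the operation is
# reached through the async management client; the names below are
# hypothetical.
#
#   async with StorageManagementClient(credential, subscription_id) as client:
#       result = await client.private_link_resources.list_by_storage_account(
#           resource_group_name="my-rg", account_name="mystorageacct")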
|
{
"content_hash": "8253600c6e691d9df38d04da9ee92aae",
"timestamp": "",
"source": "github",
"line_count": 113,
"max_line_length": 211,
"avg_line_length": 43.823008849557525,
"alnum_prop": 0.6861873990306947,
"repo_name": "Azure/azure-sdk-for-python",
"id": "bb0c37412d60df0a7f4962cb4ae486633b2925ae",
"size": "5452",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/storage/azure-mgmt-storage/azure/mgmt/storage/v2020_08_01_preview/aio/operations/_private_link_resources_operations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
}
|
import datetime
import PyRSS2Gen
rss = PyRSS2Gen.RSS2(
title = "Andrew's PyRSS2Gen feed",
link = "http://www.dalkescientific.com/Python/PyRSS2Gen.html",
description = "The latest news about PyRSS2Gen, a "
"Python library for generating RSS2 feeds",
lastBuildDate = datetime.datetime.utcnow(),
items = [
PyRSS2Gen.RSSItem(
title = "PyRSS2Gen-0.0 released",
link = "http://www.dalkescientific.com/news/030906-PyRSS2Gen.html",
description = "Dalke Scientific today announced PyRSS2Gen-0.0, "
"a library for generating RSS feeds for Python. ",
guid = PyRSS2Gen.Guid("http://www.dalkescientific.com/news/"
"030906-PyRSS2Gen.html"),
pubDate = datetime.datetime(2003, 9, 6, 21, 31)),
PyRSS2Gen.RSSItem(
title = "Thoughts on RSS feeds for bioinformatics",
link = "http://www.dalkescientific.com/writings/diary/"
"archive/2003/09/06/RSS.html",
description = "One of the reasons I wrote PyRSS2Gen was to "
"experiment with RSS for data collection in "
"bioinformatics. Last year I came across...",
guid = PyRSS2Gen.Guid("http://www.dalkescientific.com/writings/"
"diary/archive/2003/09/06/RSS.html"),
pubDate = datetime.datetime(2003, 9, 6, 21, 49)),
])
with open("pyrss2gen.xml", "w") as outfile:
    rss.write_xml(outfile)
|
{
"content_hash": "0c1908de3954b91f4fe8181e4220a9e1",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 76,
"avg_line_length": 44.84848484848485,
"alnum_prop": 0.5972972972972973,
"repo_name": "liamzebedee/PyRSSGen",
"id": "4e6b29ad8a199f08e3a6d0a775c9800fbbaa62d5",
"size": "1529",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "example.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "22055"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
import logging
from enum import Enum
import requests
try:
    # python3.x
    from urllib.parse import urljoin
except ImportError:
    # python2.x
    # noinspection PyUnresolvedReferences
    from urlparse import urljoin
try:
    # python2.x: cover both byte and unicode strings
    string_types = (str, unicode)  # noqa: F821
except NameError:
    # python3.x
    string_types = (str,)
__all__ = ['Client', 'BadResponse']
GODADDY_API_BASE_URL = 'https://api.godaddy.com/'
GODADDY_API_VERSION = 'v1'
class Client(object):
"""The GoDaddyPy Client.
This client is used to connect to the GoDaddy API and to perform requests with said API.
"""
class Domain(Enum):
AUTH_CODE = 'authCode'
CONTACTS = 'contacts'
NAME_SERVERS = 'nameServers'
def __init__(self, account, log_level=None, api_base_url=GODADDY_API_BASE_URL, api_version=GODADDY_API_VERSION):
"""Create a new `godaddypy.Client` object
:type account: godaddypy.Account
:param account: The godaddypy.Account object to create auth headers with.
"""
# Logging setup
self.logger = logging.getLogger('GoDaddyPy.Client')
# Explicit override of logging level
if log_level is not None:
self.logger.setLevel(log_level)
# Templates
self.API_TEMPLATE = urljoin(api_base_url, api_version)
self.DOMAINS = '/domains'
self.DOMAIN_INFO = '/domains/{domain}'
self.RECORDS = '/domains/{domain}/records'
self.RECORDS_TYPE = '/domains/{domain}/records/{type}'
self.RECORDS_TYPE_NAME = '/domains/{domain}/records/{type}/{name}'
self.account = account
def _build_record_url(self, domain, record_type=None, name=None):
url = self.API_TEMPLATE
if name is None and record_type is None:
url += self.RECORDS.format(domain=domain)
elif name is None and record_type is not None:
url += self.RECORDS_TYPE.format(domain=domain, type=record_type)
elif name is not None and record_type is None:
raise ValueError("If name is specified, type must also be specified")
else:
url += self.RECORDS_TYPE_NAME.format(domain=domain, type=record_type, name=name)
return url
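    # Illustration (assumption, not in the original module): for
    # domain='example.com', record_type='A', name='www' the three reachable
    # branches above yield, in order:
    #
    #   .../v1/domains/example.com/records
    #   .../v1/domains/example.com/records/A
    #   .../v1/domains/example.com/records/A/www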
def _get_headers(self):
return self.account.get_headers()
def _get_json_from_response(self, url, json=None, **kwargs):
return self._request_submit(requests.get, url=url, json=json, **kwargs).json()
def _log_response_from_method(self, req_type, resp):
self.logger.debug('[{req_type}] response: {resp}'.format(resp=resp, req_type=req_type.upper()))
self.logger.debug('Response data: {}'.format(resp.content))
def _patch(self, url, json=None, **kwargs):
return self._request_submit(requests.patch, url=url, json=json, **kwargs)
def _put(self, url, json=None, **kwargs):
return self._request_submit(requests.put, url=url, json=json, **kwargs)
def _request_submit(self, func, **kwargs):
"""A helper function that will wrap any requests we make.
:param func: a function reference to the requests method to invoke
:param kwargs: any extra arguments that requests.request takes
:type func: (url: Any, data: Any, json: Any, kwargs: Dict)
"""
resp = func(headers=self._get_headers(), **kwargs)
self._log_response_from_method(func.__name__, resp)
self._validate_response_success(resp)
return resp
@staticmethod
def _validate_response_success(response):
""" Only raise exceptions for 4xx/5xx errors because GoDaddy doesn't
always return 200 for a correct request """
try:
response.raise_for_status()
except Exception:
raise BadResponse(response.json())
def add_record(self, domain, record):
"""Adds the specified DNS record to a domain.
:param domain: the domain to add the record to
:param record: the record to add
"""
self.add_records(domain, [record])
# If we didn't get any exceptions, return True to let the user know
return True
def add_records(self, domain, records):
"""Adds the specified DNS records to a domain.
:param domain: the domain to add the records to
:param records: the records to add
"""
url = self.API_TEMPLATE + self.RECORDS.format(domain=domain)
self._patch(url, json=records)
self.logger.debug('Added records @ {}'.format(records))
# If we didn't get any exceptions, return True to let the user know
return True
def get_domain_info(self, domain):
"""Get the GoDaddy supplied information about a specific domain.
:param domain: The domain to obtain info about.
:type domain: str
:return A JSON string representing the domain information
"""
url = self.API_TEMPLATE + self.DOMAIN_INFO.format(domain=domain)
return self._get_json_from_response(url)
def get_domains(self, **params):
"""Returns a list of domains for the authenticated user.
:param params: Dict of query params to send with the domains request
"""
url = self.API_TEMPLATE + self.DOMAINS
data = self._get_json_from_response(url, params=params)
domains = list()
for item in data:
domain = item['domain']
domains.append(domain)
self.logger.debug('Discovered domains: {}'.format(domain))
return domains
def update_domain(self, domain, **kwargs):
"""
Update an existing domain via PATCH /v1/domains/{domain}
https://developer.godaddy.com/doc#!/_v1_domains/update
currently it supports ( all optional )
locked = boolean
nameServers = list
renewAuto = boolean
subaccountId = string
NOTE: It can take minutes for GoDaddy to update the record. Make sure you
wait before checking status.
"""
        update = dict(kwargs)
url = self.API_TEMPLATE + self.DOMAIN_INFO.format(domain=domain)
self._patch(url, json=update)
self.logger.info("Updated domain {} with {}".format(domain, update))
def get_records(self, domain, record_type=None, name=None):
"""Returns records from a single domain. You can specify type/name as filters for the records returned. If
you specify a name you MUST also specify a type.
:param domain: the domain to get DNS information from
:param record_type: the type of record(s) to retrieve
:param name: the name of the record(s) to retrieve
"""
url = self._build_record_url(domain, record_type=record_type, name=name)
data = self._get_json_from_response(url)
self.logger.debug('Retrieved {} record(s) from {}.'.format(len(data), domain))
return data
def replace_records(self, domain, records, record_type=None, name=None):
"""This will replace all records at the domain. Record type and record name can be provided to filter
which records to replace.
:param domain: the domain to replace records at
:param records: the records you will be saving
:param record_type: the type of records you want to replace (eg. only replace 'A' records)
:param name: the name of records you want to replace (eg. only replace records with name 'test')
:return: True if no exceptions occurred
"""
url = self._build_record_url(domain, name=name, record_type=record_type)
self._put(url, json=records)
# If we didn't get any exceptions, return True to let the user know
return True
def update_ip(self, ip, record_type='A', domains=None, subdomains=None):
"""Update the IP address in all records, specified by type, to the value of ip. Returns True if no
exceptions occurred during the update. If no domains are provided, all domains returned from
self.get_domains() will be updated. By default, only A records are updated.
:param record_type: The type of records to update (eg. 'A')
:param ip: The new IP address (eg. '123.1.2.255')
:param domains: A list of the domains you want to update (eg. ['123.com','abc.net'])
:param subdomains: A list of the subdomains you want to update (eg. ['www','dev'])
:type record_type: str or unicode
:type ip: str or unicode
:type domains: str, list of str
:type subdomains: str, list of str
:return: True if no exceptions occurred
"""
        if domains is None:
            domains = self.get_domains()
        elif isinstance(domains, string_types):
            domains = [domains]
        else:
            # we have a tuple, set, or something else, try to convert it to a list
            domains = list(domains)
for domain in domains:
a_records = self.get_records(domain, record_type=record_type)
for record in a_records:
r_name = str(record['name'])
r_ip = str(record['data'])
                if r_ip != ip:
                    if (subdomains is None or
                            (isinstance(subdomains, string_types) and r_name == subdomains) or
                            r_name in subdomains):
record.update(data=str(ip))
self.update_record(domain, record)
# If we didn't get any exceptions, return True to let the user know
return True
def delete_records(self, domain, name, record_type=None):
"""Deletes records by name. You can also add a record type, which will only delete records with the
specified type/name combo. If no record type is specified, ALL records that have a matching name will be
deleted.
This is haphazard functionality. I DO NOT recommend using this in Production code, as your entire DNS record
set could be deleted, depending on the fickleness of GoDaddy. Unfortunately, they do not expose a proper
"delete record" call, so there isn't much one can do here...
:param domain: the domain to delete records from
:param name: the name of records to remove
:param record_type: the type of records to remove
:return: True if no exceptions occurred
"""
records = self.get_records(domain)
if records is None:
return False # we don't want to replace the records with nothing at all
save = list()
deleted = 0
for record in records:
if (record_type == str(record['type']) or record_type is None) and name == str(record['name']):
deleted += 1
else:
save.append(record)
self.replace_records(domain, records=save)
self.logger.info("Deleted {} records @ {}".format(deleted, domain))
# If we didn't get any exceptions, return True to let the user know
return True
def update_record(self, domain, record, record_type=None, name=None):
"""Call to GoDaddy API to update a single DNS record
:param name: only required if the record is None (deletion)
:param record_type: only required if the record is None (deletion)
:param domain: the domain where the DNS belongs to (eg. 'example.com')
:param record: dict with record info (ex. {'name': 'dynamic', 'ttl': 3600, 'data': '1.1.1.1', 'type': 'A'})
:return: True if no exceptions occurred
"""
if record_type is None:
record_type = record['type']
if name is None:
name = record['name']
url = self.API_TEMPLATE + self.RECORDS_TYPE_NAME.format(domain=domain, type=record_type, name=name)
self._put(url, json=[record])
self.logger.info(
'Updated record. Domain {} name {} type {}'.format(domain, str(record['name']), str(record['type'])))
# If we didn't get any exceptions, return True to let the user know
return True
def update_record_ip(self, ip, domain, name, record_type):
"""Update the IP address(es) for (a) domain(s) specified by type and name.
:param ip: the new IP for the DNS record (ex. '123.1.2.255')
:param domain: the domain where the DNS belongs to (ex. 'example.com')
:param name: the DNS record name to be updated (ex. 'dynamic')
:param record_type: Record type (ex. 'CNAME', 'A'...)
:return: True if no exceptions occurred
"""
records = self.get_records(domain, name=name, record_type=record_type)
data = {'data': str(ip)}
for rec in records:
rec.update(data)
self.update_record(domain, rec)
# If we didn't get any exceptions, return True to let the user know
return True
class BadResponse(Exception):
def __init__(self, message, *args, **kwargs):
self.message = message
        super(BadResponse, self).__init__(message, *args)
def __str__(self, *args, **kwargs):
return 'Response Data: {}'.format(self.message)
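# Usage sketch (assumption, not part of the original module): Account is the
# companion class in this package that builds the auth headers consumed by
# Client._get_headers(); the key values below are hypothetical.
#
#   from godaddypy import Account, Client
#   account = Account(api_key='PUBLIC_KEY', api_secret='SECRET_KEY')
#   client = Client(account)
#   client.update_ip('123.1.2.255', domains=['example.com'])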
|
{
"content_hash": "aa3faa25dadfcdb7f8a53dfaf1c0085a",
"timestamp": "",
"source": "github",
"line_count": 344,
"max_line_length": 118,
"avg_line_length": 39.125,
"alnum_prop": 0.6169106174307155,
"repo_name": "eXamadeus/godaddypy",
"id": "10df10c266ec98d7f45462de7c32a909e647b81e",
"size": "13459",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "godaddypy/client.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "21994"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from future.builtins import int, zip
from functools import reduce
from operator import ior, iand
from string import punctuation
from django.db.models import Manager, Q, CharField, TextField, get_models
from django.db.models.manager import ManagerDescriptor
from django.db.models.query import QuerySet
from django.contrib.sites.managers import CurrentSiteManager as DjangoCSM
from django.utils.timezone import now
from django.utils.translation import ugettext_lazy as _
from mezzanine.conf import settings
from mezzanine.utils.models import get_model
from mezzanine.utils.sites import current_site_id
from mezzanine.utils.urls import home_slug
class PublishedManager(Manager):
"""
Provides filter for restricting items returned by status and
publish date when the given user is not a staff member.
"""
def published(self, for_user=None):
"""
For non-staff users, return items with a published status and
whose publish and expiry dates fall before and after the
current date when specified.
"""
from mezzanine.core.models import CONTENT_STATUS_PUBLISHED
if for_user is not None and for_user.is_staff:
return self.all()
return self.filter(
Q(publish_date__lte=now()) | Q(publish_date__isnull=True),
Q(expiry_date__gte=now()) | Q(expiry_date__isnull=True),
Q(status=CONTENT_STATUS_PUBLISHED))
def get_by_natural_key(self, slug):
return self.get(slug=slug)
def search_fields_to_dict(fields):
"""
In ``SearchableQuerySet`` and ``SearchableManager``, search fields
can either be a sequence, or a dict of fields mapped to weights.
This function converts sequences to a dict mapped to even weights,
so that we're consistently dealing with a dict of fields mapped to
weights, eg: ("title", "content") -> {"title": 1, "content": 1}
"""
if not fields:
return {}
try:
int(list(dict(fields).values())[0])
except (TypeError, ValueError):
fields = dict(zip(fields, [1] * len(fields)))
return fields
class SearchableQuerySet(QuerySet):
"""
QuerySet providing main search functionality for
``SearchableManager``.
"""
def __init__(self, *args, **kwargs):
self._search_ordered = False
self._search_terms = set()
self._search_fields = kwargs.pop("search_fields", {})
super(SearchableQuerySet, self).__init__(*args, **kwargs)
def search(self, query, search_fields=None):
"""
Build a queryset matching words in the given search query,
treating quoted terms as exact phrases and taking into
account + and - symbols as modifiers controlling which terms
to require and exclude.
"""
        ### DETERMINE FIELDS TO SEARCH ###
# Use search_fields arg if given, otherwise use search_fields
# initially configured by the manager class.
if search_fields:
self._search_fields = search_fields_to_dict(search_fields)
if not self._search_fields:
return self.none()
        ### BUILD LIST OF TERMS TO SEARCH FOR ###
# Remove extra spaces, put modifiers inside quoted terms.
terms = " ".join(query.split()).replace("+ ", "+") \
.replace('+"', '"+') \
.replace("- ", "-") \
.replace('-"', '"-') \
.split('"')
# Strip punctuation other than modifiers from terms and create
# terms list, first from quoted terms and then remaining words.
terms = [("" if t[0:1] not in "+-" else t[0:1]) + t.strip(punctuation)
for t in terms[1::2] + "".join(terms[::2]).split()]
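        # Illustrative walk-through (added comment, not in the original): for
        # the query 'cat +"big dog" -bird', splitting on quotes gives
        # ['cat ', '+big dog', ' -bird'], and the final term list becomes
        # ['+big dog', 'cat', '-bird'].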
# Remove stop words from terms that aren't quoted or use
# modifiers, since words with these are an explicit part of
# the search query. If doing so ends up with an empty term
# list, then keep the stop words.
terms_no_stopwords = [t for t in terms if t.lower() not in
settings.STOP_WORDS]
get_positive_terms = lambda terms: [t.lower().strip(punctuation)
for t in terms if t[0:1] != "-"]
positive_terms = get_positive_terms(terms_no_stopwords)
if positive_terms:
terms = terms_no_stopwords
else:
positive_terms = get_positive_terms(terms)
# Append positive terms (those without the negative modifier)
# to the internal list for sorting when results are iterated.
if not positive_terms:
return self.none()
else:
self._search_terms.update(positive_terms)
        ### BUILD QUERYSET FILTER ###
# Create the queryset combining each set of terms.
excluded = [reduce(iand, [~Q(**{"%s__icontains" % f: t[1:]}) for f in
self._search_fields.keys()]) for t in terms if t[0:1] == "-"]
required = [reduce(ior, [Q(**{"%s__icontains" % f: t[1:]}) for f in
self._search_fields.keys()]) for t in terms if t[0:1] == "+"]
optional = [reduce(ior, [Q(**{"%s__icontains" % f: t}) for f in
self._search_fields.keys()]) for t in terms if t[0:1] not in "+-"]
queryset = self
if excluded:
queryset = queryset.filter(reduce(iand, excluded))
if required:
queryset = queryset.filter(reduce(iand, required))
# Optional terms aren't relevant to the filter if there are
# terms that are explicitly required.
elif optional:
queryset = queryset.filter(reduce(ior, optional))
return queryset.distinct()
def _clone(self, *args, **kwargs):
"""
Ensure attributes are copied to subsequent queries.
"""
for attr in ("_search_terms", "_search_fields", "_search_ordered"):
kwargs[attr] = getattr(self, attr)
return super(SearchableQuerySet, self)._clone(*args, **kwargs)
def order_by(self, *field_names):
"""
Mark the filter as being ordered if search has occurred.
"""
if not self._search_ordered:
self._search_ordered = len(self._search_terms) > 0
return super(SearchableQuerySet, self).order_by(*field_names)
def iterator(self):
"""
If search has occurred and no ordering has occurred, decorate
each result with the number of search terms so that it can be
sorted by the number of occurrence of terms.
In the case of search fields that span model relationships, we
cannot accurately match occurrences without some very
complicated traversal code, which we won't attempt. So in this
case, namely when there are no matches for a result (count=0),
and search fields contain relationships (double underscores),
we assume one match for one of the fields, and use the average
weight of all search fields with relationships.
"""
results = super(SearchableQuerySet, self).iterator()
if self._search_terms and not self._search_ordered:
results = list(results)
for i, result in enumerate(results):
count = 0
related_weights = []
for (field, weight) in self._search_fields.items():
if "__" in field:
related_weights.append(weight)
for term in self._search_terms:
field_value = getattr(result, field, None)
if field_value:
count += field_value.lower().count(term) * weight
if not count and related_weights:
count = int(sum(related_weights) / len(related_weights))
results[i].result_count = count
return iter(results)
return results
class SearchableManager(Manager):
"""
Manager providing a chainable queryset.
Adapted from http://www.djangosnippets.org/snippets/562/
search method supports spanning across models that subclass the
model being used to search.
"""
def __init__(self, *args, **kwargs):
self._search_fields = kwargs.pop("search_fields", {})
super(SearchableManager, self).__init__(*args, **kwargs)
def get_search_fields(self):
"""
Returns the search field names mapped to weights as a dict.
Used in ``get_query_set`` below to tell ``SearchableQuerySet``
which search fields to use. Also used by ``DisplayableAdmin``
to populate Django admin's ``search_fields`` attribute.
Search fields can be populated via
``SearchableManager.__init__``, which then get stored in
``SearchableManager._search_fields``, which serves as an
approach for defining an explicit set of fields to be used.
Alternatively and more commonly, ``search_fields`` can be
defined on models themselves. In this case, we look at the
model and all its base classes, and build up the search
fields from all of those, so the search fields are implicitly
        built up from the inheritance chain.
Finally if no search fields have been defined at all, we
fall back to any fields that are ``CharField`` or ``TextField``
instances.
"""
search_fields = self._search_fields.copy()
if not search_fields:
for cls in reversed(self.model.__mro__):
super_fields = getattr(cls, "search_fields", {})
search_fields.update(search_fields_to_dict(super_fields))
if not search_fields:
search_fields = []
for f in self.model._meta.fields:
if isinstance(f, (CharField, TextField)):
search_fields.append(f.name)
search_fields = search_fields_to_dict(search_fields)
return search_fields
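    # Example (hypothetical model): declaring
    #     search_fields = {"title": 10, "content": 1}
    # on a model makes this method return that dict, so ``title`` matches are
    # weighted ten times heavier than ``content`` matches when results are
    # ranked in ``SearchableQuerySet.iterator``.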
def get_query_set(self):
search_fields = self.get_search_fields()
return SearchableQuerySet(self.model, search_fields=search_fields)
def contribute_to_class(self, model, name):
"""
Django 1.5 explicitly prevents managers being accessed from
abstract classes, which is behaviour the search API has relied
on for years. Here we reinstate it.
"""
super(SearchableManager, self).contribute_to_class(model, name)
setattr(model, name, ManagerDescriptor(self))
def search(self, *args, **kwargs):
"""
Proxy to queryset's search method for the manager's model and
any models that subclass from this manager's model if the
model is abstract.
"""
if not settings.SEARCH_MODEL_CHOICES:
# No choices defined - build a list of leaf models (those
# without subclasses) that inherit from Displayable.
models = [m for m in get_models() if issubclass(m, self.model)]
parents = reduce(ior, [m._meta.get_parent_list() for m in models])
models = [m for m in models if m not in parents]
elif getattr(self.model._meta, "abstract", False):
# When we're combining model subclasses for an abstract
# model (eg Displayable), we only want to use models that
# are represented by the ``SEARCH_MODEL_CHOICES`` setting.
# Now this setting won't contain an exact list of models
# we should use, since it can define superclass models such
# as ``Page``, so we check the parent class list of each
# model when determining whether a model falls within the
# ``SEARCH_MODEL_CHOICES`` setting.
search_choices = set([get_model(*name.split(".", 1)) for name in
settings.SEARCH_MODEL_CHOICES])
models = set()
parents = set()
for model in get_models():
                # Model is actually a subclass of what we're
                # searching (eg Displayable)
is_subclass = issubclass(model, self.model)
# Model satisfies the search choices list - either
# there are no search choices, model is directly in
# search choices, or its parent is.
this_parents = set(model._meta.get_parent_list())
in_choices = not search_choices or model in search_choices
in_choices = in_choices or this_parents & search_choices
if is_subclass and (in_choices or not search_choices):
                    # Add to models we'll search. Also maintain a parent
# set, used below for further refinement of models
# list to search.
models.add(model)
parents.update(this_parents)
# Strip out any models that are superclasses of models,
# specifically the Page model which will generally be the
# superclass for all custom content types, since if we
# query the Page model as well, we will get duplicate
# results.
models -= parents
else:
models = [self.model]
all_results = []
user = kwargs.pop("for_user", None)
for model in models:
try:
queryset = model.objects.published(for_user=user)
except AttributeError:
queryset = model.objects.get_query_set()
all_results.extend(queryset.search(*args, **kwargs))
return sorted(all_results, key=lambda r: r.result_count, reverse=True)
class CurrentSiteManager(DjangoCSM):
"""
Extends Django's site manager to first look up site by ID stored in
the request, the session, then domain for the current request
(accessible via threadlocals in ``mezzanine.core.request``), the
environment variable ``MEZZANINE_SITE_ID`` (which can be used by
    management commands with the ``--site`` arg), finally falling back
to ``settings.SITE_ID`` if none of those match a site.
"""
def __init__(self, field_name=None, *args, **kwargs):
super(DjangoCSM, self).__init__(*args, **kwargs)
self.__field_name = field_name
self.__is_validated = False
def get_query_set(self):
if not self.__is_validated:
try:
# Django <= 1.6
self._validate_field_name()
except AttributeError:
# Django >= 1.7: will populate "self.__field_name".
self._get_field_name()
lookup = {self.__field_name + "__id__exact": current_site_id()}
return super(DjangoCSM, self).get_query_set().filter(**lookup)
class DisplayableManager(CurrentSiteManager, PublishedManager,
SearchableManager):
"""
Manually combines ``CurrentSiteManager``, ``PublishedManager``
and ``SearchableManager`` for the ``Displayable`` model.
"""
def url_map(self, for_user=None, **kwargs):
"""
Returns a dictionary of urls mapped to Displayable subclass
instances, including a fake homepage instance if none exists.
Used in ``mezzanine.core.sitemaps``.
"""
home = self.model(title=_("Home"))
setattr(home, "get_absolute_url", home_slug)
items = {home.get_absolute_url(): home}
for model in get_models():
if issubclass(model, self.model):
for item in (model.objects.published(for_user=for_user)
.filter(**kwargs)
.exclude(slug__startswith="http://")
.exclude(slug__startswith="https://")):
items[item.get_absolute_url()] = item
return items
|
{
"content_hash": "648c26f31743ab2e7467d7feb7ff0b77",
"timestamp": "",
"source": "github",
"line_count": 364,
"max_line_length": 78,
"avg_line_length": 43.76648351648352,
"alnum_prop": 0.5987696943066977,
"repo_name": "agepoly/mezzanine",
"id": "8562f72e5418adfd3f08f80f6b27a368e673851c",
"size": "15931",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mezzanine/core/managers.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "100332"
},
{
"name": "HTML",
"bytes": "92057"
},
{
"name": "JavaScript",
"bytes": "228888"
},
{
"name": "Nginx",
"bytes": "1354"
},
{
"name": "Python",
"bytes": "1277115"
}
],
"symlink_target": ""
}
|
from Test_Column_ObjFnc import tac_column
import time
"""
% -------------------------------------------------------------------------
% SIMULATION-BASED OPTIMIZATION OF A SINGLE CONVENTIONAL DISTILLATION
% COLUMN USING THE PARTICLE SWARM OPTIMIZATION ALGORITHM
%--------------------------------------------------------------------------
% Juan Javaloyes Antón. Sep 2016 v.3
%--------------------------------------------------------------------------
% # 04 # Distillation column model
%--------------------------------------------------------------------------
"""
def distColumn_model(x, Problem):
# Independent Variables
RR = x[0] # * RR: Reflux Ratio
BR = x[1] # * BR: Boilup Ratio
NR = x[2] # * NR: Number of active trays in rectifying section
NS = x[3] # * NS: Number of active trays in stripping section
HyObject = Problem.HyObject # Recover Hysys Objects from structure Problem
NT = (NR + NS) + 1 # Total number of active trays
Feed_S = NR + 1 # Feed location
# 01 Change Column Topology and Column specifications (degrees of freedom)
# Total number of active trays
HyObject.DistColumn.Main_TS.NumberOfTrays = NT
# Feed location
HyObject.DistColumn.Main_TS.SpecifyFeedLocation(HyObject.DistColumn.FeedMainTS, Feed_S)
# Reflux Ratio
HyObject.DistColumn.Column.ColumnFlowsheet.Specifications.Item('Reflux Ratio').GoalValue = RR
# Boilup Ratio
HyObject.DistColumn.Column.ColumnFlowsheet.Specifications.Item('Boilup Ratio').GoalValue = BR
# 02 Run Aspen Hysys model with new topology
    HyObject.DistColumn.ColumnFlowsheet.Run() # Run Aspen Hysys model
# time.sleep(0.3)
# 03 Check model convergence
RunStatus = HyObject.HyApp.ActiveDocument.Flowsheet.Operations.Item(0).ColumnFlowsheet.CfsConverged
if RunStatus == 1:
# 04 Compute the Total Annual Cost of the Distillation Column
ColumnCost = tac_column(Problem) # from Test_Column_ObjFnc
# 05 Check purity constraints
        Tol_dist = 0.001 # Molar Fraction Impurities
Bz_Bottoms = 0.001
Comp_frac_Tol_dist = HyObject.MaterialStream.Distillate.ComponentMolarFractionValue[1]
Comp_frac_Bz_Bott = HyObject.MaterialStream.Bottoms.ComponentMolarFractionValue[0]
if Comp_frac_Tol_dist > Tol_dist:
w1 = (Comp_frac_Tol_dist - Tol_dist)*1e5
else:
w1 = 0
if Comp_frac_Bz_Bott > Bz_Bottoms:
w2 = (Comp_frac_Bz_Bott - Bz_Bottoms)*1e5
else:
w2 = 0
# Total Annual Cost + penalty terms
TAC = ColumnCost.TAC + w1 + w2
else: # In case model does not converge
TAC = 1e5
    return TAC
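# Usage sketch (hypothetical; requires a live Aspen HYSYS COM session): a PSO
# driver would repeatedly evaluate candidate designs, e.g.
#   TAC = distColumn_model([RR, BR, NR, NS], Problem)
# where Problem.HyObject exposes the DistColumn, MaterialStream and HyApp
# handles used above, and the optimizer minimizes the returned TAC.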
|
{
"content_hash": "dcff3b3981f78e9e9a73e4f00e883310",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 104,
"avg_line_length": 37.55128205128205,
"alnum_prop": 0.5742574257425742,
"repo_name": "CAChemE/stochastic-optimization",
"id": "2d32a8b2ca89640eb3d1eedd4a0b915ca6d8e134",
"size": "2956",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ConventionalDistillationColumn/column_algorithm.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "3363048"
},
{
"name": "Matlab",
"bytes": "12828"
},
{
"name": "Python",
"bytes": "74044"
}
],
"symlink_target": ""
}
|
""" Unit test for ‘_metadata’ private module.
"""
from __future__ import (absolute_import, unicode_literals)
import sys
import errno
import re
try:
# Python 3 standard library.
import urllib.parse as urlparse
except ImportError:
# Python 2 standard library.
import urlparse
import functools
import collections
import json
import pkg_resources
import mock
import testtools.helpers
import testtools.matchers
import testscenarios
from . import scaffold
from .scaffold import (basestring, unicode)
import daemon._metadata as metadata
class HasAttribute(testtools.matchers.Matcher):
""" A matcher to assert an object has a named attribute. """
def __init__(self, name):
self.attribute_name = name
def match(self, instance):
""" Assert the object `instance` has an attribute named `name`. """
result = None
if not testtools.helpers.safe_hasattr(instance, self.attribute_name):
result = AttributeNotFoundMismatch(instance, self.attribute_name)
return result
class AttributeNotFoundMismatch(testtools.matchers.Mismatch):
""" The specified instance does not have the named attribute. """
def __init__(self, instance, name):
self.instance = instance
self.attribute_name = name
def describe(self):
""" Emit a text description of this mismatch. """
text = (
"{instance!r}"
" has no attribute named {name!r}").format(
instance=self.instance, name=self.attribute_name)
return text
class metadata_value_TestCase(scaffold.TestCaseWithScenarios):
""" Test cases for metadata module values. """
expected_str_attributes = set([
'version_installed',
'author',
'copyright',
'license',
'url',
])
scenarios = [
(name, {'attribute_name': name})
for name in expected_str_attributes]
for (name, params) in scenarios:
if name == 'version_installed':
# No duck typing, this attribute might be None.
params['ducktype_attribute_name'] = NotImplemented
continue
# Expect an attribute of ‘str’ to test this value.
params['ducktype_attribute_name'] = 'isdigit'
def test_module_has_attribute(self):
""" Metadata should have expected value as a module attribute. """
self.assertThat(
metadata, HasAttribute(self.attribute_name))
def test_module_attribute_has_duck_type(self):
""" Metadata value should have expected duck-typing attribute. """
if self.ducktype_attribute_name == NotImplemented:
self.skipTest("Can't assert this attribute's type")
instance = getattr(metadata, self.attribute_name)
self.assertThat(
instance, HasAttribute(self.ducktype_attribute_name))
class parse_person_field_TestCase(
testscenarios.WithScenarios, testtools.TestCase):
""" Test cases for ‘get_latest_version’ function. """
scenarios = [
('simple', {
'test_person': "Foo Bar <foo.bar@example.com>",
'expected_result': ("Foo Bar", "foo.bar@example.com"),
}),
('empty', {
'test_person': "",
'expected_result': (None, None),
}),
('none', {
'test_person': None,
'expected_error': TypeError,
}),
('no email', {
'test_person': "Foo Bar",
'expected_result': ("Foo Bar", None),
}),
]
def test_returns_expected_result(self):
""" Should return expected result. """
if hasattr(self, 'expected_error'):
self.assertRaises(
self.expected_error,
metadata.parse_person_field, self.test_person)
else:
result = metadata.parse_person_field(self.test_person)
self.assertEqual(self.expected_result, result)
class YearRange_TestCase(scaffold.TestCaseWithScenarios):
""" Test cases for ‘YearRange’ class. """
scenarios = [
('simple', {
'begin_year': 1970,
'end_year': 1979,
'expected_text': "1970–1979",
}),
('same year', {
'begin_year': 1970,
'end_year': 1970,
'expected_text': "1970",
}),
('no end year', {
'begin_year': 1970,
'end_year': None,
'expected_text': "1970",
}),
]
def setUp(self):
""" Set up test fixtures. """
super(YearRange_TestCase, self).setUp()
self.test_instance = metadata.YearRange(
self.begin_year, self.end_year)
def test_text_representation_as_expected(self):
""" Text representation should be as expected. """
result = unicode(self.test_instance)
self.assertEqual(result, self.expected_text)
FakeYearRange = collections.namedtuple('FakeYearRange', ['begin', 'end'])
@mock.patch.object(metadata, 'YearRange', new=FakeYearRange)
class make_year_range_TestCase(scaffold.TestCaseWithScenarios):
""" Test cases for ‘make_year_range’ function. """
scenarios = [
('simple', {
'begin_year': "1970",
'end_date': "1979-01-01",
'expected_range': FakeYearRange(begin=1970, end=1979),
}),
('same year', {
'begin_year': "1970",
'end_date': "1970-01-01",
'expected_range': FakeYearRange(begin=1970, end=1970),
}),
('no end year', {
'begin_year': "1970",
'end_date': None,
'expected_range': FakeYearRange(begin=1970, end=None),
}),
('end date UNKNOWN token', {
'begin_year': "1970",
'end_date': "UNKNOWN",
'expected_range': FakeYearRange(begin=1970, end=None),
}),
('end date FUTURE token', {
'begin_year': "1970",
'end_date': "FUTURE",
'expected_range': FakeYearRange(begin=1970, end=None),
}),
]
def test_result_matches_expected_range(self):
""" Result should match expected YearRange. """
result = metadata.make_year_range(self.begin_year, self.end_date)
self.assertEqual(result, self.expected_range)
class metadata_content_TestCase(scaffold.TestCase):
""" Test cases for content of metadata. """
def test_copyright_formatted_correctly(self):
""" Copyright statement should be formatted correctly. """
regex_pattern = (
"Copyright © "
"\d{4}" # four-digit year
"(?:–\d{4})?" # optional range dash and ending four-digit year
)
regex_flags = re.UNICODE
self.assertThat(
metadata.copyright,
testtools.matchers.MatchesRegex(regex_pattern, regex_flags))
def test_author_formatted_correctly(self):
""" Author information should be formatted correctly. """
regex_pattern = (
".+ " # name
"<[^>]+>" # email address, in angle brackets
)
regex_flags = re.UNICODE
self.assertThat(
metadata.author,
testtools.matchers.MatchesRegex(regex_pattern, regex_flags))
def test_copyright_contains_author(self):
""" Copyright information should contain author information. """
self.assertThat(
metadata.copyright,
testtools.matchers.Contains(metadata.author))
def test_url_parses_correctly(self):
""" Homepage URL should parse correctly. """
result = urlparse.urlparse(metadata.url)
self.assertIsInstance(
result, urlparse.ParseResult,
"URL value {url!r} did not parse correctly".format(
url=metadata.url))
try:
FileNotFoundError
except NameError:
# Python 2 uses IOError.
FileNotFoundError = functools.partial(IOError, errno.ENOENT)
version_info_filename = "version_info.json"
def fake_func_has_metadata(testcase, resource_name):
""" Fake the behaviour of ‘pkg_resources.Distribution.has_metadata’. """
if (
resource_name != testcase.expected_resource_name
or not hasattr(testcase, 'test_version_info')):
return False
return True
def fake_func_get_metadata(testcase, resource_name):
""" Fake the behaviour of ‘pkg_resources.Distribution.get_metadata’. """
if not fake_func_has_metadata(testcase, resource_name):
error = FileNotFoundError(resource_name)
raise error
content = testcase.test_version_info
return content
def fake_func_get_distribution(testcase, distribution_name):
""" Fake the behaviour of ‘pkg_resources.get_distribution’. """
if distribution_name != metadata.distribution_name:
raise pkg_resources.DistributionNotFound
if hasattr(testcase, 'get_distribution_error'):
raise testcase.get_distribution_error
mock_distribution = testcase.mock_distribution
mock_distribution.has_metadata.side_effect = functools.partial(
fake_func_has_metadata, testcase)
mock_distribution.get_metadata.side_effect = functools.partial(
fake_func_get_metadata, testcase)
return mock_distribution
@mock.patch.object(metadata, 'distribution_name', new="mock-dist")
class get_distribution_version_info_TestCase(scaffold.TestCaseWithScenarios):
""" Test cases for ‘get_distribution_version_info’ function. """
default_version_info = {
'release_date': "UNKNOWN",
'version': "UNKNOWN",
'maintainer': "UNKNOWN",
}
scenarios = [
('version 0.0', {
'test_version_info': json.dumps({
'version': "0.0",
}),
'expected_version_info': {'version': "0.0"},
}),
('version 1.0', {
'test_version_info': json.dumps({
'version': "1.0",
}),
'expected_version_info': {'version': "1.0"},
}),
('file lorem_ipsum.json', {
'version_info_filename': "lorem_ipsum.json",
'test_version_info': json.dumps({
'version': "1.0",
}),
'expected_version_info': {'version': "1.0"},
}),
('not installed', {
'get_distribution_error': pkg_resources.DistributionNotFound(),
'expected_version_info': default_version_info,
}),
('no version_info', {
'expected_version_info': default_version_info,
}),
]
def setUp(self):
""" Set up test fixtures. """
super(get_distribution_version_info_TestCase, self).setUp()
        if hasattr(self, 'version_info_filename'):
            self.test_args = {'filename': self.version_info_filename}
            self.expected_resource_name = self.version_info_filename
        else:
            self.test_args = {}
            self.expected_resource_name = version_info_filename
self.mock_distribution = mock.MagicMock()
func_patcher_get_distribution = mock.patch.object(
pkg_resources, 'get_distribution')
func_patcher_get_distribution.start()
self.addCleanup(func_patcher_get_distribution.stop)
pkg_resources.get_distribution.side_effect = functools.partial(
fake_func_get_distribution, self)
def test_requests_installed_distribution(self):
""" The package distribution should be retrieved. """
expected_distribution_name = metadata.distribution_name
version_info = metadata.get_distribution_version_info(**self.test_args)
pkg_resources.get_distribution.assert_called_with(
expected_distribution_name)
def test_requests_specified_filename(self):
""" The specified metadata resource name should be requested. """
if hasattr(self, 'get_distribution_error'):
self.skipTest("No access to distribution")
version_info = metadata.get_distribution_version_info(**self.test_args)
self.mock_distribution.has_metadata.assert_called_with(
self.expected_resource_name)
def test_result_matches_expected_items(self):
""" The result should match the expected items. """
version_info = metadata.get_distribution_version_info(**self.test_args)
self.assertEqual(self.expected_version_info, version_info)
# Local variables:
# coding: utf-8
# mode: python
# End:
# vim: fileencoding=utf-8 filetype=python :
|
{
"content_hash": "f55b2f3ecd7f244dd3f26201d3b0b9e1",
"timestamp": "",
"source": "github",
"line_count": 375,
"max_line_length": 79,
"avg_line_length": 34.744,
"alnum_prop": 0.5747946887712027,
"repo_name": "dproc/trex_odp_porting_integration",
"id": "692753f47a3a7c259069fa54df0d9e92eab9e8ac",
"size": "13514",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "scripts/external_libs/python-daemon-2.0.5/test/test_metadata.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "9616073"
},
{
"name": "C++",
"bytes": "3147123"
},
{
"name": "CMake",
"bytes": "8882"
},
{
"name": "HTML",
"bytes": "4523"
},
{
"name": "JavaScript",
"bytes": "1234"
},
{
"name": "Makefile",
"bytes": "129776"
},
{
"name": "Python",
"bytes": "2740100"
},
{
"name": "Shell",
"bytes": "3026"
}
],
"symlink_target": ""
}
|
from test.test_support import run_unittest, check_py3k_warnings
import unittest
class OpcodeTest(unittest.TestCase):
def test_try_inside_for_loop(self):
n = 0
for i in range(10):
n = n+i
try: 1 // 0
except NameError: pass
except ZeroDivisionError: pass
except TypeError: pass
try: pass
except: pass
try: pass
finally: pass
n = n+i
if n != 90:
self.fail('try inside for')
def test_raise_class_exceptions(self):
class AClass: pass
class BClass(AClass): pass
class CClass: pass
class DClass(AClass):
def __init__(self, ignore):
pass
try: raise AClass()
except: pass
try: raise AClass()
except AClass: pass
try: raise BClass()
except AClass: pass
try: raise BClass()
except CClass: self.fail()
except: pass
a = AClass()
b = BClass()
try: raise AClass, b
except BClass, v:
self.assertEqual(v, b)
else: self.fail("no exception")
try: raise b
except AClass, v:
self.assertEqual(v, b)
else:
self.fail("no exception")
# not enough arguments
try: raise BClass, a
except TypeError: pass
else: self.fail("no exception")
try: raise DClass, a
except DClass, v:
self.assertIsInstance(v, DClass)
else:
self.fail("no exception")
def test_compare_function_objects(self):
f = eval('lambda: None')
g = eval('lambda: None')
self.assertNotEquals(f, g)
f = eval('lambda a: a')
g = eval('lambda a: a')
self.assertNotEquals(f, g)
f = eval('lambda a=1: a')
g = eval('lambda a=1: a')
self.assertNotEquals(f, g)
f = eval('lambda: 0')
g = eval('lambda: 1')
self.assertNotEquals(f, g)
f = eval('lambda: None')
g = eval('lambda a: None')
self.assertNotEquals(f, g)
f = eval('lambda a: None')
g = eval('lambda b: None')
self.assertNotEquals(f, g)
f = eval('lambda a: None')
g = eval('lambda a=None: None')
self.assertNotEquals(f, g)
f = eval('lambda a=0: None')
g = eval('lambda a=1: None')
self.assertNotEquals(f, g)
def test_modulo_of_string_subclasses(self):
class MyString(str):
def __mod__(self, value):
return 42
self.assertEqual(MyString() % 3, 42)
def test_main():
with check_py3k_warnings(("exceptions must derive from BaseException",
DeprecationWarning),
("catching classes that don't inherit "
"from BaseException is not allowed",
DeprecationWarning)):
run_unittest(OpcodeTest)
if __name__ == '__main__':
test_main()
|
{
"content_hash": "f6c89dc283e70a93736691be1abaaf87",
"timestamp": "",
"source": "github",
"line_count": 119,
"max_line_length": 74,
"avg_line_length": 25.873949579831933,
"alnum_prop": 0.5086066904839234,
"repo_name": "fkolacek/FIT-VUT",
"id": "c93ef86df2a94f36e589e1d0faa9c98edd1469b8",
"size": "3117",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "bp-revok/python/lib/python2.7/test/test_opcodes.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "455326"
},
{
"name": "Awk",
"bytes": "8724"
},
{
"name": "Batchfile",
"bytes": "201"
},
{
"name": "Brainfuck",
"bytes": "83"
},
{
"name": "C",
"bytes": "5006938"
},
{
"name": "C++",
"bytes": "1835332"
},
{
"name": "CSS",
"bytes": "301045"
},
{
"name": "CoffeeScript",
"bytes": "46327"
},
{
"name": "Groff",
"bytes": "46766"
},
{
"name": "HTML",
"bytes": "937735"
},
{
"name": "Java",
"bytes": "552132"
},
{
"name": "JavaScript",
"bytes": "1742225"
},
{
"name": "Lua",
"bytes": "39700"
},
{
"name": "Makefile",
"bytes": "381793"
},
{
"name": "Objective-C",
"bytes": "4618"
},
{
"name": "PHP",
"bytes": "108701"
},
{
"name": "PLSQL",
"bytes": "22886"
},
{
"name": "Perl",
"bytes": "60353"
},
{
"name": "Python",
"bytes": "22084026"
},
{
"name": "QMake",
"bytes": "2660"
},
{
"name": "R",
"bytes": "1370"
},
{
"name": "Ragel in Ruby Host",
"bytes": "17993"
},
{
"name": "Ruby",
"bytes": "21607145"
},
{
"name": "Shell",
"bytes": "611321"
},
{
"name": "Tcl",
"bytes": "4920"
},
{
"name": "TeX",
"bytes": "561423"
},
{
"name": "VHDL",
"bytes": "49180"
},
{
"name": "Visual Basic",
"bytes": "481"
},
{
"name": "XSLT",
"bytes": "154638"
},
{
"name": "Yacc",
"bytes": "32788"
}
],
"symlink_target": ""
}
|
from peachbox.model.view import View
import peachbox.model
import uuid
class RealTimeView(View):
keys = []
_cassandra_initialized = False
_cassandra_indices = None
@classmethod
def row(cls, **kwargs):
if not cls._cassandra_initialized: cls.cassandra_initialize()
partition_key = uuid.uuid1()
row = dict(kwargs)
row.update({'partition_key':partition_key})
return row
@classmethod
def cassandra_schema(cls):
if not cls._cassandra_schema: cls.generate_cassandra_schema()
return cls._cassandra_schema
@classmethod
def cassandra_initialize(cls):
fields = map(lambda entry: entry['field'], cls.schema)
        cls._cassandra_indices = {field:i for i,field in enumerate(fields)}
        cls._cassandra_initialized = True  # mark done so row() initializes only once
@classmethod
def name(cls):
return cls.__name__.lower()
@classmethod
def cassandra_table_cql(cls):
cql = "CREATE TABLE " + cls.name() + ' (partition_key text PRIMARY KEY, '
fields = [e['field'] + ' ' + peachbox.model.Types.cassandra_type(e['type']) for e in cls.schema]
cql += ', '.join(fields)
cql += ')'
return cql
@classmethod
def cassandra_output_cql(cls):
cql = "UPDATE " + cls.mart + "." + cls.name() + " SET "
for i,entry in enumerate(cls.schema):
            if entry['field'] != cls.key:
                cql += entry['field'] + ' = ?'
                if i != len(cls.schema) - 1:
                    cql += ', '
return cql
@classmethod
def keyspace_name(cls):
return cls.mart.lower()
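# Illustrative example (hypothetical subclass): a view declaring
#   schema = [{'field': 'user', 'type': str}, {'field': 'score', 'type': int}]
# would produce cassandra_table_cql() of the form
#   CREATE TABLE <viewname> (partition_key text PRIMARY KEY, user <t1>, score <t2>)
# where <t1>/<t2> come from peachbox.model.Types.cassandra_type.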
|
{
"content_hash": "4060afc4ae43b3ac4ac7e249a983bbf2",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 104,
"avg_line_length": 24.892307692307693,
"alnum_prop": 0.572929542645241,
"repo_name": "PeachstoneIO/peachbox",
"id": "c4821e5ef2082f53abce2b05255bba999a2e6490",
"size": "1618",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "peachbox/model/real_time_view.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "125688"
},
{
"name": "Shell",
"bytes": "655"
}
],
"symlink_target": ""
}
|
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('core', '0033_abouttheeditorpage_language'),
('core', '0030_homepage_banner_content'),
]
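    # Merge migration (no schema changes): it reconciles the diverged
    # 0030 and 0033 branches of the 'core' app's migration graph.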
operations = [
]
|
{
"content_hash": "73c94ca3641844771b0dee35ca932b83",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 53,
"avg_line_length": 19.333333333333332,
"alnum_prop": 0.6293103448275862,
"repo_name": "PARINetwork/pari",
"id": "43c22331b060babe53ef4c6f5829998321d11690",
"size": "279",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "core/migrations/0034_merge_20210608_1415.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "94103"
},
{
"name": "HTML",
"bytes": "452629"
},
{
"name": "JavaScript",
"bytes": "124537"
},
{
"name": "Less",
"bytes": "229040"
},
{
"name": "Python",
"bytes": "479247"
},
{
"name": "Shell",
"bytes": "3919"
}
],
"symlink_target": ""
}
|
class ReadOnlyAdminPreMixin(object):
"""
Mixin for :class:`django.contrib.admin.options.ModelAdmin`
that makes the model admin read only.
Examples:
Typical usage::
from ievv_opensource.ievv_djangoadmin import ievv_djangoadmin_mixins
@admin.register(MyModel)
class MyModelAdmin(ievv_djangoadmin_mixins.ReadOnlyAdminPreMixin,
admin.ModelAdmin):
list_display = [
# ...
]
search_fields = [
# ...
]
# ...
"""
#: Overridden change form template that removes
#: the save buttons.
change_form_template = 'ievv_djangoadmin/ievv_djangoadmin_mixins/' \
'read_only_change_form.django.html'
#: Set to ``None`` - no actions by default.
actions = None
def get_readonly_fields(self, request, obj=None):
"""
Returns all the fields on the model,
so all fields are read only.
"""
return [f.name for f in self.model._meta.get_fields() if f.concrete]
def get_fields(self, request, obj=None):
return self.get_readonly_fields(request, obj=obj)
def has_add_permission(self, request):
"""
Returns ``False`` - so no add permission.
"""
return False
def has_delete_permission(self, request, id=None):
"""
Returns ``False`` - so no delete permission.
"""
return False
|
{
"content_hash": "d8c5f610d456ce421e670e2c8626dc74",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 80,
"avg_line_length": 29.615384615384617,
"alnum_prop": 0.5461038961038961,
"repo_name": "appressoas/ievv_opensource",
"id": "d46b727c2f5f694caf18325e21316deb5087e4ed",
"size": "1541",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ievv_opensource/ievv_djangoadmin/ievv_djangoadmin_mixins.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CoffeeScript",
"bytes": "199"
},
{
"name": "Dockerfile",
"bytes": "162"
},
{
"name": "HTML",
"bytes": "7544"
},
{
"name": "JavaScript",
"bytes": "719"
},
{
"name": "Less",
"bytes": "27"
},
{
"name": "Python",
"bytes": "614046"
},
{
"name": "SCSS",
"bytes": "199"
},
{
"name": "Shell",
"bytes": "141"
},
{
"name": "TypeScript",
"bytes": "254"
}
],
"symlink_target": ""
}
|
import sys
sys.path.append('..') # append .. to the current path so it can find Binance.py
from Binance import Binance
import logging.config
import logging.handlers
import logging
import ConfigParser
def readConfig(key):
# read in configuration
config = ConfigParser.ConfigParser()
config.read("config.ini")
return config.get('KEYS',key)
# the symbol we want to get a price for <your value here>
symbol_of_interest = (raw_input("Name of Symbol: ")).upper()
# this logging configuration is sketchy
binance = logging.getLogger(__name__)
logging.config.fileConfig('logging.ini')
# create Binance object
bn = Binance()
allPrices = bn.getTicker('allPrices')
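# For reference: the Binance price ticker returns a list of dicts of the form
# {"symbol": "ETHBTC", "price": "0.04..."}, so the comprehension below picks
# the entry whose symbol matches symbol_of_interest.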
try:
# a list comprehension for extracting the target from json
price = [x for x in allPrices if "symbol" in x and x['symbol'] == symbol_of_interest][0]['price']
except:
print "symbol not found"
else:
print "The current price of " + symbol_of_interest + " is " + price
|
{
"content_hash": "fb3d7f89c9a9fc771509a3a74625ee07",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 99,
"avg_line_length": 28.264705882352942,
"alnum_prop": 0.7148803329864725,
"repo_name": "js7558/pyBinance",
"id": "524cf0b00f0b43d9a70d367a20679f519f9801e2",
"size": "2303",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/example-getCurrentPrice.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "62407"
}
],
"symlink_target": ""
}
|
import json
import logging
import re
from api.api_samples.python_client.api_client import CloudBoltAPIClient
from api.api_samples.python_client.samples.api_helpers import wait_for_order_completion
from common.methods import set_progress
from servicecatalog.models import ServiceBlueprint
from utilities.exceptions import CloudBoltException
from utilities.models import ConnectionInfo
from resourcehandlers.azure_arm.models import AzureARMHandler
from azure.mgmt.resource import ResourceManagementClient
from azure.common.credentials import ServicePrincipalCredentials
from azure.mgmt.resource.resources.models import ResourceGroup
from infrastructure.models import CustomField, Environment
# suppress logging from requests module
logger = logging.getLogger('requests')
logger.setLevel(40)
logger = logging.getLogger('py.warnings')
logger.setLevel(40)
API_CLIENT_CI = "CIT API Client"
# BP specific variables - You should change these
BLUEPRINT = 106
BP_PAYLOAD = """
{
"group": "/api/v2/groups/2/",
"items": {
"deploy-items": [
{
"blueprint": "/api/v2/blueprints/106/",
"blueprint-items-arguments": {
"build-item-Create Azure Kubernetes Service": {
"parameters": {
"cloudbolt-environment-a493": "89",
"cluster-dns-prefix-a493": "citdnsprefix",
"cluster-name-a493": "CITclustertest",
"cluster-pool-name-a493": "cittpool1",
"node-count-a493": "2",
"resource-groups-a493": "CITResourceGroup"
}
}
},
"resource-name": "Azure Kubernetes",
"resource-parameters": {}
}
]
},
"submit-now": "true"
}
"""
NEW_RESOURCE_NAME = 'CITclusterDontdelete'
def get_order_id_from_href(order_href):
mo = re.search("/orders/([0-9]+)", order_href)
return int(mo.groups()[0])
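# Example: get_order_id_from_href("/api/v2/orders/123/") returns 123.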
def test_order_blueprint(client):
order = json.loads(client.post('/api/v2/orders/', body=BP_PAYLOAD))
order_href = order['_links']['self']['href']
order_id = get_order_id_from_href(order_href)
result = wait_for_order_completion(client, order_id, 1800, 10)
if result != 0:
raise CloudBoltException("Blueprint Deployment order {} did not succeed.".format(order_id))
set_progress("Blueprint deployment order {} completed successfully.".format(order_id))
def test_delete_resource(client, resource):
body = "{}"
delete = json.loads(client.post(
'/api/v2/resources/{}/{}/actions/1/'.format(resource.resource_type.name, resource.id), body=body))
def get_api_client():
ci = ConnectionInfo.objects.get(name=API_CLIENT_CI)
return CloudBoltAPIClient(
ci.username, ci.password, ci.ip, ci.port, protocol=ci.protocol)
def run(job, *args, **kwargs):
bp = ServiceBlueprint.objects.get(id=BLUEPRINT)
set_progress(
"Running Continuous Infrastructure Test for blueprint {}".format(bp)
)
client = get_api_client()
# Order the BP
set_progress("### ORDERING BLUEPRINT ###", tasks_done=0, total_tasks=2)
test_order_blueprint(client)
resource = bp.resource_set.filter(name__iexact=NEW_RESOURCE_NAME, lifecycle='ACTIVE').first()
set_progress("RESOURCE {}".format(resource))
rce = bp.resource_set.last()
set_progress("LAST RESOURCE {}".format(rce))
resource.delete()
set_progress('### DISCOVERING RESOURCES FOR BLUEPRINT ###', tasks_done=1)
bp.sync_resources()
# Should be able to get the resource since the sync should have created it.
    resource = bp.resource_set.get(name__icontains=NEW_RESOURCE_NAME, lifecycle='ACTIVE')
set_progress("### DELETING RESOURCE FOR BLUEPRINT ###", tasks_done=1)
test_delete_resource(client, resource)
set_progress("ALL Tests completed!", tasks_done=2)
|
{
"content_hash": "3d1bbdb6db9ed73cdf242d9d2e2a553e",
"timestamp": "",
"source": "github",
"line_count": 114,
"max_line_length": 106,
"avg_line_length": 34.89473684210526,
"alnum_prop": 0.6485671191553545,
"repo_name": "CloudBoltSoftware/cloudbolt-forge",
"id": "b98fc050c47ce9433dd8ef71092e02b36938f738",
"size": "3978",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "blueprints/azure_k8s/cit/cit_delete_cluster.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "1665"
},
{
"name": "HTML",
"bytes": "165828"
},
{
"name": "JavaScript",
"bytes": "1871"
},
{
"name": "PowerShell",
"bytes": "5779"
},
{
"name": "Python",
"bytes": "1742154"
},
{
"name": "Shell",
"bytes": "16836"
}
],
"symlink_target": ""
}
|
from collections import OrderedDict
from gluon import current
from gluon.html import A, DIV, LI, URL, TAG, TD, TR, UL
from gluon.storage import Storage
from s3 import S3SQLInlineLink
def config(settings):
"""
Template settings: 'Skeleton' designed to be copied to quickly create
custom templates
All settings which are to configure a specific template are located
here. Deployers should ideally not need to edit any other files outside
of their template folder.
"""
T = current.T
#settings.base.system_name = T("Sahana Skeleton")
#settings.base.system_name_short = T("Sahana")
# PrePopulate data
settings.base.prepopulate += ("historic/Harvey", "default/users")
# Theme (folder to use for views/layout.html)
#settings.base.theme = "historic.Harvey"
# Authentication settings
# Should users be allowed to register themselves?
#settings.security.self_registration = False
# Do new users need to verify their email address?
#settings.auth.registration_requires_verification = True
# Do new users need to be approved by an administrator prior to being able to login?
#settings.auth.registration_requires_approval = True
#settings.auth.registration_requests_organisation = True
# Approval emails get sent to all admins
settings.mail.approver = "ADMIN"
# Restrict the Location Selector to just certain countries
# NB This can also be over-ridden for specific contexts later
# e.g. Activities filtered to those of parent Project
settings.gis.countries = ("US",)
# Uncomment to display the Map Legend as a floating DIV
settings.gis.legend = "float"
# Uncomment to Disable the Postcode selector in the LocationSelector
#settings.gis.postcode_selector = False # @ToDo: Vary by country (include in the gis_config!)
# Uncomment to show the Print control:
# http://eden.sahanafoundation.org/wiki/UserGuidelines/Admin/MapPrinting
#settings.gis.print_button = True
# L10n settings
# Languages used in the deployment (used for Language Toolbar & GIS Locations)
# http://www.loc.gov/standards/iso639-2/php/code_list.php
#settings.L10n.languages = OrderedDict([
# ("ar", "العربية"),
# ("bs", "Bosanski"),
# ("en", "English"),
# ("fr", "Français"),
# ("de", "Deutsch"),
# ("el", "ελληνικά"),
# ("es", "Español"),
# ("it", "Italiano"),
# ("ja", "日本語"),
# ("km", "ភាសាខ្មែរ"),
# ("ko", "한국어"),
# ("ne", "नेपाली"), # Nepali
# ("prs", "دری"), # Dari
# ("ps", "پښتو"), # Pashto
# ("pt", "Português"),
# ("pt-br", "Português (Brasil)"),
# ("ru", "русский"),
# ("tet", "Tetum"),
# ("tl", "Tagalog"),
# ("tr", "Türkçe"),
# ("ur", "اردو"),
# ("vi", "Tiếng Việt"),
# ("zh-cn", "中文 (简体)"),
# ("zh-tw", "中文 (繁體)"),
#])
# Default language for Language Toolbar (& GIS Locations in future)
#settings.L10n.default_language = "en"
# Uncomment to Hide the language toolbar
settings.L10n.display_toolbar = False
# Default timezone for users
#settings.L10n.utc_offset = "+0100"
# Number formats (defaults to ISO 31-0)
# Decimal separator for numbers (defaults to ,)
settings.L10n.decimal_separator = "."
# Thousands separator for numbers (defaults to space)
settings.L10n.thousands_separator = ","
# Uncomment this to Translate Layer Names
#settings.L10n.translate_gis_layer = True
# Uncomment this to Translate Location Names
#settings.L10n.translate_gis_location = True
# Uncomment this to Translate Organisation Names/Acronyms
#settings.L10n.translate_org_organisation = True
# Finance settings
#settings.fin.currencies = {
# "EUR" : "Euros",
# "GBP" : "Great British Pounds",
# "USD" : "United States Dollars",
#}
#settings.fin.currency_default = "USD"
# Security Policy
# http://eden.sahanafoundation.org/wiki/S3AAA#System-widePolicy
# 1: Simple (default): Global as Reader, Authenticated as Editor
# 2: Editor role required for Update/Delete, unless record owned by session
# 3: Apply Controller ACLs
# 4: Apply both Controller & Function ACLs
# 5: Apply Controller, Function & Table ACLs
# 6: Apply Controller, Function, Table ACLs and Entity Realm
# 7: Apply Controller, Function, Table ACLs and Entity Realm + Hierarchy
# 8: Apply Controller, Function, Table ACLs, Entity Realm + Hierarchy and Delegations
#
#settings.security.policy = 7 # Organisation-ACLs
# -------------------------------------------------------------------------
# Organisations
settings.org.tags = True
settings.org.service_locations = True
# -------------------------------------------------------------------------
# Project Module
settings.project.projects = True
settings.project.mode_3w = True
settings.project.activities = True
settings.project.activity_types = True
settings.project.sectors = True
settings.project.multiple_organisations = True
settings.project.assign_staff_tab = False
# -------------------------------------------------------------------------
# Requests Module
#
settings.req.recurring = False
settings.req.use_req_number = False
settings.req.requester_optional = True
settings.req.summary = True
settings.req.use_commit = False
settings.req.ask_transport = True
settings.req.req_type = ("Stock",)
# -------------------------------------------------------------------------
# Shelter Module
#
settings.cr.people_registration = False
# -------------------------------------------------------------------------
def customise_project_location_resource(r, tablename):
s3db = current.s3db
table = s3db.project_location
# Allow editing of names
field = table.name
field.readable = field.writable = True
# Hide percentage field (not needed)
field = table.percentage
field.readable = field.writable = False
# Use location selector
from s3 import S3LocationSelector
field = table.location_id
field.widget = S3LocationSelector(show_address=True)
# List fields
list_fields = ["project_id",
"name",
"location_id",
"location_id$addr_street",
"activity_type_location.activity_type_id",
]
# CRUD Form
from s3 import S3SQLCustomForm
crud_form = S3SQLCustomForm("project_id",
"name",
"location_id",
S3SQLInlineLink("activity_type",
field = "activity_type_id",
multiple = True,
),
"comments",
)
# Reconfigure resource
s3db.configure("project_location",
crud_form = crud_form,
list_fields = list_fields,
create_next = None,
onaccept = None,
)
settings.customise_project_location_resource = customise_project_location_resource
# -------------------------------------------------------------------------
def customise_org_facility_resource(r, tablename):
from s3 import S3LocationFilter, S3OptionsFilter, S3TextFilter
filter_widgets = [
S3TextFilter(["name"],
label = T("Search"),
comment = T("Search by facility name. You can use * as wildcard."),
),
S3OptionsFilter("site_facility_type.facility_type_id",
),
S3OptionsFilter("organisation_id",
),
S3LocationFilter("location_id",
),
]
s3db = current.s3db
s3db.configure(tablename,
filter_widgets = filter_widgets,
)
# Customize fields
table = s3db.org_facility
# Main facility flag visible and in custom crud form
field = table.main_facility
field.readable = field.writable = True
crud_form = s3db.get_config(tablename, "crud_form")
crud_form.insert(-2, "main_facility")
# "Obsolete" labeled as "inactive"
field = table.obsolete
field.label = T("Inactive")
# Show Last Updated field in list view
list_fields = s3db.get_config(tablename, "list_fields")
list_fields.append((T("Last Updated"), "modified_on"))
settings.customise_org_facility_resource = customise_org_facility_resource
# -------------------------------------------------------------------------
def customise_org_organisation_resource(r, tablename):
from gluon.html import DIV, INPUT
from s3 import s3_comments_widget, \
S3LocationSelector, \
S3MultiSelectWidget, \
S3SQLCustomForm, \
S3SQLInlineComponent, \
S3SQLVerticalSubFormLayout
s3db = current.s3db
# Filtered component to access phone number and email
s3db.add_components(tablename,
org_facility = {"name": "main_facility",
"joinby": "organisation_id",
"filterby": {
"main_facility": True,
},
},
)
s3db.org_organisation_location.location_id.widget = S3LocationSelector(levels=("L2", "L3"),
show_map=False,
labels=False,
)
crud_fields = ["name",
"acronym",
S3SQLInlineLink("organisation_type",
field = "organisation_type_id",
label = T("Type"),
multiple = False,
),
S3SQLInlineLink(
"service",
label = T("Services"),
field = "service_id",
),
S3SQLInlineComponent(
"facility",
label = T("Main Facility"),
fields = ["name",
"phone1",
"phone2",
"email",
"location_id",
],
layout = S3SQLVerticalSubFormLayout,
filterby = {"field": "main_facility",
"options": True,
},
multiple = False,
),
"website",
S3SQLInlineComponent(
"contact",
name = "twitter",
label = T("Twitter"),
multiple = False,
fields = [("", "value")],
filterby = dict(field = "contact_method",
options = "TWITTER",
),
),
S3SQLInlineComponent(
"contact",
name = "facebook",
label = T("Facebook"),
multiple = False,
fields = [("", "value")],
filterby = dict(field = "contact_method",
options = "FACEBOOK",
),
),
"comments",
]
crud_form = S3SQLCustomForm(*crud_fields)
from s3 import S3LocationFilter, S3OptionsFilter, S3TextFilter#, S3HierarchyFilter
filter_widgets = [
S3TextFilter(["name", "acronym"],
label = T("Search"),
comment = T("Search by organization name or acronym. You can use * as wildcard."),
_class = "filter-search",
),
S3LocationFilter("org_facility.location_id",
label = T("Location"),
#hidden = True,
),
S3OptionsFilter("organisation_organisation_type.organisation_type_id",
label = T("Type"),
#hidden = True,
),
S3OptionsFilter("service_organisation.service_id",
#hidden = True,
),
]
list_fields = ["name",
(T("Type"), "organisation_organisation_type.organisation_type_id"),
(T("Services"), "service.name"),
(T("Adresse"), "main_facility.location_id"),
(T("Phone #"), "main_facility.phone1"),
(T("Email"), "main_facility.email"),
(T("Facebook"), "facebook.value"),
"website",
(T("Last Updated"), "modified_on"),
]
s3db.configure(tablename,
crud_form = crud_form,
filter_widgets = filter_widgets,
list_fields = list_fields,
)
settings.customise_org_organisation_resource = customise_org_organisation_resource
# -------------------------------------------------------------------------
def customise_org_organisation_controller(**attr):
tabs = [(T("Basic Details"), None),
(T("Service Locations"), "service_location"),
(T("Needs"), "needs"),
(T("Facilities"), "facility"),
(T("Warehouses"), "warehouse"),
(T("Offices"), "office"),
(T("Staff & Volunteers"), "human_resource"),
#(T("Assets"), "asset"),
#(T("Projects"), "project"),
#(T("User Roles"), "roles"),
#(T("Tasks"), "task"),
]
rheader = lambda r: current.s3db.org_rheader(r, tabs=tabs)
attr["rheader"] = rheader
return attr
settings.customise_org_organisation_controller = customise_org_organisation_controller
# -------------------------------------------------------------------------
def customise_req_organisation_needs_resource(r, tablename):
s3db = current.s3db
table = current.s3db.req_organisation_needs
CASH = T("Cash Donations needed")
if r.tablename == "req_organisation_needs":
from s3 import IS_ONE_OF, S3DateTime
# Allow only organisations which do not have a needs record
# yet (single component):
field = table.organisation_id
dbset = current.db(table.id == None)
left = table.on(table.organisation_id == current.s3db.org_organisation.id)
field.requires = IS_ONE_OF(dbset, "org_organisation.id",
field.represent,
left = left,
orderby = "org_organisation.name",
sort = True,
)
# Format modified_on as date
field = table.modified_on
field.represent = lambda d: S3DateTime.date_represent(d, utc=True)
if r.representation in ("html", "aadata", "iframe"):
# Structured lists for interactive views
from gluon import Field
table.needs_skills = Field.Method(lambda row: \
organisation_needs(row, need_type="skills"))
table.needs_items = Field.Method(lambda row: \
organisation_needs(row, need_type="items"))
current.response.s3.stylesheets.append("../themes/RW/needs.css")
needs_skills = (T("Volunteers needed"), "needs_skills")
needs_items = (T("Supplies needed"), "needs_items")
# Filter widgets
from s3 import S3LocationFilter, S3OptionsFilter, S3TextFilter
filter_widgets = [#S3TextFilter(["organisation_id$name",
# ],
# label = T("Search"),
# ),
S3OptionsFilter("organisation_id"),
S3OptionsFilter("organisation_needs_skill.skill_id",
label = T("Skills sought"),
),
S3OptionsFilter("organisation_needs_item.item_id",
label = T("Supplies sought"),
),
S3LocationFilter("organisation_id$active_service_location.site_id$location_id",
),
]
# CRUD form
from s3 import S3SQLCustomForm, S3SQLInlineComponent
crud_form = S3SQLCustomForm(
"organisation_id",
S3SQLInlineComponent("organisation_needs_skill",
label = T("Volunteers needed"),
fields = ["skill_id",
"demand",
"comments",
],
),
S3SQLInlineComponent("organisation_needs_item",
label = T("Supplies needed"),
fields = ["item_id",
"demand",
"comments",
],
),
(CASH, "money"),
"money_details",
#"vol",
#"vol_details",
)
next_page = r.url(method="") \
if r.tablename == "req_organisation_needs" else None
s3db.configure("req_organisation_needs",
crud_form = crud_form,
filter_widgets = filter_widgets,
create_next = next_page,
update_next = next_page,
)
else:
# Simple fields for exports
needs_skills = (T("Volunteers needed"),
"organisation_needs_skill.skill_id")
needs_items = (T("Supplies needed"),
"organisation_needs_item.item_id")
# List fields (all formats)
list_fields = ["organisation_id",
needs_skills,
needs_items,
(CASH, "money"),
(T("Cash Donation Details"), "money_details"),
(T("Last Update"), "modified_on"),
]
s3db.configure("req_organisation_needs",
list_fields = list_fields,
)
settings.customise_req_organisation_needs_resource = customise_req_organisation_needs_resource
# -------------------------------------------------------------------------
def customise_req_site_needs_resource(r, tablename):
if r.tablename == "req_site_needs":
table = r.table
field = table.site_id
field.label = current.T("Facility")
field.readable = field.writable = True
# Allow only facilities which do not have a req_site_needs
# yet (single component), and filter out obsolete facilities
from s3 import IS_ONE_OF, FS
dbset = current.db(table.id == None)
left = table.on(table.site_id == current.s3db.org_site.id)
field.requires = IS_ONE_OF(dbset, "org_site.site_id",
field.represent,
left = left,
not_filterby = "obsolete",
not_filter_opts = (True,),
orderby = "org_site.name",
sort = True,
)
if not r.record:
query = FS("site_id$obsolete") != True
r.resource.add_filter(query)
# Allow adding of facilities in popup
from s3layouts import S3PopupLink
field.comment = S3PopupLink(c = "org",
f = "facility",
vars = {"child": "site_id",
"parent": "site_needs",
},
title = T("Add New Facility"),
)
# Filters
from s3 import S3LocationFilter, S3TextFilter
filter_widgets = [S3TextFilter(["site_id$name",
"vol_details",
"goods_details",
],
label = T("Search"),
),
S3LocationFilter("site_id$location_id",
),
]
# List fields
list_fields = [(T("Facility"), "site_id$name"),
"site_id$location_id",
("%s?" % T("Volunteers"), "vol"),
(T("Help Wanted"), "vol_details"),
("%s?" % T("Donations"), "goods"),
(T("Donations Needed"), "goods_details"),
"modified_on",
]
current.s3db.configure("req_site_needs",
filter_widgets = filter_widgets,
list_fields = list_fields,
)
settings.customise_req_site_needs_resource = customise_req_site_needs_resource
# -------------------------------------------------------------------------
# Comment/uncomment modules here to disable/enable them
# Modules menu is defined in modules/eden/menu.py
settings.modules = OrderedDict([
# Core modules which shouldn't be disabled
("default", Storage(
name_nice = T("Home"),
restricted = False, # Use ACLs to control access to this module
access = None, # All Users (inc Anonymous) can see this module in the default menu & access the controller
module_type = None # This item is not shown in the menu
)),
("admin", Storage(
name_nice = T("Administration"),
#description = "Site Administration",
restricted = True,
access = "|1|", # Only Administrators can see this module in the default menu & access the controller
module_type = None # This item is handled separately for the menu
)),
("appadmin", Storage(
name_nice = T("Administration"),
#description = "Site Administration",
restricted = True,
module_type = None # No Menu
)),
("errors", Storage(
name_nice = T("Ticket Viewer"),
#description = "Needed for Breadcrumbs",
restricted = False,
module_type = None # No Menu
)),
#("sync", Storage(
# name_nice = T("Synchronization"),
# #description = "Synchronization",
# restricted = True,
# access = "|1|", # Only Administrators can see this module in the default menu & access the controller
# module_type = None # This item is handled separately for the menu
#)),
#("tour", Storage(
# name_nice = T("Guided Tour Functionality"),
# module_type = None,
#)),
#("translate", Storage(
# name_nice = T("Translation Functionality"),
# #description = "Selective translation of strings based on module.",
# module_type = None,
#)),
("gis", Storage(
name_nice = T("Map"),
#description = "Situation Awareness & Geospatial Analysis",
restricted = True,
module_type = 6, # 6th item in the menu
)),
("pr", Storage(
name_nice = T("Person Registry"),
#description = "Central point to record details on People",
restricted = True,
            access = "|1|", # Only Administrators can see this module in the default menu (the controller itself is still accessible to all)
module_type = 10
)),
("org", Storage(
name_nice = T("Organizations"),
#description = 'Lists "who is doing what & where". Allows relief agencies to coordinate their activities',
restricted = True,
module_type = 1
)),
#("hrm", Storage(
# name_nice = T("Staff"),
# #description = "Human Resources Management",
# restricted = True,
# module_type = 2,
#)),
#("vol", Storage(
# name_nice = T("Volunteers"),
# #description = "Human Resources Management",
# restricted = True,
# module_type = 2,
#)),
("cms", Storage(
name_nice = T("Content Management"),
#description = "Content Management System",
restricted = True,
module_type = 10,
)),
#("doc", Storage(
# name_nice = T("Documents"),
# #description = "A library of digital resources, such as photos, documents and reports",
# restricted = True,
# module_type = 10,
#)),
#("msg", Storage(
# name_nice = T("Messaging"),
# #description = "Sends & Receives Alerts via Email & SMS",
# restricted = True,
        #    # The user-visible functionality of this module isn't normally required. Rather, its main purpose is to be accessed from other modules.
# module_type = None,
#)),
("supply", Storage(
name_nice = T("Supply Chain Management"),
#description = "Used within Inventory Management, Request Management and Asset Management",
restricted = True,
module_type = None, # Not displayed
)),
("inv", Storage(
name_nice = T("Warehouses"),
#description = "Receiving and Sending Items",
restricted = True,
module_type = 4
)),
("asset", Storage(
name_nice = T("Assets"),
#description = "Recording and Assigning Assets",
restricted = True,
module_type = 5,
)),
# Vehicle depends on Assets
#("vehicle", Storage(
# name_nice = T("Vehicles"),
# #description = "Manage Vehicles",
# restricted = True,
# module_type = 10,
#)),
("req", Storage(
name_nice = T("Requests"),
#description = "Manage requests for supplies, assets, staff or other resources. Matches against Inventories where supplies are requested.",
restricted = True,
module_type = 10,
)),
("project", Storage(
name_nice = T("Projects"),
#description = "Tracking of Projects, Activities and Tasks",
restricted = True,
module_type = 2
)),
("cr", Storage(
name_nice = T("Shelters"),
#description = "Tracks the location, capacity and breakdown of victims in Shelters",
restricted = True,
module_type = 10
)),
#("hms", Storage(
# name_nice = T("Hospitals"),
# #description = "Helps to monitor status of hospitals",
# restricted = True,
# module_type = 10
#)),
("dc", Storage(
name_nice = T("Data Collection"),
#description = "Allow affected individuals & households to register to receive compensation and distributions",
restricted = True,
module_type = 10,
)),
#("dvr", Storage(
# name_nice = T("Disaster Victim Registry"),
# #description = "Allow affected individuals & households to register to receive compensation and distributions",
# restricted = True,
# module_type = 10,
#)),
#("event", Storage(
# name_nice = T("Events"),
# #description = "Activate Events (e.g. from Scenario templates) for allocation of appropriate Resources (Human, Assets & Facilities).",
# restricted = True,
# module_type = 10,
#)),
#("transport", Storage(
# name_nice = T("Transport"),
# restricted = True,
# module_type = 10,
#)),
#("stats", Storage(
# name_nice = T("Statistics"),
# #description = "Manages statistics",
# restricted = True,
# module_type = None,
#)),
])
# END =========================================================================
from django.apps import AppConfig
class ElasticsearchappConfig(AppConfig):
name = 'elasticsearchapp'
def ready(self):
import elasticsearchapp.signals
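# Importing the signals module inside ready() is the standard Django idiom:
# it runs once the app registry is fully loaded, so any @receiver decorators
# in elasticsearchapp/signals.py are registered exactly once. For context, a
# minimal sketch of what such a module might contain (hypothetical -- the
# actual elasticsearchapp.signals is not part of this file):
#
#     from django.db.models.signals import post_save
#     from django.dispatch import receiver
#     from .models import Post  # assumed model name
#
#     @receiver(post_save, sender=Post)
#     def index_post(sender, instance, **kwargs):
#         ...  # push the saved instance into the Elasticsearch index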
"""
Generates a report for English Wikipedia articles with no Wikidata item
Copyright (C) 2015 James Hare
Licensed under MIT License: http://mitlicense.org
"""
from urllib.parse import quote
import pywikibot
from project_index import WikiProjectTools
def main():
wptools = WikiProjectTools()
bot = pywikibot.Site('en', 'wikipedia')
q = ('select page_title from page where page_namespace = 0 '
'and page_is_redirect = 0 and page_id not in '
'(select page_id from page join page_props on pp_page = page_id '
'where page_namespace = 0 and pp_propname = "wikibase_item") '
'order by page_id;')
no_wikidata = [x[0].decode('utf-8') for x in wptools.query('wiki', q, None)]
total_count = len(no_wikidata) # Capturing this before truncating list
no_wikidata = no_wikidata[:100]
page = pywikibot.Page(bot, 'User:Reports_bot/No_Wikidata_item')
content = "'''Total Articles Missing From Wikidata:''' " + str(total_count) + "\n\n"
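    # Each list entry renders as wiki markup of the form:
    #   * [[Some title]] ([https://www.wikidata.org/w/index.php?search=... Search on Wikidata])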
for title in no_wikidata:
content += "* [[" + title.replace('_', ' ') + \
"]] ([https://www.wikidata.org/w/index.php?search=" + \
quote(title) + \
"&title=Special%3ASearch&fulltext=1 Search on Wikidata])\n"
page.text = content
page.save("Updating list", minor=False, quiet=True)
if __name__ == "__main__":
main()
import pytest
from tests.support.asserts import assert_dialog_handled, assert_error, assert_success
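# Helper that issues the WebDriver "Element Send Keys" command directly over
# the wire protocol, so each test can assert on the raw response -- including
# "unexpected alert open" errors -- rather than going through a client wrapper.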
def element_send_keys(session, element, text):
return session.transport.send(
"POST", "/session/{session_id}/element/{element_id}/value".format(
session_id=session.session_id,
element_id=element.id),
{"text": text})
@pytest.fixture
def check_user_prompt_closed_without_exception(session, create_dialog, inline):
def check_user_prompt_closed_without_exception(dialog_type, retval):
session.url = inline("<input type=text>")
element = session.find.css("input", all=False)
create_dialog(dialog_type, text=dialog_type)
response = element_send_keys(session, element, "foo")
assert_success(response)
assert_dialog_handled(session, expected_text=dialog_type, expected_retval=retval)
assert element.property("value") == "foo"
return check_user_prompt_closed_without_exception
@pytest.fixture
def check_user_prompt_closed_with_exception(session, create_dialog, inline):
def check_user_prompt_closed_with_exception(dialog_type, retval):
session.url = inline("<input type=text>")
element = session.find.css("input", all=False)
create_dialog(dialog_type, text=dialog_type)
response = element_send_keys(session, element, "foo")
assert_error(response, "unexpected alert open")
assert_dialog_handled(session, expected_text=dialog_type, expected_retval=retval)
assert element.property("value") == ""
return check_user_prompt_closed_with_exception
@pytest.fixture
def check_user_prompt_not_closed_but_exception(session, create_dialog, inline):
def check_user_prompt_not_closed_but_exception(dialog_type):
session.url = inline("<input type=text>")
element = session.find.css("input", all=False)
create_dialog(dialog_type, text=dialog_type)
response = element_send_keys(session, element, "foo")
assert_error(response, "unexpected alert open")
assert session.alert.text == dialog_type
session.alert.dismiss()
assert element.property("value") == ""
return check_user_prompt_not_closed_but_exception
@pytest.mark.capabilities({"unhandledPromptBehavior": "accept"})
@pytest.mark.parametrize("dialog_type, retval", [
("alert", None),
("confirm", True),
("prompt", ""),
])
def test_accept(check_user_prompt_closed_without_exception, dialog_type, retval):
check_user_prompt_closed_without_exception(dialog_type, retval)
@pytest.mark.capabilities({"unhandledPromptBehavior": "accept and notify"})
@pytest.mark.parametrize("dialog_type, retval", [
("alert", None),
("confirm", True),
("prompt", ""),
])
def test_accept_and_notify(check_user_prompt_closed_with_exception, dialog_type, retval):
check_user_prompt_closed_with_exception(dialog_type, retval)
@pytest.mark.capabilities({"unhandledPromptBehavior": "dismiss"})
@pytest.mark.parametrize("dialog_type, retval", [
("alert", None),
("confirm", False),
("prompt", None),
])
def test_dismiss(check_user_prompt_closed_without_exception, dialog_type, retval):
check_user_prompt_closed_without_exception(dialog_type, retval)
@pytest.mark.capabilities({"unhandledPromptBehavior": "dismiss and notify"})
@pytest.mark.parametrize("dialog_type, retval", [
("alert", None),
("confirm", False),
("prompt", None),
])
def test_dismiss_and_notify(check_user_prompt_closed_with_exception, dialog_type, retval):
check_user_prompt_closed_with_exception(dialog_type, retval)
@pytest.mark.capabilities({"unhandledPromptBehavior": "ignore"})
@pytest.mark.parametrize("dialog_type", ["alert", "confirm", "prompt"])
def test_ignore(check_user_prompt_not_closed_but_exception, dialog_type):
check_user_prompt_not_closed_but_exception(dialog_type)
@pytest.mark.parametrize("dialog_type, retval", [
("alert", None),
("confirm", False),
("prompt", None),
])
def test_default(check_user_prompt_closed_with_exception, dialog_type, retval):
check_user_prompt_closed_with_exception(dialog_type, retval)
"""Finds browsers that can be controlled by telemetry."""
import logging
import operator
from telemetry import decorators
from telemetry.internal.backends.chrome import android_browser_finder
from telemetry.internal.backends.chrome import cros_browser_finder
from telemetry.internal.backends.chrome import desktop_browser_finder
from telemetry.internal.backends.chrome import ios_browser_finder
from telemetry.internal.backends.mandoline import android_mandoline_finder
from telemetry.internal.backends.mandoline import desktop_mandoline_finder
from telemetry.internal.backends.remote import trybot_browser_finder
from telemetry.internal.browser import browser_finder_exceptions
from telemetry.internal.platform import device_finder
BROWSER_FINDERS = [
desktop_browser_finder,
android_browser_finder,
cros_browser_finder,
ios_browser_finder,
trybot_browser_finder,
desktop_mandoline_finder,
android_mandoline_finder,
]
def FindAllBrowserTypes(options):
return reduce(operator.add,
[bf.FindAllBrowserTypes(options) for bf in BROWSER_FINDERS])
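# Note: this module targets Python 2 -- FindAllBrowserTypes above relies on
# the builtin reduce(), and FindBrowser below uses a cmp-style list.sort(),
# neither of which works unchanged on Python 3.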
@decorators.Cache
def FindBrowser(options):
"""Finds the best PossibleBrowser object given a BrowserOptions object.
Args:
    options: A BrowserOptions object.
Returns:
A PossibleBrowser object.
Raises:
BrowserFinderException: Options improperly set, or an error occurred.
"""
if options.__class__.__name__ == '_FakeBrowserFinderOptions':
return options.fake_possible_browser
if options.browser_type == 'exact' and options.browser_executable == None:
raise browser_finder_exceptions.BrowserFinderException(
'--browser=exact requires --browser-executable to be set.')
if options.browser_type != 'exact' and options.browser_executable != None:
raise browser_finder_exceptions.BrowserFinderException(
'--browser-executable requires --browser=exact.')
if options.browser_type == 'cros-chrome' and options.cros_remote == None:
raise browser_finder_exceptions.BrowserFinderException(
        'browser_type=cros-chrome requires cros_remote to be set.')
if (options.browser_type != 'cros-chrome' and
options.browser_type != 'cros-chrome-guest' and
options.cros_remote != None):
raise browser_finder_exceptions.BrowserFinderException(
'--remote requires --browser=cros-chrome or cros-chrome-guest.')
devices = device_finder.GetDevicesMatchingOptions(options)
browsers = []
default_browsers = []
for device in devices:
for finder in BROWSER_FINDERS:
      if (options.browser_type and options.browser_type != 'any' and
          options.browser_type not in finder.FindAllBrowserTypes(options)):
continue
curr_browsers = finder.FindAllAvailableBrowsers(options, device)
new_default_browser = finder.SelectDefaultBrowser(curr_browsers)
if new_default_browser:
default_browsers.append(new_default_browser)
browsers.extend(curr_browsers)
if options.browser_type == None:
if default_browsers:
default_browser = sorted(default_browsers,
key=lambda b: b.last_modification_time())[-1]
logging.warning('--browser omitted. Using most recent local build: %s' %
default_browser.browser_type)
default_browser.UpdateExecutableIfNeeded()
return default_browser
if len(browsers) == 1:
logging.warning('--browser omitted. Using only available browser: %s' %
browsers[0].browser_type)
browsers[0].UpdateExecutableIfNeeded()
return browsers[0]
raise browser_finder_exceptions.BrowserTypeRequiredException(
'--browser must be specified. Available browsers:\n%s' %
'\n'.join(sorted(set([b.browser_type for b in browsers]))))
if options.browser_type == 'any':
types = FindAllBrowserTypes(options)
def CompareBrowsersOnTypePriority(x, y):
x_idx = types.index(x.browser_type)
y_idx = types.index(y.browser_type)
return x_idx - y_idx
browsers.sort(CompareBrowsersOnTypePriority)
if len(browsers) >= 1:
browsers[0].UpdateExecutableIfNeeded()
return browsers[0]
else:
return None
matching_browsers = [b for b in browsers
if b.browser_type == options.browser_type and b.SupportsOptions(options)]
chosen_browser = None
if len(matching_browsers) == 1:
chosen_browser = matching_browsers[0]
elif len(matching_browsers) > 1:
logging.warning('Multiple browsers of the same type found: %s' % (
repr(matching_browsers)))
chosen_browser = sorted(matching_browsers,
key=lambda b: b.last_modification_time())[-1]
if chosen_browser:
logging.info('Chose browser: %s' % (repr(chosen_browser)))
chosen_browser.UpdateExecutableIfNeeded()
return chosen_browser
@decorators.Cache
def GetAllAvailableBrowsers(options, device):
"""Returns a list of available browsers on the device.
Args:
options: A BrowserOptions object.
device: The target device, which can be None.
Returns:
A list of browser instances.
Raises:
BrowserFinderException: Options are improperly set, or an error occurred.
"""
if not device:
return []
possible_browsers = []
for browser_finder in BROWSER_FINDERS:
possible_browsers.extend(
browser_finder.FindAllAvailableBrowsers(options, device))
return possible_browsers
@decorators.Cache
def GetAllAvailableBrowserTypes(options):
"""Returns a list of available browser types.
Args:
options: A BrowserOptions object.
Returns:
A list of browser type strings.
Raises:
BrowserFinderException: Options are improperly set, or an error occurred.
"""
devices = device_finder.GetDevicesMatchingOptions(options)
possible_browsers = []
for device in devices:
possible_browsers.extend(GetAllAvailableBrowsers(options, device))
type_list = set([browser.browser_type for browser in possible_browsers])
# The reference build should be available for mac, linux and win, but the
# desktop browser finder won't return it in the list of browsers.
for browser in possible_browsers:
if (browser.target_os == 'darwin' or browser.target_os.startswith('linux')
or browser.target_os.startswith('win')):
type_list.add('reference')
break
type_list = list(type_list)
type_list.sort()
return type_list
"""Concrete date/time and related types.
See http://www.iana.org/time-zones/repository/tz-link.html for
time zone and DST data sources.
"""
import time as _time
import math as _math
def _cmp(x, y):
return 0 if x == y else 1 if x > y else -1
MINYEAR = 1
MAXYEAR = 9999
_MAXORDINAL = 3652059 # date.max.toordinal()
# Utility functions, adapted from Python's Demo/classes/Dates.py, which
# also assumes the current Gregorian calendar indefinitely extended in
# both directions. Difference: Dates.py calls January 1 of year 0 day
# number 1. The code here calls January 1 of year 1 day number 1. This is
# to match the definition of the "proleptic Gregorian" calendar in Dershowitz
# and Reingold's "Calendrical Calculations", where it's the base calendar
# for all computations. See the book for algorithms for converting between
# proleptic Gregorian ordinals and many other calendar systems.
# -1 is a placeholder for indexing purposes.
_DAYS_IN_MONTH = [-1, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
_DAYS_BEFORE_MONTH = [-1] # -1 is a placeholder for indexing purposes.
dbm = 0
for dim in _DAYS_IN_MONTH[1:]:
_DAYS_BEFORE_MONTH.append(dbm)
dbm += dim
del dbm, dim
def _is_leap(year):
"year -> 1 if leap year, else 0."
return year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)
def _days_before_year(year):
"year -> number of days before January 1st of year."
y = year - 1
return y*365 + y//4 - y//100 + y//400
def _days_in_month(year, month):
"year, month -> number of days in that month in that year."
assert 1 <= month <= 12, month
if month == 2 and _is_leap(year):
return 29
return _DAYS_IN_MONTH[month]
def _days_before_month(year, month):
"year, month -> number of days in year preceding first day of month."
assert 1 <= month <= 12, 'month must be in 1..12'
return _DAYS_BEFORE_MONTH[month] + (month > 2 and _is_leap(year))
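# Quick sanity checks of the Gregorian leap-year rule above: 2000 is a leap
# year (divisible by 400) while 1900 is not (divisible by 100 but not 400).
assert _is_leap(2000) and not _is_leap(1900)
assert _days_in_month(2000, 2) == 29 and _days_in_month(1900, 2) == 28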
def _ymd2ord(year, month, day):
"year, month, day -> ordinal, considering 01-Jan-0001 as day 1."
assert 1 <= month <= 12, 'month must be in 1..12'
dim = _days_in_month(year, month)
assert 1 <= day <= dim, ('day must be in 1..%d' % dim)
return (_days_before_year(year) +
_days_before_month(year, month) +
day)
_DI400Y = _days_before_year(401) # number of days in 400 years
_DI100Y = _days_before_year(101) # " " " " 100 "
_DI4Y = _days_before_year(5) # " " " " 4 "
# A 4-year cycle has an extra leap day over what we'd get from pasting
# together 4 single years.
assert _DI4Y == 4 * 365 + 1
# Similarly, a 400-year cycle has an extra leap day over what we'd get from
# pasting together 4 100-year cycles.
assert _DI400Y == 4 * _DI100Y + 1
# OTOH, a 100-year cycle has one fewer leap day than we'd get from
# pasting together 25 4-year cycles.
assert _DI100Y == 25 * _DI4Y - 1
def _ord2ymd(n):
"ordinal -> (year, month, day), considering 01-Jan-0001 as day 1."
# n is a 1-based index, starting at 1-Jan-1. The pattern of leap years
# repeats exactly every 400 years. The basic strategy is to find the
# closest 400-year boundary at or before n, then work with the offset
# from that boundary to n. Life is much clearer if we subtract 1 from
# n first -- then the values of n at 400-year boundaries are exactly
# those divisible by _DI400Y:
#
# D M Y n n-1
# -- --- ---- ---------- ----------------
# 31 Dec -400 -_DI400Y -_DI400Y -1
# 1 Jan -399 -_DI400Y +1 -_DI400Y 400-year boundary
# ...
# 30 Dec 000 -1 -2
# 31 Dec 000 0 -1
# 1 Jan 001 1 0 400-year boundary
# 2 Jan 001 2 1
# 3 Jan 001 3 2
# ...
# 31 Dec 400 _DI400Y _DI400Y -1
# 1 Jan 401 _DI400Y +1 _DI400Y 400-year boundary
n -= 1
n400, n = divmod(n, _DI400Y)
year = n400 * 400 + 1 # ..., -399, 1, 401, ...
# Now n is the (non-negative) offset, in days, from January 1 of year, to
# the desired date. Now compute how many 100-year cycles precede n.
# Note that it's possible for n100 to equal 4! In that case 4 full
# 100-year cycles precede the desired day, which implies the desired
# day is December 31 at the end of a 400-year cycle.
n100, n = divmod(n, _DI100Y)
# Now compute how many 4-year cycles precede it.
n4, n = divmod(n, _DI4Y)
# And now how many single years. Again n1 can be 4, and again meaning
# that the desired day is December 31 at the end of the 4-year cycle.
n1, n = divmod(n, 365)
year += n100 * 100 + n4 * 4 + n1
if n1 == 4 or n100 == 4:
assert n == 0
return year-1, 12, 31
# Now the year is correct, and n is the offset from January 1. We find
# the month via an estimate that's either exact or one too large.
leapyear = n1 == 3 and (n4 != 24 or n100 == 3)
assert leapyear == _is_leap(year)
month = (n + 50) >> 5
preceding = _DAYS_BEFORE_MONTH[month] + (month > 2 and leapyear)
if preceding > n: # estimate is too large
month -= 1
preceding -= _DAYS_IN_MONTH[month] + (month == 2 and leapyear)
n -= preceding
assert 0 <= n < _days_in_month(year, month)
# Now the year and month are correct, and n is the offset from the
# start of that month: we're done!
return year, month, n+1
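# Round-trip sanity checks, in the style of the _DI4Y asserts below:
# 01-Jan-0001 is ordinal 1, and the well-known modern ordinal 730120
# (01-Jan-2000) converts both ways.
assert _ord2ymd(1) == (1, 1, 1)
assert _ymd2ord(2000, 1, 1) == 730120
assert _ord2ymd(730120) == (2000, 1, 1)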
# Month and day names. For localized versions, see the calendar module.
_MONTHNAMES = [None, "Jan", "Feb", "Mar", "Apr", "May", "Jun",
"Jul", "Aug", "Sep", "Oct", "Nov", "Dec"]
_DAYNAMES = [None, "Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"]
def _build_struct_time(y, m, d, hh, mm, ss, dstflag):
wday = (_ymd2ord(y, m, d) + 6) % 7
dnum = _days_before_month(y, m) + d
return _time.struct_time((y, m, d, hh, mm, ss, wday, dnum, dstflag))
def _format_time(hh, mm, ss, us, timespec='auto'):
specs = {
'hours': '{:02d}',
'minutes': '{:02d}:{:02d}',
'seconds': '{:02d}:{:02d}:{:02d}',
'milliseconds': '{:02d}:{:02d}:{:02d}.{:03d}',
'microseconds': '{:02d}:{:02d}:{:02d}.{:06d}'
}
if timespec == 'auto':
# Skip trailing microseconds when us==0.
timespec = 'microseconds' if us else 'seconds'
elif timespec == 'milliseconds':
us //= 1000
try:
fmt = specs[timespec]
except KeyError:
raise ValueError('Unknown timespec value')
else:
return fmt.format(hh, mm, ss, us)
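# Examples of the 'auto' timespec: the fractional part appears only when the
# microsecond value is non-zero.
assert _format_time(13, 2, 47, 123456) == '13:02:47.123456'
assert _format_time(13, 2, 47, 0) == '13:02:47'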
def _format_offset(off):
s = ''
if off is not None:
if off.days < 0:
sign = "-"
off = -off
else:
sign = "+"
hh, mm = divmod(off, timedelta(hours=1))
mm, ss = divmod(mm, timedelta(minutes=1))
s += "%s%02d:%02d" % (sign, hh, mm)
if ss or ss.microseconds:
s += ":%02d" % ss.seconds
if ss.microseconds:
s += '.%06d' % ss.microseconds
return s
# Correctly substitute for %z and %Z escapes in strftime formats.
def _wrap_strftime(object, format, timetuple):
# Don't call utcoffset() or tzname() unless actually needed.
freplace = None # the string to use for %f
zreplace = None # the string to use for %z
Zreplace = None # the string to use for %Z
# Scan format for %z and %Z escapes, replacing as needed.
newformat = []
push = newformat.append
i, n = 0, len(format)
while i < n:
ch = format[i]
i += 1
if ch == '%':
if i < n:
ch = format[i]
i += 1
if ch == 'f':
if freplace is None:
freplace = '%06d' % getattr(object,
'microsecond', 0)
newformat.append(freplace)
elif ch == 'z':
if zreplace is None:
zreplace = ""
if hasattr(object, "utcoffset"):
offset = object.utcoffset()
if offset is not None:
sign = '+'
if offset.days < 0:
offset = -offset
sign = '-'
h, rest = divmod(offset, timedelta(hours=1))
m, rest = divmod(rest, timedelta(minutes=1))
s = rest.seconds
u = offset.microseconds
if u:
zreplace = '%c%02d%02d%02d.%06d' % (sign, h, m, s, u)
elif s:
zreplace = '%c%02d%02d%02d' % (sign, h, m, s)
else:
zreplace = '%c%02d%02d' % (sign, h, m)
assert '%' not in zreplace
newformat.append(zreplace)
elif ch == 'Z':
if Zreplace is None:
Zreplace = ""
if hasattr(object, "tzname"):
s = object.tzname()
if s is not None:
                                # strftime is going to be handed this string verbatim, so escape any '%'
Zreplace = s.replace('%', '%%')
newformat.append(Zreplace)
else:
push('%')
push(ch)
else:
push('%')
else:
push(ch)
newformat = "".join(newformat)
return _time.strftime(newformat, timetuple)
# Helpers for parsing the result of isoformat()
def _parse_isoformat_date(dtstr):
# It is assumed that this function will only be called with a
# string of length exactly 10, and (though this is not used) ASCII-only
year = int(dtstr[0:4])
if dtstr[4] != '-':
raise ValueError('Invalid date separator: %s' % dtstr[4])
month = int(dtstr[5:7])
if dtstr[7] != '-':
raise ValueError('Invalid date separator')
day = int(dtstr[8:10])
return [year, month, day]
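# Example: a well-formed YYYY-MM-DD string parses into its components.
assert _parse_isoformat_date('2019-04-01') == [2019, 4, 1]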
def _parse_hh_mm_ss_ff(tstr):
# Parses things of the form HH[:MM[:SS[.fff[fff]]]]
len_str = len(tstr)
time_comps = [0, 0, 0, 0]
pos = 0
for comp in range(0, 3):
if (len_str - pos) < 2:
raise ValueError('Incomplete time component')
time_comps[comp] = int(tstr[pos:pos+2])
pos += 2
next_char = tstr[pos:pos+1]
if not next_char or comp >= 2:
break
if next_char != ':':
raise ValueError('Invalid time separator: %c' % next_char)
pos += 1
if pos < len_str:
if tstr[pos] != '.':
raise ValueError('Invalid microsecond component')
else:
pos += 1
len_remainder = len_str - pos
if len_remainder not in (3, 6):
raise ValueError('Invalid microsecond component')
time_comps[3] = int(tstr[pos:])
if len_remainder == 3:
time_comps[3] *= 1000
return time_comps
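# Examples: missing components default to zero, and a 3-digit fraction is
# interpreted as milliseconds (scaled up to microseconds).
assert _parse_hh_mm_ss_ff('12') == [12, 0, 0, 0]
assert _parse_hh_mm_ss_ff('12:34:56.789') == [12, 34, 56, 789000]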
def _parse_isoformat_time(tstr):
# Format supported is HH[:MM[:SS[.fff[fff]]]][+HH:MM[:SS[.ffffff]]]
len_str = len(tstr)
if len_str < 2:
raise ValueError('Isoformat time too short')
# This is equivalent to re.search('[+-]', tstr), but faster
tz_pos = (tstr.find('-') + 1 or tstr.find('+') + 1)
timestr = tstr[:tz_pos-1] if tz_pos > 0 else tstr
time_comps = _parse_hh_mm_ss_ff(timestr)
tzi = None
if tz_pos > 0:
tzstr = tstr[tz_pos:]
# Valid time zone strings are:
# HH:MM len: 5
# HH:MM:SS len: 8
# HH:MM:SS.ffffff len: 15
if len(tzstr) not in (5, 8, 15):
raise ValueError('Malformed time zone string')
tz_comps = _parse_hh_mm_ss_ff(tzstr)
if all(x == 0 for x in tz_comps):
tzi = timezone.utc
else:
tzsign = -1 if tstr[tz_pos - 1] == '-' else 1
td = timedelta(hours=tz_comps[0], minutes=tz_comps[1],
seconds=tz_comps[2], microseconds=tz_comps[3])
tzi = timezone(tzsign * td)
time_comps.append(tzi)
return time_comps
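# Example with a naive time: the trailing element is the tzinfo slot. Offset
# forms such as '04:05:06+01:00' instead append a timezone instance, which is
# defined later in this module and so cannot be checked at this point.
assert _parse_isoformat_time('04:05:06') == [4, 5, 6, 0, None]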
# Just raise TypeError if the arg isn't None or a string.
def _check_tzname(name):
if name is not None and not isinstance(name, str):
raise TypeError("tzinfo.tzname() must return None or string, "
"not '%s'" % type(name))
# name is the offset-producing method, "utcoffset" or "dst".
# offset is what it returned.
# If offset isn't None or timedelta, raises TypeError.
# If offset is None, returns None.
# Else offset is checked for being in range.
# If it is, its integer value is returned. Else ValueError is raised.
def _check_utc_offset(name, offset):
assert name in ("utcoffset", "dst")
if offset is None:
return
if not isinstance(offset, timedelta):
raise TypeError("tzinfo.%s() must return None "
"or timedelta, not '%s'" % (name, type(offset)))
if not -timedelta(1) < offset < timedelta(1):
raise ValueError("%s()=%s, must be strictly between "
"-timedelta(hours=24) and timedelta(hours=24)" %
(name, offset))
def _check_int_field(value):
if isinstance(value, int):
return value
if not isinstance(value, float):
try:
value = value.__int__()
except AttributeError:
pass
else:
if isinstance(value, int):
return value
raise TypeError('__int__ returned non-int (type %s)' %
type(value).__name__)
raise TypeError('an integer is required (got type %s)' %
type(value).__name__)
raise TypeError('integer argument expected, got float')
def _check_date_fields(year, month, day):
year = _check_int_field(year)
month = _check_int_field(month)
day = _check_int_field(day)
if not MINYEAR <= year <= MAXYEAR:
raise ValueError('year must be in %d..%d' % (MINYEAR, MAXYEAR), year)
if not 1 <= month <= 12:
raise ValueError('month must be in 1..12', month)
dim = _days_in_month(year, month)
if not 1 <= day <= dim:
raise ValueError('day must be in 1..%d' % dim, day)
return year, month, day
def _check_time_fields(hour, minute, second, microsecond, fold):
hour = _check_int_field(hour)
minute = _check_int_field(minute)
second = _check_int_field(second)
microsecond = _check_int_field(microsecond)
if not 0 <= hour <= 23:
raise ValueError('hour must be in 0..23', hour)
if not 0 <= minute <= 59:
raise ValueError('minute must be in 0..59', minute)
if not 0 <= second <= 59:
raise ValueError('second must be in 0..59', second)
if not 0 <= microsecond <= 999999:
raise ValueError('microsecond must be in 0..999999', microsecond)
if fold not in (0, 1):
raise ValueError('fold must be either 0 or 1', fold)
return hour, minute, second, microsecond, fold
def _check_tzinfo_arg(tz):
if tz is not None and not isinstance(tz, tzinfo):
raise TypeError("tzinfo argument must be None or of a tzinfo subclass")
def _cmperror(x, y):
raise TypeError("can't compare '%s' to '%s'" % (
type(x).__name__, type(y).__name__))
def _divide_and_round(a, b):
"""divide a by b and round result to the nearest integer
When the ratio is exactly half-way between two integers,
the even integer is returned.
"""
# Based on the reference implementation for divmod_near
# in Objects/longobject.c.
q, r = divmod(a, b)
# round up if either r / b > 0.5, or r / b == 0.5 and q is odd.
# The expression r / b > 0.5 is equivalent to 2 * r > b if b is
# positive, 2 * r < b if b negative.
r *= 2
greater_than_half = r > b if b > 0 else r < b
if greater_than_half or r == b and q % 2 == 1:
q += 1
return q
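# Round-half-to-even examples: 7/2 = 3.5 rounds to the even neighbour 4,
# while 5/2 = 2.5 rounds down to 2; negative values behave symmetrically.
assert _divide_and_round(7, 2) == 4
assert _divide_and_round(5, 2) == 2
assert _divide_and_round(-7, 2) == -4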
class timedelta:
"""Represent the difference between two datetime objects.
Supported operators:
- add, subtract timedelta
- unary plus, minus, abs
- compare to timedelta
- multiply, divide by int
In addition, datetime supports subtraction of two datetime objects
returning a timedelta, and addition or subtraction of a datetime
and a timedelta giving a datetime.
Representation: (days, seconds, microseconds). Why? Because I
felt like it.
"""
__slots__ = '_days', '_seconds', '_microseconds', '_hashcode'
def __new__(cls, days=0, seconds=0, microseconds=0,
milliseconds=0, minutes=0, hours=0, weeks=0):
# Doing this efficiently and accurately in C is going to be difficult
# and error-prone, due to ubiquitous overflow possibilities, and that
# C double doesn't have enough bits of precision to represent
# microseconds over 10K years faithfully. The code here tries to make
# explicit where go-fast assumptions can be relied on, in order to
# guide the C implementation; it's way more convoluted than speed-
# ignoring auto-overflow-to-long idiomatic Python could be.
# XXX Check that all inputs are ints or floats.
# Final values, all integer.
# s and us fit in 32-bit signed ints; d isn't bounded.
d = s = us = 0
# Normalize everything to days, seconds, microseconds.
days += weeks*7
seconds += minutes*60 + hours*3600
microseconds += milliseconds*1000
# Get rid of all fractions, and normalize s and us.
# Take a deep breath <wink>.
if isinstance(days, float):
dayfrac, days = _math.modf(days)
daysecondsfrac, daysecondswhole = _math.modf(dayfrac * (24.*3600.))
assert daysecondswhole == int(daysecondswhole) # can't overflow
s = int(daysecondswhole)
assert days == int(days)
d = int(days)
else:
daysecondsfrac = 0.0
d = days
assert isinstance(daysecondsfrac, float)
assert abs(daysecondsfrac) <= 1.0
assert isinstance(d, int)
assert abs(s) <= 24 * 3600
# days isn't referenced again before redefinition
if isinstance(seconds, float):
secondsfrac, seconds = _math.modf(seconds)
assert seconds == int(seconds)
seconds = int(seconds)
secondsfrac += daysecondsfrac
assert abs(secondsfrac) <= 2.0
else:
secondsfrac = daysecondsfrac
# daysecondsfrac isn't referenced again
assert isinstance(secondsfrac, float)
assert abs(secondsfrac) <= 2.0
assert isinstance(seconds, int)
days, seconds = divmod(seconds, 24*3600)
d += days
s += int(seconds) # can't overflow
assert isinstance(s, int)
assert abs(s) <= 2 * 24 * 3600
# seconds isn't referenced again before redefinition
usdouble = secondsfrac * 1e6
assert abs(usdouble) < 2.1e6 # exact value not critical
# secondsfrac isn't referenced again
if isinstance(microseconds, float):
microseconds = round(microseconds + usdouble)
seconds, microseconds = divmod(microseconds, 1000000)
days, seconds = divmod(seconds, 24*3600)
d += days
s += seconds
else:
microseconds = int(microseconds)
seconds, microseconds = divmod(microseconds, 1000000)
days, seconds = divmod(seconds, 24*3600)
d += days
s += seconds
microseconds = round(microseconds + usdouble)
assert isinstance(s, int)
assert isinstance(microseconds, int)
assert abs(s) <= 3 * 24 * 3600
assert abs(microseconds) < 3.1e6
# Just a little bit of carrying possible for microseconds and seconds.
seconds, us = divmod(microseconds, 1000000)
s += seconds
days, s = divmod(s, 24*3600)
d += days
assert isinstance(d, int)
assert isinstance(s, int) and 0 <= s < 24*3600
assert isinstance(us, int) and 0 <= us < 1000000
if abs(d) > 999999999:
raise OverflowError("timedelta # of days is too large: %d" % d)
self = object.__new__(cls)
self._days = d
self._seconds = s
self._microseconds = us
self._hashcode = -1
return self
def __repr__(self):
args = []
if self._days:
args.append("days=%d" % self._days)
if self._seconds:
args.append("seconds=%d" % self._seconds)
if self._microseconds:
args.append("microseconds=%d" % self._microseconds)
if not args:
args.append('0')
return "%s.%s(%s)" % (self.__class__.__module__,
self.__class__.__qualname__,
', '.join(args))
def __str__(self):
mm, ss = divmod(self._seconds, 60)
hh, mm = divmod(mm, 60)
s = "%d:%02d:%02d" % (hh, mm, ss)
if self._days:
def plural(n):
return n, abs(n) != 1 and "s" or ""
s = ("%d day%s, " % plural(self._days)) + s
if self._microseconds:
s = s + ".%06d" % self._microseconds
return s
def total_seconds(self):
"""Total seconds in the duration."""
return ((self.days * 86400 + self.seconds) * 10**6 +
self.microseconds) / 10**6
# Read-only field accessors
@property
def days(self):
"""days"""
return self._days
@property
def seconds(self):
"""seconds"""
return self._seconds
@property
def microseconds(self):
"""microseconds"""
return self._microseconds
def __add__(self, other):
if isinstance(other, timedelta):
# for CPython compatibility, we cannot use
# our __class__ here, but need a real timedelta
return timedelta(self._days + other._days,
self._seconds + other._seconds,
self._microseconds + other._microseconds)
return NotImplemented
__radd__ = __add__
def __sub__(self, other):
if isinstance(other, timedelta):
# for CPython compatibility, we cannot use
# our __class__ here, but need a real timedelta
return timedelta(self._days - other._days,
self._seconds - other._seconds,
self._microseconds - other._microseconds)
return NotImplemented
def __rsub__(self, other):
if isinstance(other, timedelta):
return -self + other
return NotImplemented
def __neg__(self):
# for CPython compatibility, we cannot use
# our __class__ here, but need a real timedelta
return timedelta(-self._days,
-self._seconds,
-self._microseconds)
def __pos__(self):
return self
def __abs__(self):
if self._days < 0:
return -self
else:
return self
def __mul__(self, other):
if isinstance(other, int):
# for CPython compatibility, we cannot use
# our __class__ here, but need a real timedelta
return timedelta(self._days * other,
self._seconds * other,
self._microseconds * other)
if isinstance(other, float):
usec = self._to_microseconds()
a, b = other.as_integer_ratio()
return timedelta(0, 0, _divide_and_round(usec * a, b))
return NotImplemented
__rmul__ = __mul__
def _to_microseconds(self):
return ((self._days * (24*3600) + self._seconds) * 1000000 +
self._microseconds)
def __floordiv__(self, other):
if not isinstance(other, (int, timedelta)):
return NotImplemented
usec = self._to_microseconds()
if isinstance(other, timedelta):
return usec // other._to_microseconds()
if isinstance(other, int):
return timedelta(0, 0, usec // other)
def __truediv__(self, other):
if not isinstance(other, (int, float, timedelta)):
return NotImplemented
usec = self._to_microseconds()
if isinstance(other, timedelta):
return usec / other._to_microseconds()
if isinstance(other, int):
return timedelta(0, 0, _divide_and_round(usec, other))
if isinstance(other, float):
a, b = other.as_integer_ratio()
return timedelta(0, 0, _divide_and_round(b * usec, a))
def __mod__(self, other):
if isinstance(other, timedelta):
r = self._to_microseconds() % other._to_microseconds()
return timedelta(0, 0, r)
return NotImplemented
def __divmod__(self, other):
if isinstance(other, timedelta):
q, r = divmod(self._to_microseconds(),
other._to_microseconds())
return q, timedelta(0, 0, r)
return NotImplemented
# Comparisons of timedelta objects with other.
def __eq__(self, other):
if isinstance(other, timedelta):
return self._cmp(other) == 0
else:
return False
def __le__(self, other):
if isinstance(other, timedelta):
return self._cmp(other) <= 0
else:
_cmperror(self, other)
def __lt__(self, other):
if isinstance(other, timedelta):
return self._cmp(other) < 0
else:
_cmperror(self, other)
def __ge__(self, other):
if isinstance(other, timedelta):
return self._cmp(other) >= 0
else:
_cmperror(self, other)
def __gt__(self, other):
if isinstance(other, timedelta):
return self._cmp(other) > 0
else:
_cmperror(self, other)
def _cmp(self, other):
assert isinstance(other, timedelta)
return _cmp(self._getstate(), other._getstate())
def __hash__(self):
if self._hashcode == -1:
self._hashcode = hash(self._getstate())
return self._hashcode
def __bool__(self):
return (self._days != 0 or
self._seconds != 0 or
self._microseconds != 0)
# Pickle support.
def _getstate(self):
return (self._days, self._seconds, self._microseconds)
def __reduce__(self):
return (self.__class__, self._getstate())
timedelta.min = timedelta(-999999999)
timedelta.max = timedelta(days=999999999, hours=23, minutes=59, seconds=59,
microseconds=999999)
timedelta.resolution = timedelta(microseconds=1)
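# Normalization sanity checks: constructor arguments carry into the canonical
# (days, seconds, microseconds) representation, e.g. 25 hours becomes
# 1 day + 3600 seconds.
assert timedelta(hours=25) == timedelta(days=1, seconds=3600)
assert timedelta(hours=25).days == 1 and timedelta(hours=25).seconds == 3600
assert timedelta(milliseconds=1) == timedelta(microseconds=1000)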
class date:
"""Concrete date type.
Constructors:
__new__()
fromtimestamp()
today()
fromordinal()
Operators:
__repr__, __str__
__eq__, __le__, __lt__, __ge__, __gt__, __hash__
__add__, __radd__, __sub__ (add/radd only with timedelta arg)
Methods:
timetuple()
toordinal()
weekday()
isoweekday(), isocalendar(), isoformat()
ctime()
strftime()
Properties (readonly):
year, month, day
"""
__slots__ = '_year', '_month', '_day', '_hashcode'
def __new__(cls, year, month=None, day=None):
"""Constructor.
Arguments:
year, month, day (required, base 1)
"""
if month is None and isinstance(year, bytes) and len(year) == 4 and \
1 <= year[2] <= 12:
# Pickle support
self = object.__new__(cls)
self.__setstate(year)
self._hashcode = -1
return self
year, month, day = _check_date_fields(year, month, day)
self = object.__new__(cls)
self._year = year
self._month = month
self._day = day
self._hashcode = -1
return self
# Additional constructors
@classmethod
def fromtimestamp(cls, t):
"Construct a date from a POSIX timestamp (like time.time())."
y, m, d, hh, mm, ss, weekday, jday, dst = _time.localtime(t)
return cls(y, m, d)
@classmethod
def today(cls):
"Construct a date from time.time()."
t = _time.time()
return cls.fromtimestamp(t)
@classmethod
def fromordinal(cls, n):
"""Construct a date from a proleptic Gregorian ordinal.
January 1 of year 1 is day 1. Only the year, month and day are
non-zero in the result.
"""
y, m, d = _ord2ymd(n)
return cls(y, m, d)
@classmethod
def fromisoformat(cls, date_string):
"""Construct a date from the output of date.isoformat()."""
if not isinstance(date_string, str):
raise TypeError('fromisoformat: argument must be str')
try:
assert len(date_string) == 10
return cls(*_parse_isoformat_date(date_string))
except Exception:
raise ValueError('Invalid isoformat string: %s' % date_string)
# Conversions to string
def __repr__(self):
"""Convert to formal string, for repr().
>>> dt = datetime(2010, 1, 1)
>>> repr(dt)
'datetime.datetime(2010, 1, 1, 0, 0)'
>>> dt = datetime(2010, 1, 1, tzinfo=timezone.utc)
>>> repr(dt)
'datetime.datetime(2010, 1, 1, 0, 0, tzinfo=datetime.timezone.utc)'
"""
return "%s.%s(%d, %d, %d)" % (self.__class__.__module__,
self.__class__.__qualname__,
self._year,
self._month,
self._day)
# XXX These shouldn't depend on time.localtime(), because that
# clips the usable dates to [1970 .. 2038). At least ctime() is
# easily done without using strftime() -- that's better too because
# strftime("%c", ...) is locale specific.
def ctime(self):
"Return ctime() style string."
weekday = self.toordinal() % 7 or 7
return "%s %s %2d 00:00:00 %04d" % (
_DAYNAMES[weekday],
_MONTHNAMES[self._month],
self._day, self._year)
def strftime(self, fmt):
"Format using strftime()."
return _wrap_strftime(self, fmt, self.timetuple())
def __format__(self, fmt):
if not isinstance(fmt, str):
raise TypeError("must be str, not %s" % type(fmt).__name__)
if len(fmt) != 0:
return self.strftime(fmt)
return str(self)
def isoformat(self):
"""Return the date formatted according to ISO.
This is 'YYYY-MM-DD'.
References:
- http://www.w3.org/TR/NOTE-datetime
- http://www.cl.cam.ac.uk/~mgk25/iso-time.html
"""
return "%04d-%02d-%02d" % (self._year, self._month, self._day)
__str__ = isoformat
# Read-only field accessors
@property
def year(self):
"""year (1-9999)"""
return self._year
@property
def month(self):
"""month (1-12)"""
return self._month
@property
def day(self):
"""day (1-31)"""
return self._day
# Standard conversions, __eq__, __le__, __lt__, __ge__, __gt__,
# __hash__ (and helpers)
def timetuple(self):
"Return local time tuple compatible with time.localtime()."
return _build_struct_time(self._year, self._month, self._day,
0, 0, 0, -1)
def toordinal(self):
"""Return proleptic Gregorian ordinal for the year, month and day.
January 1 of year 1 is day 1. Only the year, month and day values
contribute to the result.
"""
return _ymd2ord(self._year, self._month, self._day)
def replace(self, year=None, month=None, day=None):
"""Return a new date with new values for the specified fields."""
if year is None:
year = self._year
if month is None:
month = self._month
if day is None:
day = self._day
return type(self)(year, month, day)
# Comparisons of date objects with other.
def __eq__(self, other):
if isinstance(other, date):
return self._cmp(other) == 0
return NotImplemented
def __le__(self, other):
if isinstance(other, date):
return self._cmp(other) <= 0
return NotImplemented
def __lt__(self, other):
if isinstance(other, date):
return self._cmp(other) < 0
return NotImplemented
def __ge__(self, other):
if isinstance(other, date):
return self._cmp(other) >= 0
return NotImplemented
def __gt__(self, other):
if isinstance(other, date):
return self._cmp(other) > 0
return NotImplemented
def _cmp(self, other):
assert isinstance(other, date)
y, m, d = self._year, self._month, self._day
y2, m2, d2 = other._year, other._month, other._day
return _cmp((y, m, d), (y2, m2, d2))
def __hash__(self):
"Hash."
if self._hashcode == -1:
self._hashcode = hash(self._getstate())
return self._hashcode
# Computations
def __add__(self, other):
"Add a date to a timedelta."
if isinstance(other, timedelta):
o = self.toordinal() + other.days
if 0 < o <= _MAXORDINAL:
return date.fromordinal(o)
raise OverflowError("result out of range")
return NotImplemented
__radd__ = __add__
def __sub__(self, other):
"""Subtract two dates, or a date and a timedelta."""
if isinstance(other, timedelta):
return self + timedelta(-other.days)
if isinstance(other, date):
days1 = self.toordinal()
days2 = other.toordinal()
return timedelta(days1 - days2)
return NotImplemented
def weekday(self):
"Return day of the week, where Monday == 0 ... Sunday == 6."
return (self.toordinal() + 6) % 7
# Day-of-the-week and week-of-the-year, according to ISO
def isoweekday(self):
"Return day of the week, where Monday == 1 ... Sunday == 7."
# 1-Jan-0001 is a Monday
return self.toordinal() % 7 or 7
def isocalendar(self):
"""Return a 3-tuple containing ISO year, week number, and weekday.
The first ISO week of the year is the (Mon-Sun) week
containing the year's first Thursday; everything else derives
from that.
The first week is 1; Monday is 1 ... Sunday is 7.
ISO calendar algorithm taken from
http://www.phys.uu.nl/~vgent/calendar/isocalendar.htm
(used with permission)
"""
year = self._year
week1monday = _isoweek1monday(year)
today = _ymd2ord(self._year, self._month, self._day)
# Internally, week and day have origin 0
week, day = divmod(today - week1monday, 7)
if week < 0:
year -= 1
week1monday = _isoweek1monday(year)
week, day = divmod(today - week1monday, 7)
elif week >= 52:
if today >= _isoweek1monday(year+1):
year += 1
week = 0
return year, week+1, day+1
# Pickle support.
def _getstate(self):
yhi, ylo = divmod(self._year, 256)
return bytes([yhi, ylo, self._month, self._day]),
def __setstate(self, string):
yhi, ylo, self._month, self._day = string
self._year = yhi * 256 + ylo
def __reduce__(self):
return (self.__class__, self._getstate())
_date_class = date # so functions w/ args named "date" can get at the class
date.min = date(1, 1, 1)
date.max = date(9999, 12, 31)
date.resolution = timedelta(days=1)
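# Sanity checks against well-known values: 01-Jan-2000 is ordinal 730120 and
# fell on a Saturday (weekday() == 5, with Monday == 0).
assert date(2000, 1, 1).toordinal() == 730120
assert date.fromordinal(730120) == date(2000, 1, 1)
assert date(2000, 1, 1).weekday() == 5
assert str(date(2000, 1, 1)) == '2000-01-01'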
class tzinfo:
"""Abstract base class for time zone info classes.
    Subclasses must override the tzname(), utcoffset() and dst() methods.
"""
__slots__ = ()
def tzname(self, dt):
"datetime -> string name of time zone."
raise NotImplementedError("tzinfo subclass must override tzname()")
def utcoffset(self, dt):
"datetime -> timedelta, positive for east of UTC, negative for west of UTC"
raise NotImplementedError("tzinfo subclass must override utcoffset()")
def dst(self, dt):
"""datetime -> DST offset as timedelta, positive for east of UTC.
Return 0 if DST not in effect. utcoffset() must include the DST
offset.
"""
raise NotImplementedError("tzinfo subclass must override dst()")
def fromutc(self, dt):
"datetime in UTC -> datetime in local time."
if not isinstance(dt, datetime):
raise TypeError("fromutc() requires a datetime argument")
if dt.tzinfo is not self:
raise ValueError("dt.tzinfo is not self")
dtoff = dt.utcoffset()
if dtoff is None:
raise ValueError("fromutc() requires a non-None utcoffset() "
"result")
# See the long comment block at the end of this file for an
# explanation of this algorithm.
dtdst = dt.dst()
if dtdst is None:
raise ValueError("fromutc() requires a non-None dst() result")
delta = dtoff - dtdst
if delta:
dt += delta
dtdst = dt.dst()
if dtdst is None:
raise ValueError("fromutc(): dt.dst gave inconsistent "
"results; cannot convert")
return dt + dtdst
# Pickle support.
def __reduce__(self):
getinitargs = getattr(self, "__getinitargs__", None)
if getinitargs:
args = getinitargs()
else:
args = ()
getstate = getattr(self, "__getstate__", None)
if getstate:
state = getstate()
else:
state = getattr(self, "__dict__", None) or None
if state is None:
return (self.__class__, args)
else:
return (self.__class__, args, state)
_tzinfo_class = tzinfo
class time:
"""Time with time zone.
Constructors:
__new__()
Operators:
__repr__, __str__
__eq__, __le__, __lt__, __ge__, __gt__, __hash__
Methods:
strftime()
isoformat()
utcoffset()
tzname()
dst()
Properties (readonly):
hour, minute, second, microsecond, tzinfo, fold
"""
__slots__ = '_hour', '_minute', '_second', '_microsecond', '_tzinfo', '_hashcode', '_fold'
def __new__(cls, hour=0, minute=0, second=0, microsecond=0, tzinfo=None, *, fold=0):
"""Constructor.
Arguments:
hour, minute (required)
second, microsecond (default to zero)
tzinfo (default to None)
fold (keyword only, default to zero)
"""
if isinstance(hour, bytes) and len(hour) == 6 and hour[0]&0x7F < 24:
# Pickle support
self = object.__new__(cls)
self.__setstate(hour, minute or None)
self._hashcode = -1
return self
hour, minute, second, microsecond, fold = _check_time_fields(
hour, minute, second, microsecond, fold)
_check_tzinfo_arg(tzinfo)
self = object.__new__(cls)
self._hour = hour
self._minute = minute
self._second = second
self._microsecond = microsecond
self._tzinfo = tzinfo
self._hashcode = -1
self._fold = fold
return self
# Read-only field accessors
@property
def hour(self):
"""hour (0-23)"""
return self._hour
@property
def minute(self):
"""minute (0-59)"""
return self._minute
@property
def second(self):
"""second (0-59)"""
return self._second
@property
def microsecond(self):
"""microsecond (0-999999)"""
return self._microsecond
@property
def tzinfo(self):
"""timezone info object"""
return self._tzinfo
@property
def fold(self):
return self._fold
# Standard conversions, __hash__ (and helpers)
# Comparisons of time objects with other.
def __eq__(self, other):
if isinstance(other, time):
return self._cmp(other, allow_mixed=True) == 0
else:
return False
def __le__(self, other):
if isinstance(other, time):
return self._cmp(other) <= 0
else:
_cmperror(self, other)
def __lt__(self, other):
if isinstance(other, time):
return self._cmp(other) < 0
else:
_cmperror(self, other)
def __ge__(self, other):
if isinstance(other, time):
return self._cmp(other) >= 0
else:
_cmperror(self, other)
def __gt__(self, other):
if isinstance(other, time):
return self._cmp(other) > 0
else:
_cmperror(self, other)
def _cmp(self, other, allow_mixed=False):
assert isinstance(other, time)
mytz = self._tzinfo
ottz = other._tzinfo
myoff = otoff = None
if mytz is ottz:
base_compare = True
else:
myoff = self.utcoffset()
otoff = other.utcoffset()
base_compare = myoff == otoff
if base_compare:
return _cmp((self._hour, self._minute, self._second,
self._microsecond),
(other._hour, other._minute, other._second,
other._microsecond))
if myoff is None or otoff is None:
if allow_mixed:
return 2 # arbitrary non-zero value
else:
raise TypeError("cannot compare naive and aware times")
myhhmm = self._hour * 60 + self._minute - myoff//timedelta(minutes=1)
othhmm = other._hour * 60 + other._minute - otoff//timedelta(minutes=1)
return _cmp((myhhmm, self._second, self._microsecond),
(othhmm, other._second, other._microsecond))
def __hash__(self):
"""Hash."""
if self._hashcode == -1:
if self.fold:
t = self.replace(fold=0)
else:
t = self
tzoff = t.utcoffset()
if not tzoff: # zero or None
self._hashcode = hash(t._getstate()[0])
else:
h, m = divmod(timedelta(hours=self.hour, minutes=self.minute) - tzoff,
timedelta(hours=1))
assert not m % timedelta(minutes=1), "whole minute"
m //= timedelta(minutes=1)
if 0 <= h < 24:
self._hashcode = hash(time(h, m, self.second, self.microsecond))
else:
self._hashcode = hash((h, m, self.second, self.microsecond))
return self._hashcode
# Conversion to string
def _tzstr(self):
"""Return formatted timezone offset (+xx:xx) or an empty string."""
off = self.utcoffset()
return _format_offset(off)
def __repr__(self):
"""Convert to formal string, for repr()."""
if self._microsecond != 0:
s = ", %d, %d" % (self._second, self._microsecond)
elif self._second != 0:
s = ", %d" % self._second
else:
s = ""
        s = "%s.%s(%d, %d%s)" % (self.__class__.__module__,
                                 self.__class__.__qualname__,
                                 self._hour, self._minute, s)
if self._tzinfo is not None:
assert s[-1:] == ")"
s = s[:-1] + ", tzinfo=%r" % self._tzinfo + ")"
if self._fold:
assert s[-1:] == ")"
s = s[:-1] + ", fold=1)"
return s
def isoformat(self, timespec='auto'):
"""Return the time formatted according to ISO.
The full format is 'HH:MM:SS.mmmmmm+zz:zz'. By default, the fractional
part is omitted if self.microsecond == 0.
The optional argument timespec specifies the number of additional
terms of the time to include.
"""
s = _format_time(self._hour, self._minute, self._second,
self._microsecond, timespec)
tz = self._tzstr()
if tz:
s += tz
return s
__str__ = isoformat
@classmethod
def fromisoformat(cls, time_string):
"""Construct a time from the output of isoformat()."""
if not isinstance(time_string, str):
raise TypeError('fromisoformat: argument must be str')
try:
return cls(*_parse_isoformat_time(time_string))
except Exception:
raise ValueError('Invalid isoformat string: %s' % time_string)
def strftime(self, fmt):
"""Format using strftime(). The date part of the timestamp passed
to underlying strftime should not be used.
"""
# The year must be >= 1000 else Python's strftime implementation
# can raise a bogus exception.
timetuple = (1900, 1, 1,
self._hour, self._minute, self._second,
0, 1, -1)
return _wrap_strftime(self, fmt, timetuple)
def __format__(self, fmt):
if not isinstance(fmt, str):
raise TypeError("must be str, not %s" % type(fmt).__name__)
if len(fmt) != 0:
return self.strftime(fmt)
return str(self)
# Timezone functions
def utcoffset(self):
"""Return the timezone offset as timedelta, positive east of UTC
(negative west of UTC)."""
if self._tzinfo is None:
return None
offset = self._tzinfo.utcoffset(None)
_check_utc_offset("utcoffset", offset)
return offset
def tzname(self):
"""Return the timezone name.
Note that the name is 100% informational -- there's no requirement that
it mean anything in particular. For example, "GMT", "UTC", "-500",
"-5:00", "EDT", "US/Eastern", "America/New York" are all valid replies.
"""
if self._tzinfo is None:
return None
name = self._tzinfo.tzname(None)
_check_tzname(name)
return name
def dst(self):
"""Return 0 if DST is not in effect, or the DST offset (as timedelta
positive eastward) if DST is in effect.
This is purely informational; the DST offset has already been added to
the UTC offset returned by utcoffset() if applicable, so there's no
need to consult dst() unless you're interested in displaying the DST
info.
"""
if self._tzinfo is None:
return None
offset = self._tzinfo.dst(None)
_check_utc_offset("dst", offset)
return offset
def replace(self, hour=None, minute=None, second=None, microsecond=None,
tzinfo=True, *, fold=None):
"""Return a new time with new values for the specified fields."""
if hour is None:
hour = self.hour
if minute is None:
minute = self.minute
if second is None:
second = self.second
if microsecond is None:
microsecond = self.microsecond
if tzinfo is True:
tzinfo = self.tzinfo
if fold is None:
fold = self._fold
return type(self)(hour, minute, second, microsecond, tzinfo, fold=fold)
# Pickle support.
def _getstate(self, protocol=3):
us2, us3 = divmod(self._microsecond, 256)
us1, us2 = divmod(us2, 256)
h = self._hour
if self._fold and protocol > 3:
h += 128
basestate = bytes([h, self._minute, self._second,
us1, us2, us3])
if self._tzinfo is None:
return (basestate,)
else:
return (basestate, self._tzinfo)
def __setstate(self, string, tzinfo):
if tzinfo is not None and not isinstance(tzinfo, _tzinfo_class):
raise TypeError("bad tzinfo state arg")
h, self._minute, self._second, us1, us2, us3 = string
if h > 127:
self._fold = 1
self._hour = h - 128
else:
self._fold = 0
self._hour = h
self._microsecond = (((us1 << 8) | us2) << 8) | us3
self._tzinfo = tzinfo
def __reduce_ex__(self, protocol):
return (time, self._getstate(protocol))
def __reduce__(self):
return self.__reduce_ex__(2)
_time_class = time # so functions w/ args named "time" can get at the class
time.min = time(0, 0, 0)
time.max = time(23, 59, 59, 999999)
time.resolution = timedelta(microseconds=1)
class datetime(date):
"""datetime(year, month, day[, hour[, minute[, second[, microsecond[,tzinfo]]]]])
The year, month and day arguments are required. tzinfo may be None, or an
instance of a tzinfo subclass. The remaining arguments may be ints.
"""
__slots__ = date.__slots__ + time.__slots__
def __new__(cls, year, month=None, day=None, hour=0, minute=0, second=0,
microsecond=0, tzinfo=None, *, fold=0):
if isinstance(year, bytes) and len(year) == 10 and 1 <= year[2]&0x7F <= 12:
# Pickle support
self = object.__new__(cls)
self.__setstate(year, month)
self._hashcode = -1
return self
year, month, day = _check_date_fields(year, month, day)
hour, minute, second, microsecond, fold = _check_time_fields(
hour, minute, second, microsecond, fold)
_check_tzinfo_arg(tzinfo)
self = object.__new__(cls)
self._year = year
self._month = month
self._day = day
self._hour = hour
self._minute = minute
self._second = second
self._microsecond = microsecond
self._tzinfo = tzinfo
self._hashcode = -1
self._fold = fold
return self
# Read-only field accessors
@property
def hour(self):
"""hour (0-23)"""
return self._hour
@property
def minute(self):
"""minute (0-59)"""
return self._minute
@property
def second(self):
"""second (0-59)"""
return self._second
@property
def microsecond(self):
"""microsecond (0-999999)"""
return self._microsecond
@property
def tzinfo(self):
"""timezone info object"""
return self._tzinfo
@property
def fold(self):
return self._fold
@classmethod
def _fromtimestamp(cls, t, utc, tz):
"""Construct a datetime from a POSIX timestamp (like time.time()).
A timezone info object may be passed in as well.
"""
frac, t = _math.modf(t)
us = round(frac * 1e6)
if us >= 1000000:
t += 1
us -= 1000000
elif us < 0:
t -= 1
us += 1000000
converter = _time.gmtime if utc else _time.localtime
y, m, d, hh, mm, ss, weekday, jday, dst = converter(t)
ss = min(ss, 59) # clamp out leap seconds if the platform has them
result = cls(y, m, d, hh, mm, ss, us, tz)
if tz is None:
# As of version 2015f max fold in IANA database is
# 23 hours at 1969-09-30 13:00:00 in Kwajalein.
# Let's probe 24 hours in the past to detect a transition:
max_fold_seconds = 24 * 3600
y, m, d, hh, mm, ss = converter(t - max_fold_seconds)[:6]
probe1 = cls(y, m, d, hh, mm, ss, us, tz)
trans = result - probe1 - timedelta(0, max_fold_seconds)
if trans.days < 0:
y, m, d, hh, mm, ss = converter(t + trans // timedelta(0, 1))[:6]
probe2 = cls(y, m, d, hh, mm, ss, us, tz)
if probe2 == result:
result._fold = 1
else:
result = tz.fromutc(result)
return result
@classmethod
def fromtimestamp(cls, t, tz=None):
"""Construct a datetime from a POSIX timestamp (like time.time()).
A timezone info object may be passed in as well.
"""
_check_tzinfo_arg(tz)
return cls._fromtimestamp(t, tz is not None, tz)
@classmethod
def utcfromtimestamp(cls, t):
"""Construct a naive UTC datetime from a POSIX timestamp."""
return cls._fromtimestamp(t, True, None)
@classmethod
def now(cls, tz=None):
"Construct a datetime from time.time() and optional time zone info."
t = _time.time()
return cls.fromtimestamp(t, tz)
@classmethod
def utcnow(cls):
"Construct a UTC datetime from time.time()."
t = _time.time()
return cls.utcfromtimestamp(t)
@classmethod
def combine(cls, date, time, tzinfo=True):
"Construct a datetime from a given date and a given time."
if not isinstance(date, _date_class):
raise TypeError("date argument must be a date instance")
if not isinstance(time, _time_class):
raise TypeError("time argument must be a time instance")
if tzinfo is True:
tzinfo = time.tzinfo
return cls(date.year, date.month, date.day,
time.hour, time.minute, time.second, time.microsecond,
tzinfo, fold=time.fold)
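# Usage sketch (illustrative comment): combine() pairs a date with a time,
# taking tzinfo and fold from the time argument by default.
#
#   >>> d = date(2005, 7, 14)
#   >>> t = time(12, 30, tzinfo=timezone.utc)
#   >>> datetime.combine(d, t)
#   datetime.datetime(2005, 7, 14, 12, 30, tzinfo=datetime.timezone.utc)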
@classmethod
def fromisoformat(cls, date_string):
"""Construct a datetime from the output of datetime.isoformat()."""
if not isinstance(date_string, str):
raise TypeError('fromisoformat: argument must be str')
# Split this at the separator
dstr = date_string[0:10]
tstr = date_string[11:]
try:
date_components = _parse_isoformat_date(dstr)
except ValueError:
raise ValueError('Invalid isoformat string: %s' % date_string)
if tstr:
try:
time_components = _parse_isoformat_time(tstr)
except ValueError:
raise ValueError('Invalid isoformat string: %s' % date_string)
else:
time_components = [0, 0, 0, 0, None]
return cls(*(date_components + time_components))
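# Round-trip sketch (illustrative comment): fromisoformat() accepts exactly
# what isoformat() emits.
#
#   >>> datetime.fromisoformat('2011-11-04T00:05:23')
#   datetime.datetime(2011, 11, 4, 0, 5, 23)
#   >>> datetime(2011, 11, 4, 0, 5, 23).isoformat()
#   '2011-11-04T00:05:23'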
def timetuple(self):
"Return local time tuple compatible with time.localtime()."
dst = self.dst()
if dst is None:
dst = -1
elif dst:
dst = 1
else:
dst = 0
return _build_struct_time(self.year, self.month, self.day,
self.hour, self.minute, self.second,
dst)
def _mktime(self):
"""Return integer POSIX timestamp."""
epoch = datetime(1970, 1, 1)
max_fold_seconds = 24 * 3600
t = (self - epoch) // timedelta(0, 1)
def local(u):
y, m, d, hh, mm, ss = _time.localtime(u)[:6]
return (datetime(y, m, d, hh, mm, ss) - epoch) // timedelta(0, 1)
# Our goal is to solve t = local(u) for u.
a = local(t) - t
u1 = t - a
t1 = local(u1)
if t1 == t:
# We found one solution, but it may not be the one we need.
# Look for an earlier solution (if `fold` is 0), or a
# later one (if `fold` is 1).
u2 = u1 + (-max_fold_seconds, max_fold_seconds)[self.fold]
b = local(u2) - u2
if a == b:
return u1
else:
b = t1 - u1
assert a != b
u2 = t - b
t2 = local(u2)
if t2 == t:
return u2
if t1 == t:
return u1
# We have found both offsets a and b, but neither t - a nor t - b is
# a solution. This means t is in the gap.
return (max, min)[self.fold](u1, u2)
def timestamp(self):
"Return POSIX timestamp as float"
if self._tzinfo is None:
s = self._mktime()
return s + self.microsecond / 1e6
else:
return (self - _EPOCH).total_seconds()
def utctimetuple(self):
"Return UTC time tuple compatible with time.gmtime()."
offset = self.utcoffset()
if offset:
self -= offset
y, m, d = self.year, self.month, self.day
hh, mm, ss = self.hour, self.minute, self.second
return _build_struct_time(y, m, d, hh, mm, ss, 0)
def date(self):
"Return the date part."
return date(self._year, self._month, self._day)
def time(self):
"Return the time part, with tzinfo None."
return time(self.hour, self.minute, self.second, self.microsecond, fold=self.fold)
def timetz(self):
"Return the time part, with same tzinfo."
return time(self.hour, self.minute, self.second, self.microsecond,
self._tzinfo, fold=self.fold)
def replace(self, year=None, month=None, day=None, hour=None,
minute=None, second=None, microsecond=None, tzinfo=True,
*, fold=None):
"""Return a new datetime with new values for the specified fields."""
if year is None:
year = self.year
if month is None:
month = self.month
if day is None:
day = self.day
if hour is None:
hour = self.hour
if minute is None:
minute = self.minute
if second is None:
second = self.second
if microsecond is None:
microsecond = self.microsecond
if tzinfo is True:
tzinfo = self.tzinfo
if fold is None:
fold = self.fold
return type(self)(year, month, day, hour, minute, second,
microsecond, tzinfo, fold=fold)
def _local_timezone(self):
if self.tzinfo is None:
ts = self._mktime()
else:
ts = (self - _EPOCH) // timedelta(seconds=1)
localtm = _time.localtime(ts)
local = datetime(*localtm[:6])
try:
# Extract TZ data if available
gmtoff = localtm.tm_gmtoff
zone = localtm.tm_zone
except AttributeError:
delta = local - datetime(*_time.gmtime(ts)[:6])
zone = _time.strftime('%Z', localtm)
tz = timezone(delta, zone)
else:
tz = timezone(timedelta(seconds=gmtoff), zone)
return tz
def astimezone(self, tz=None):
if tz is None:
tz = self._local_timezone()
elif not isinstance(tz, tzinfo):
raise TypeError("tz argument must be an instance of tzinfo")
mytz = self.tzinfo
if mytz is None:
mytz = self._local_timezone()
myoffset = mytz.utcoffset(self)
else:
myoffset = mytz.utcoffset(self)
if myoffset is None:
mytz = self.replace(tzinfo=None)._local_timezone()
myoffset = mytz.utcoffset(self)
if tz is mytz:
return self
# Convert self to UTC, and attach the new time zone object.
utc = (self - myoffset).replace(tzinfo=tz)
# Convert from UTC to tz's local time.
return tz.fromutc(utc)
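# Conversion sketch (illustrative comment): astimezone() re-expresses the
# same instant under another zone, so equality is preserved.
#
#   >>> d = datetime(2011, 1, 1, 12, tzinfo=timezone.utc)
#   >>> local = d.astimezone(timezone(timedelta(hours=-5)))
#   >>> str(local), local == d
#   ('2011-01-01 07:00:00-05:00', True)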
# Ways to produce a string.
def ctime(self):
"Return ctime() style string."
weekday = self.toordinal() % 7 or 7
return "%s %s %2d %02d:%02d:%02d %04d" % (
_DAYNAMES[weekday],
_MONTHNAMES[self._month],
self._day,
self._hour, self._minute, self._second,
self._year)
def isoformat(self, sep='T', timespec='auto'):
"""Return the time formatted according to ISO.
The full format looks like 'YYYY-MM-DD HH:MM:SS.mmmmmm'.
By default, the fractional part is omitted if self.microsecond == 0.
If self.tzinfo is not None, the UTC offset is also attached, giving
a full format of 'YYYY-MM-DD HH:MM:SS.mmmmmm+HH:MM'.
Optional argument sep specifies the separator between date and
time, default 'T'.
The optional argument timespec specifies the number of additional
terms of the time to include.
"""
s = ("%04d-%02d-%02d%c" % (self._year, self._month, self._day, sep) +
_format_time(self._hour, self._minute, self._second,
self._microsecond, timespec))
off = self.utcoffset()
tz = _format_offset(off)
if tz:
s += tz
return s
def __repr__(self):
"""Convert to formal string, for repr()."""
L = [self._year, self._month, self._day, # These are never zero
self._hour, self._minute, self._second, self._microsecond]
if L[-1] == 0:
del L[-1]
if L[-1] == 0:
del L[-1]
s = "%s.%s(%s)" % (self.__class__.__module__,
self.__class__.__qualname__,
", ".join(map(str, L)))
if self._tzinfo is not None:
assert s[-1:] == ")"
s = s[:-1] + ", tzinfo=%r" % self._tzinfo + ")"
if self._fold:
assert s[-1:] == ")"
s = s[:-1] + ", fold=1)"
return s
def __str__(self):
"Convert to string, for str()."
return self.isoformat(sep=' ')
@classmethod
def strptime(cls, date_string, format):
'string, format -> new datetime parsed from a string (like time.strptime()).'
import _strptime
return _strptime._strptime_datetime(cls, date_string, format)
def utcoffset(self):
"""Return the timezone offset as timedelta positive east of UTC (negative west of
UTC)."""
if self._tzinfo is None:
return None
offset = self._tzinfo.utcoffset(self)
_check_utc_offset("utcoffset", offset)
return offset
def tzname(self):
"""Return the timezone name.
Note that the name is 100% informational -- there's no requirement that
it mean anything in particular. For example, "GMT", "UTC", "-500",
"-5:00", "EDT", "US/Eastern", "America/New York" are all valid replies.
"""
if self._tzinfo is None:
return None
name = self._tzinfo.tzname(self)
_check_tzname(name)
return name
def dst(self):
"""Return 0 if DST is not in effect, or the DST offset (as timedelta
positive eastward) if DST is in effect.
This is purely informational; the DST offset has already been added to
the UTC offset returned by utcoffset() if applicable, so there's no
need to consult dst() unless you're interested in displaying the DST
info.
"""
if self._tzinfo is None:
return None
offset = self._tzinfo.dst(self)
_check_utc_offset("dst", offset)
return offset
# Comparisons of datetime objects with other.
def __eq__(self, other):
if isinstance(other, datetime):
return self._cmp(other, allow_mixed=True) == 0
elif not isinstance(other, date):
return NotImplemented
else:
return False
def __le__(self, other):
if isinstance(other, datetime):
return self._cmp(other) <= 0
elif not isinstance(other, date):
return NotImplemented
else:
_cmperror(self, other)
def __lt__(self, other):
if isinstance(other, datetime):
return self._cmp(other) < 0
elif not isinstance(other, date):
return NotImplemented
else:
_cmperror(self, other)
def __ge__(self, other):
if isinstance(other, datetime):
return self._cmp(other) >= 0
elif not isinstance(other, date):
return NotImplemented
else:
_cmperror(self, other)
def __gt__(self, other):
if isinstance(other, datetime):
return self._cmp(other) > 0
elif not isinstance(other, date):
return NotImplemented
else:
_cmperror(self, other)
def _cmp(self, other, allow_mixed=False):
assert isinstance(other, datetime)
mytz = self._tzinfo
ottz = other._tzinfo
myoff = otoff = None
if mytz is ottz:
base_compare = True
else:
myoff = self.utcoffset()
otoff = other.utcoffset()
# Assume that allow_mixed means that we are called from __eq__
if allow_mixed:
if myoff != self.replace(fold=not self.fold).utcoffset():
return 2
if otoff != other.replace(fold=not other.fold).utcoffset():
return 2
base_compare = myoff == otoff
if base_compare:
return _cmp((self._year, self._month, self._day,
self._hour, self._minute, self._second,
self._microsecond),
(other._year, other._month, other._day,
other._hour, other._minute, other._second,
other._microsecond))
if myoff is None or otoff is None:
if allow_mixed:
return 2 # arbitrary non-zero value
else:
raise TypeError("cannot compare naive and aware datetimes")
# XXX What follows could be done more efficiently...
diff = self - other # this will take offsets into account
if diff.days < 0:
return -1
return diff and 1 or 0
def __add__(self, other):
"Add a datetime and a timedelta."
if not isinstance(other, timedelta):
return NotImplemented
delta = timedelta(self.toordinal(),
hours=self._hour,
minutes=self._minute,
seconds=self._second,
microseconds=self._microsecond)
delta += other
hour, rem = divmod(delta.seconds, 3600)
minute, second = divmod(rem, 60)
if 0 < delta.days <= _MAXORDINAL:
return datetime.combine(date.fromordinal(delta.days),
time(hour, minute, second,
delta.microseconds,
tzinfo=self._tzinfo))
raise OverflowError("result out of range")
__radd__ = __add__
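# Arithmetic sketch (illustrative comment): adding a timedelta carries days,
# seconds and microseconds through the ordinal math above.
#
#   >>> datetime(2000, 1, 1) + timedelta(days=1, hours=2)
#   datetime.datetime(2000, 1, 2, 2, 0)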
def __sub__(self, other):
"Subtract two datetimes, or a datetime and a timedelta."
if not isinstance(other, datetime):
if isinstance(other, timedelta):
return self + -other
return NotImplemented
days1 = self.toordinal()
days2 = other.toordinal()
secs1 = self._second + self._minute * 60 + self._hour * 3600
secs2 = other._second + other._minute * 60 + other._hour * 3600
base = timedelta(days1 - days2,
secs1 - secs2,
self._microsecond - other._microsecond)
if self._tzinfo is other._tzinfo:
return base
myoff = self.utcoffset()
otoff = other.utcoffset()
if myoff == otoff:
return base
if myoff is None or otoff is None:
raise TypeError("cannot mix naive and timezone-aware time")
return base + otoff - myoff
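# Mixed-zone sketch (illustrative comment): subtracting two aware datetimes
# compensates for their offsets, so simultaneous instants differ by zero.
#
#   >>> utc = datetime(2011, 1, 1, 12, tzinfo=timezone.utc)
#   >>> est = datetime(2011, 1, 1, 7, tzinfo=timezone(timedelta(hours=-5)))
#   >>> utc - est
#   datetime.timedelta(0)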
def __hash__(self):
if self._hashcode == -1:
if self.fold:
t = self.replace(fold=0)
else:
t = self
tzoff = t.utcoffset()
if tzoff is None:
self._hashcode = hash(t._getstate()[0])
else:
days = _ymd2ord(self.year, self.month, self.day)
seconds = self.hour * 3600 + self.minute * 60 + self.second
self._hashcode = hash(timedelta(days, seconds, self.microsecond) - tzoff)
return self._hashcode
# Pickle support.
def _getstate(self, protocol=3):
yhi, ylo = divmod(self._year, 256)
us2, us3 = divmod(self._microsecond, 256)
us1, us2 = divmod(us2, 256)
m = self._month
if self._fold and protocol > 3:
m += 128
basestate = bytes([yhi, ylo, m, self._day,
self._hour, self._minute, self._second,
us1, us2, us3])
if self._tzinfo is None:
return (basestate,)
else:
return (basestate, self._tzinfo)
def __setstate(self, string, tzinfo):
if tzinfo is not None and not isinstance(tzinfo, _tzinfo_class):
raise TypeError("bad tzinfo state arg")
(yhi, ylo, m, self._day, self._hour,
self._minute, self._second, us1, us2, us3) = string
if m > 127:
self._fold = 1
self._month = m - 128
else:
self._fold = 0
self._month = m
self._year = yhi * 256 + ylo
self._microsecond = (((us1 << 8) | us2) << 8) | us3
self._tzinfo = tzinfo
def __reduce_ex__(self, protocol):
return (self.__class__, self._getstate(protocol))
def __reduce__(self):
return self.__reduce_ex__(2)
datetime.min = datetime(1, 1, 1)
datetime.max = datetime(9999, 12, 31, 23, 59, 59, 999999)
datetime.resolution = timedelta(microseconds=1)
def _isoweek1monday(year):
# Helper to calculate the day number of the Monday starting week 1
# XXX This could be done more efficiently
THURSDAY = 3
firstday = _ymd2ord(year, 1, 1)
firstweekday = (firstday + 6) % 7 # See weekday() above
week1monday = firstday - firstweekday
if firstweekday > THURSDAY:
week1monday += 7
return week1monday
class timezone(tzinfo):
__slots__ = '_offset', '_name'
# Sentinel value to disallow None
_Omitted = object()
def __new__(cls, offset, name=_Omitted):
if not isinstance(offset, timedelta):
raise TypeError("offset must be a timedelta")
if name is cls._Omitted:
if not offset:
return cls.utc
name = None
elif not isinstance(name, str):
raise TypeError("name must be a string")
if not cls._minoffset <= offset <= cls._maxoffset:
raise ValueError("offset must be a timedelta "
"strictly between -timedelta(hours=24) and "
"timedelta(hours=24).")
return cls._create(offset, name)
@classmethod
def _create(cls, offset, name=None):
self = tzinfo.__new__(cls)
self._offset = offset
self._name = name
return self
def __getinitargs__(self):
"""pickle support"""
if self._name is None:
return (self._offset,)
return (self._offset, self._name)
def __eq__(self, other):
if type(other) != timezone:
return False
return self._offset == other._offset
def __hash__(self):
return hash(self._offset)
def __repr__(self):
"""Convert to formal string, for repr().
>>> tz = timezone.utc
>>> repr(tz)
'datetime.timezone.utc'
>>> tz = timezone(timedelta(hours=-5), 'EST')
>>> repr(tz)
"datetime.timezone(datetime.timedelta(-1, 68400), 'EST')"
"""
if self is self.utc:
return 'datetime.timezone.utc'
if self._name is None:
return "%s.%s(%r)" % (self.__class__.__module__,
self.__class__.__qualname__,
self._offset)
return "%s.%s(%r, %r)" % (self.__class__.__module__,
self.__class__.__qualname__,
self._offset, self._name)
def __str__(self):
return self.tzname(None)
def utcoffset(self, dt):
if isinstance(dt, datetime) or dt is None:
return self._offset
raise TypeError("utcoffset() argument must be a datetime instance"
" or None")
def tzname(self, dt):
if isinstance(dt, datetime) or dt is None:
if self._name is None:
return self._name_from_offset(self._offset)
return self._name
raise TypeError("tzname() argument must be a datetime instance"
" or None")
def dst(self, dt):
if isinstance(dt, datetime) or dt is None:
return None
raise TypeError("dst() argument must be a datetime instance"
" or None")
def fromutc(self, dt):
if isinstance(dt, datetime):
if dt.tzinfo is not self:
raise ValueError("fromutc: dt.tzinfo "
"is not self")
return dt + self._offset
raise TypeError("fromutc() argument must be a datetime instance"
" or None")
_maxoffset = timedelta(hours=23, minutes=59)
_minoffset = -_maxoffset
@staticmethod
def _name_from_offset(delta):
if not delta:
return 'UTC'
if delta < timedelta(0):
sign = '-'
delta = -delta
else:
sign = '+'
hours, rest = divmod(delta, timedelta(hours=1))
minutes, rest = divmod(rest, timedelta(minutes=1))
seconds = rest.seconds
microseconds = rest.microseconds
if microseconds:
return (f'UTC{sign}{hours:02d}:{minutes:02d}:{seconds:02d}'
f'.{microseconds:06d}')
if seconds:
return f'UTC{sign}{hours:02d}:{minutes:02d}:{seconds:02d}'
return f'UTC{sign}{hours:02d}:{minutes:02d}'
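# Naming sketch (illustrative comment): unnamed instances derive str() from
# their offset via _name_from_offset().
#
#   >>> str(timezone(timedelta(hours=5, minutes=30)))
#   'UTC+05:30'
#   >>> str(timezone.utc)
#   'UTC'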
timezone.utc = timezone._create(timedelta(0))
timezone.min = timezone._create(timezone._minoffset)
timezone.max = timezone._create(timezone._maxoffset)
_EPOCH = datetime(1970, 1, 1, tzinfo=timezone.utc)
# Some time zone algebra. For a datetime x, let
# x.n = x stripped of its timezone -- its naive time.
# x.o = x.utcoffset(), and assuming that doesn't raise an exception or
# return None
# x.d = x.dst(), and assuming that doesn't raise an exception or
# return None
# x.s = x's standard offset, x.o - x.d
#
# Now some derived rules, where k is a duration (timedelta).
#
# 1. x.o = x.s + x.d
# This follows from the definition of x.s.
#
# 2. If x and y have the same tzinfo member, x.s = y.s.
# This is actually a requirement, an assumption we need to make about
# sane tzinfo classes.
#
# 3. The naive UTC time corresponding to x is x.n - x.o.
# This is again a requirement for a sane tzinfo class.
#
# 4. (x+k).s = x.s
# This follows from #2, and that datetimetz+timedelta preserves tzinfo.
#
# 5. (x+k).n = x.n + k
# Again follows from how arithmetic is defined.
#
# Now we can explain tz.fromutc(x). Let's assume it's an interesting case
# (meaning that the various tzinfo methods exist, and don't blow up or return
# None when called).
#
# The function wants to return a datetime y with timezone tz, equivalent to x.
# x is already in UTC.
#
# By #3, we want
#
# y.n - y.o = x.n [1]
#
# The algorithm starts by attaching tz to x.n, and calling that y. So
# x.n = y.n at the start. Then it wants to add a duration k to y, so that [1]
# becomes true; in effect, we want to solve [2] for k:
#
# (y+k).n - (y+k).o = x.n [2]
#
# By #1, this is the same as
#
# (y+k).n - ((y+k).s + (y+k).d) = x.n [3]
#
# By #5, (y+k).n = y.n + k, which equals x.n + k because x.n=y.n at the start.
# Substituting that into [3],
#
# x.n + k - (y+k).s - (y+k).d = x.n; the x.n terms cancel, leaving
# k - (y+k).s - (y+k).d = 0; rearranging,
# k = (y+k).s - (y+k).d; by #4, (y+k).s == y.s, so
# k = y.s - (y+k).d
#
# On the RHS, (y+k).d can't be computed directly, but y.s can be, and we
# approximate k by ignoring the (y+k).d term at first. Note that k can't be
# very large, since all offset-returning methods return a duration of magnitude
# less than 24 hours. For that reason, if y is firmly in std time, (y+k).d must
# be 0, so ignoring it has no consequence then.
#
# In any case, the new value is
#
# z = y + y.s [4]
#
# It's helpful to step back and look at [4] from a higher level: it's simply
# mapping from UTC to tz's standard time.
#
# At this point, if
#
# z.n - z.o = x.n [5]
#
# we have an equivalent time, and are almost done. The insecurity here is
# at the start of daylight time. Picture US Eastern for concreteness. The wall
# time jumps from 1:59 to 3:00, and wall hours of the form 2:MM don't make good
# sense then. The docs ask that an Eastern tzinfo class consider such a time to
# be EDT (because it's "after 2"), which is a redundant spelling of 1:MM EST
# on the day DST starts. We want to return the 1:MM EST spelling because that's
# the only spelling that makes sense on the local wall clock.
#
# In fact, if [5] holds at this point, we do have the standard-time spelling,
# but that takes a bit of proof. We first prove a stronger result. What's the
# difference between the LHS and RHS of [5]? Let
#
# diff = x.n - (z.n - z.o) [6]
#
# Now
# z.n = by [4]
# (y + y.s).n = by #5
# y.n + y.s = since y.n = x.n
# x.n + y.s = since z and y have the same tzinfo member,
# y.s = z.s by #2
# x.n + z.s
#
# Plugging that back into [6] gives
#
# diff =
# x.n - ((x.n + z.s) - z.o) = expanding
# x.n - x.n - z.s + z.o = cancelling
# - z.s + z.o = by #2
# z.d
#
# So diff = z.d.
#
# If [5] is true now, diff = 0, so z.d = 0 too, and we have the standard-time
# spelling we wanted in the endcase described above. We're done. Contrarily,
# if z.d = 0, then we have a UTC equivalent, and are also done.
#
# If [5] is not true now, diff = z.d != 0, and z.d is the offset we need to
# add to z (in effect, z is in tz's standard time, and we need to shift the
# local clock into tz's daylight time).
#
# Let
#
# z' = z + z.d = z + diff [7]
#
# and we can again ask whether
#
# z'.n - z'.o = x.n [8]
#
# If so, we're done. If not, the tzinfo class is insane, according to the
# assumptions we've made. This also requires a bit of proof. As before, let's
# compute the difference between the LHS and RHS of [8] (and skipping some of
# the justifications for the kinds of substitutions we've done several times
# already):
#
# diff' = x.n - (z'.n - z'.o) = replacing z'.n via [7]
# x.n - (z.n + diff - z'.o) = replacing diff via [6]
# x.n - (z.n + x.n - (z.n - z.o) - z'.o) =
# x.n - z.n - x.n + z.n - z.o + z'.o = cancel x.n
# - z.n + z.n - z.o + z'.o = cancel z.n
# - z.o + z'.o = #1 twice
# -z.s - z.d + z'.s + z'.d = z and z' have same tzinfo
# z'.d - z.d
#
# So z' is UTC-equivalent to x iff z'.d = z.d at this point. If they are equal,
# we've found the UTC-equivalent so are done. In fact, we stop with [7] and
# return z', not bothering to compute z'.d.
#
# How could z.d and z'.d differ? z' = z + z.d [7], so merely moving z' by
# a dst() offset, and starting *from* a time already in DST (we know z.d != 0),
# would have to change the result dst() returns: we start in DST, and moving
# a little further into it takes us out of DST.
#
# There isn't a sane case where this can happen. The closest it gets is at
# the end of DST, where there's an hour in UTC with no spelling in a hybrid
# tzinfo class. In US Eastern, that's 5:MM UTC = 0:MM EST = 1:MM EDT. During
# that hour, on an Eastern clock 1:MM is taken as being in standard time (6:MM
# UTC) because the docs insist on that, but 0:MM is taken as being in daylight
# time (4:MM UTC). There is no local time mapping to 5:MM UTC. The local
# clock jumps from 1:59 back to 1:00 again, and repeats the 1:MM hour in
# standard time. Since that's what the local clock *does*, we want to map both
# UTC hours 5:MM and 6:MM to 1:MM Eastern. The result is ambiguous
# in local time, but so it goes -- it's the way the local clock works.
#
# When x = 5:MM UTC is the input to this algorithm, x.o=0, y.o=-5 and y.d=0,
# so z=0:MM. z.d=60 (minutes) then, so [5] doesn't hold and we keep going.
# z' = z + z.d = 1:MM then, and z'.d=0, and z'.d - z.d = -60 != 0 so [8]
# (correctly) concludes that z' is not UTC-equivalent to x.
#
# Because we know z.d said z was in daylight time (else [5] would have held and
# we would have stopped then), and we know z.d != z'.d (else [8] would have held
# and we have stopped then), and there are only 2 possible values dst() can
# return in Eastern, it follows that z'.d must be 0 (which it is in the example,
# but the reasoning doesn't depend on the example -- it depends on there being
# two possible dst() outcomes, one zero and the other non-zero). Therefore
# z' must be in standard time, and is the spelling we want in this case.
#
# Note again that z' is not UTC-equivalent as far as the hybrid tzinfo class is
# concerned (because it takes z' as being in standard time rather than the
# daylight time we intend here), but returning it gives the real-life "local
# clock repeats an hour" behavior when mapping the "unspellable" UTC hour into
# tz.
#
# When the input is 6:MM, z=1:MM and z.d=0, and we stop at once, again with
# the 1:MM standard time spelling we want.
#
# So how can this break? One of the assumptions must be violated. Two
# possibilities:
#
# 1) [2] effectively says that y.s is invariant across all y belonging to a given
# time zone. This isn't true if, for political reasons or continental drift,
# a region decides to change its base offset from UTC.
#
# 2) There may be versions of "double daylight" time where the tail end of
# the analysis gives up a step too early. I haven't thought about that
# enough to say.
#
# In any case, it's clear that the default fromutc() is strong enough to handle
# "almost all" time zones: so long as the standard offset is invariant, it
# doesn't matter if daylight time transition points change from year to year, or
# if daylight time is skipped in some years; it doesn't matter how large or
# small dst() may get within its bounds; and it doesn't even matter if some
# perverse time zone returns a negative dst(). So a breaking case must be
# pretty bizarre, and a tzinfo subclass can override fromutc() if it is.
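# Concrete sketch (illustrative comment, not part of the module): for the
# fixed-offset timezone class above, fromutc() reduces to "dt + offset",
# i.e. the k = y.s step of the derivation with all dst() terms zero.
#
#   >>> est = timezone(timedelta(hours=-5), 'EST')
#   >>> x = datetime(2002, 10, 27, 6, 0, tzinfo=est)  # 6:00 UTC, tz attached
#   >>> str(est.fromutc(x))
#   '2002-10-27 01:00:00-05:00'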
try:
from _datetime import *
except ImportError:
pass
else:
# Clean up unused names
del (_DAYNAMES, _DAYS_BEFORE_MONTH, _DAYS_IN_MONTH, _DI100Y, _DI400Y,
_DI4Y, _EPOCH, _MAXORDINAL, _MONTHNAMES, _build_struct_time,
_check_date_fields, _check_int_field, _check_time_fields,
_check_tzinfo_arg, _check_tzname, _check_utc_offset, _cmp, _cmperror,
_date_class, _days_before_month, _days_before_year, _days_in_month,
_format_time, _format_offset, _is_leap, _isoweek1monday, _math,
_ord2ymd, _time, _time_class, _tzinfo_class, _wrap_strftime, _ymd2ord,
_divide_and_round, _parse_isoformat_date, _parse_isoformat_time,
_parse_hh_mm_ss_ff)
# XXX Since import * above excludes names that start with _,
# docstring does not get overwritten. In the future, it may be
# appropriate to maintain a single module level docstring and
# remove the following line.
from _datetime import __doc__
|
{
"content_hash": "8fd6604a7d0e6786cd000094d34b9cbd",
"timestamp": "",
"source": "github",
"line_count": 2433,
"max_line_length": 94,
"avg_line_length": 34.738183312782574,
"alnum_prop": 0.5473626919709411,
"repo_name": "Microsoft/PTVS",
"id": "dd6eca907dd440b90bf969ea2c34c7c7ad0102f0",
"size": "84518",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "Python/Product/Miniconda/Miniconda3-x64/Lib/datetime.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ASP",
"bytes": "109"
},
{
"name": "Batchfile",
"bytes": "10898"
},
{
"name": "C",
"bytes": "23236"
},
{
"name": "C#",
"bytes": "12235396"
},
{
"name": "C++",
"bytes": "212001"
},
{
"name": "CSS",
"bytes": "7025"
},
{
"name": "HTML",
"bytes": "34251"
},
{
"name": "JavaScript",
"bytes": "87257"
},
{
"name": "PowerShell",
"bytes": "44322"
},
{
"name": "Python",
"bytes": "847130"
},
{
"name": "Rich Text Format",
"bytes": "260880"
},
{
"name": "Smarty",
"bytes": "8156"
},
{
"name": "Tcl",
"bytes": "24968"
}
],
"symlink_target": ""
}
|
import smtplib
import thread
import urllib
import urlparse
from hashlib import md5  # needed by generate_recovery_link() below
from datetime import datetime, timedelta
from email.mime.text import MIMEText
from airy.core.web import AiryHandler, AiryRequestHandler
from airy.core.conf import settings
from users.decorators import login_required
from users.forms import *
from users.models import PasswordResetToken
class IndexHandler(AiryRequestHandler):
def get(self):
self.render("page.html")
class HomeHandler(AiryHandler):
def get(self):
if not self.current_user:
self.render("#menu", "users/user_out_menu.html")
self.render("#content", "index.html")
return
user = self.get_current_user()
self.render("#menu", "users/user_in_menu.html")
self.render("#content", "index.html")
def post(self):
if self.current_user:
self.redirect("/")
return
form = RegistrationForm(self.get_flat_arguments())
if form.is_valid():
user = form.save(self)
self.redirect("/")
else:
self.render("#menu", "users/user_out_menu.html")
self.render("#content", "index.html", form=form)
class AccountsLoginHandler(AiryHandler):
def get(self):
if self.get_current_user():
self.redirect("/")
else:
form = LoginForm()
self.render("#content", "users/login.html", form=form)
def post(self):
form = LoginForm(self.get_flat_arguments())
if form.is_valid():
form.save(self)
self.redirect("/")
else:
self.render("#content", "users/login.html", form=form)
class AccountsLogoutHandler(AiryHandler):
def get(self):
self.clear_cookie("session_key")
self.render("#menu", "users/user_out_menu.html")
self.redirect("/")
class AccountsRegisterHandler(AiryHandler):
def get(self):
form = RegistrationForm()
self.render("#content", "users/register.html", form=form)
def post(self):
form = RegistrationForm(self.get_flat_arguments())
if form.is_valid():
user = form.save(self)
self.redirect("/")
else:
self.render("#content", "users/register.html", form=form)
class AccountsProfileHandler(AiryHandler):
@login_required
def get(self):
user = self.get_current_user()
self.render(
"#content", "users/profile/main.html",
)
class AccountsForeignProfileHandler(AiryHandler):
def get(self, object_id):
try:
companies = list()
try:
user = User.objects.get(id=object_id)
except Exception:
user = User.objects.get(username=object_id)
self.render(
"#content", "users/profile/foreign_profile.html",
user=user, companies=companies, current_user=self.get_current_user()
)
except Exception:
self.render("#content", "http_404.html")
class AccountsChangeUserInfoHandler(AiryHandler):
@login_required
def post(self):
field = self.get_argument('field')
value = self.get_argument('value')
user = self.get_current_user()
setattr(user, field, value)
user.save()
class AccountsProfileDeleteHandler(AiryHandler):
@login_required
def post(self):
user = self.get_current_user()
actions = {
'education': user.delete_education,
'services': user.delete_service,
'experience': user.delete_experience,
'recommended_book': user.delete_recommended_book,
'interests': user.delete_interest,
}
actions[self.get_argument('field')](self.get_argument('object_id'))
class FileUpload(AiryHandler):
@login_required
def get(self, action):
if action == 'upload':
form = FileUploadForm()
self.render(
"#profile-picture",
"users/profile/picture_upload.html",
form=form
)
self.execute('project.users.picture_init();')
else:
self.render("#profile-picture", "users/profile/picture.html")
@login_required
def post(self, *args, **kwargs):
form = FileUploadForm(self.get_flat_arguments(), self.get_files())
if form.is_valid():
form.save(self.get_current_user())
self.render("#profile-picture", "users/profile/picture.html")
self.render("#menu", "user_in_menu.html")
return
self.render(
"#profile-picture",
"users/profile/picture_upload.html",
form=form
)
class AccountsRecoverPasswordHandler(AiryHandler):
def get(self, path=None):
if not path:
form = PasswordRecoveryForm()
self.render("#content", "users/password_recovery/recovery.html", form=form)
return
args = urlparse.parse_qs(path)
if not self.token_is_valid(args.get('token', [None])[0]):
    self.redirect("/users/recovery/")
    return
form = NewPasswordForm()
self.render("#content", "users/password_recovery/new_password.html", form=form)
def token_is_valid(self, token):
try:
password_reset_token = PasswordResetToken.objects.get(token=token)
if datetime.now() > password_reset_token.expired:
return False
return True
except Exception:
return False
def get_items_from_path(self, path):
try:
args = urlparse.parse_qs(path)
password_reset_token = PasswordResetToken.objects.get(token=args['token'][0])
user = User.objects.get(password_reset_token_list__contains=password_reset_token)
return user, password_reset_token
except Exception:
return None, None
def post(self, path=None):
#Email or username for password recovery, send mail
if not path:
form = PasswordRecoveryForm(self.get_flat_arguments())
if form.is_valid():
user = form.save()
thread.start_new_thread(self.send_recovery_mail, (user, self.generate_recovery_link(user)))
self.render("#content", "users/password_recovery/password_reset_sent.html")
else:
self.render("#content", "users/password_recovery/recovery.html", form=form)
return
#New password and confirmation
form = NewPasswordForm(self.get_flat_arguments())
user, password_reset_token = self.get_items_from_path(path)
if form.is_valid():
form.save(self, user)
#Delete password reset token
user.update(pull__password_reset_token_list=password_reset_token)
password_reset_token.delete()
self.redirect('/')
else:
self.render("#content", "users/password_recovery/new_password.html", form=form)
def generate_recovery_link(self, user):
expired = datetime.now() + timedelta(days=1)
token = md5(str(user.id) + expired.strftime("%H:%M:%S %m/%d/%Y")).hexdigest()
password_reset_token = PasswordResetToken(expired=expired, token=token)
password_reset_token.save()
user.password_reset_token_list.append(password_reset_token)
user.save()
params = urllib.urlencode(dict(token=token))
# NOTE: HOST and PORT are not defined in this module; they are assumed to
# be provided by the surrounding project (e.g. via settings).
return 'http://%s:%s/users/recovery/%s' % (HOST, PORT, params)
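# Link sketch (illustrative comment): with token 'abc123' the generated URL
# looks like http://<host>:<port>/users/recovery/token=abc123, which get()
# above parses back out of the path with urlparse.parse_qs().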
def send_recovery_mail(self, user, url):
smtp_server = self.connect_smtp_server(
settings.email_host,
settings.email_port,
settings.email_host_user,
settings.email_host_password
)
smtp_server.sendmail(
settings.email_host_user,
user.email,
self.build_message(user, url)
)
smtp_server.quit()
def build_message(self, user, url):
msg = MIMEText('Password recovery link: %s' % url)
msg['Subject'] = 'Password recovery'
return msg.as_string()
def connect_smtp_server(self, email_host, email_port, user, password):
smtp_server = smtplib.SMTP('%s:%s' % (email_host, email_port))
smtp_server.starttls()
smtp_server.login(user, password)
return smtp_server
|
{
"content_hash": "41b67643a8786027697fbd1474e7a4b9",
"timestamp": "",
"source": "github",
"line_count": 260,
"max_line_length": 107,
"avg_line_length": 32.28846153846154,
"alnum_prop": 0.5903513996426444,
"repo_name": "letolab/airy",
"id": "a5fdc370e79a59318e90e9d280059bc03efab96f",
"size": "8395",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "airy/skeleton/project/users/handlers.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "JavaScript",
"bytes": "115012"
},
{
"name": "Python",
"bytes": "678842"
},
{
"name": "Shell",
"bytes": "5092"
}
],
"symlink_target": ""
}
|
import errno
from unittest import mock
from six.moves import builtins
from os_win import constants
from os_win import exceptions
from os_win.tests.unit import test_base
from os_win.utils.io import namedpipe
from os_win.utils.winapi import constants as w_const
class NamedPipeTestCase(test_base.BaseTestCase):
_FAKE_LOG_PATH = 'fake_log_path'
@mock.patch.object(namedpipe.NamedPipeHandler, '_setup_io_structures')
def setUp(self, mock_setup_structures):
super(NamedPipeTestCase, self).setUp()
self._mock_input_queue = mock.Mock()
self._mock_output_queue = mock.Mock()
self._mock_client_connected = mock.Mock()
self._ioutils = mock.Mock()
threading_patcher = mock.patch.object(namedpipe, 'threading')
threading_patcher.start()
self.addCleanup(threading_patcher.stop)
self._handler = namedpipe.NamedPipeHandler(
mock.sentinel.pipe_name,
self._mock_input_queue,
self._mock_output_queue,
self._mock_client_connected,
self._FAKE_LOG_PATH)
self._handler._ioutils = self._ioutils
def _mock_setup_pipe_handler(self):
self._handler._log_file_handle = mock.Mock()
self._handler._pipe_handle = mock.sentinel.pipe_handle
self._r_worker = mock.Mock()
self._w_worker = mock.Mock()
self._handler._workers = [self._r_worker, self._w_worker]
self._handler._r_buffer = mock.Mock()
self._handler._w_buffer = mock.Mock()
self._handler._r_overlapped = mock.Mock()
self._handler._w_overlapped = mock.Mock()
self._handler._r_completion_routine = mock.Mock()
self._handler._w_completion_routine = mock.Mock()
@mock.patch.object(builtins, 'open')
@mock.patch.object(namedpipe.NamedPipeHandler, '_open_pipe')
def test_start_pipe_handler(self, mock_open_pipe, mock_open):
self._handler.start()
mock_open_pipe.assert_called_once_with()
mock_open.assert_called_once_with(self._FAKE_LOG_PATH, 'ab', 1)
self.assertEqual(mock_open.return_value,
self._handler._log_file_handle)
thread = namedpipe.threading.Thread
thread.assert_has_calls(
[mock.call(target=self._handler._read_from_pipe),
mock.call().start(),
mock.call(target=self._handler._write_to_pipe),
mock.call().start()])
for worker in self._handler._workers:
self.assertIs(True, worker.daemon)
@mock.patch.object(namedpipe.NamedPipeHandler, 'stop')
@mock.patch.object(namedpipe.NamedPipeHandler, '_open_pipe')
def test_start_pipe_handler_exception(self, mock_open_pipe,
mock_stop_handler):
mock_open_pipe.side_effect = Exception
self.assertRaises(exceptions.OSWinException,
self._handler.start)
mock_stop_handler.assert_called_once_with()
@mock.patch.object(namedpipe.NamedPipeHandler, '_cleanup_handles')
@mock.patch.object(namedpipe.NamedPipeHandler, '_cancel_io')
def _test_stop_pipe_handler(self, mock_cancel_io,
mock_cleanup_handles,
workers_started=True):
self._mock_setup_pipe_handler()
if not workers_started:
handler_workers = []
self._handler._workers = handler_workers
else:
handler_workers = self._handler._workers
self._r_worker.is_alive.side_effect = (True, False)
self._w_worker.is_alive.return_value = False
self._handler.stop()
self._handler._stopped.set.assert_called_once_with()
if not workers_started:
mock_cleanup_handles.assert_called_once_with()
else:
self.assertFalse(mock_cleanup_handles.called)
if workers_started:
mock_cancel_io.assert_called_once_with()
self._r_worker.join.assert_called_once_with(0.5)
self.assertFalse(self._w_worker.join.called)
self.assertEqual([], self._handler._workers)
def test_stop_pipe_handler_workers_started(self):
self._test_stop_pipe_handler()
def test_stop_pipe_handler_workers_not_started(self):
self._test_stop_pipe_handler(workers_started=False)
@mock.patch.object(namedpipe.NamedPipeHandler, '_close_pipe')
def test_cleanup_handles(self, mock_close_pipe):
self._mock_setup_pipe_handler()
log_handle = self._handler._log_file_handle
r_event = self._handler._r_overlapped.hEvent
w_event = self._handler._w_overlapped.hEvent
self._handler._cleanup_handles()
mock_close_pipe.assert_called_once_with()
log_handle.close.assert_called_once_with()
self._ioutils.close_handle.assert_has_calls(
[mock.call(r_event), mock.call(w_event)])
self.assertIsNone(self._handler._log_file_handle)
self.assertIsNone(self._handler._r_overlapped.hEvent)
self.assertIsNone(self._handler._w_overlapped.hEvent)
def test_setup_io_structures(self):
self._handler._setup_io_structures()
self.assertEqual(self._ioutils.get_buffer.return_value,
self._handler._r_buffer)
self.assertEqual(self._ioutils.get_buffer.return_value,
self._handler._w_buffer)
self.assertEqual(
self._ioutils.get_new_overlapped_structure.return_value,
self._handler._r_overlapped)
self.assertEqual(
self._ioutils.get_new_overlapped_structure.return_value,
self._handler._w_overlapped)
self.assertEqual(
self._ioutils.get_completion_routine.return_value,
self._handler._r_completion_routine)
self.assertEqual(
self._ioutils.get_completion_routine.return_value,
self._handler._w_completion_routine)
self.assertIsNone(self._handler._log_file_handle)
self._ioutils.get_buffer.assert_has_calls(
[mock.call(constants.SERIAL_CONSOLE_BUFFER_SIZE)] * 2)
self._ioutils.get_completion_routine.assert_has_calls(
[mock.call(self._handler._read_callback),
mock.call()])
def test_open_pipe(self):
self._handler._open_pipe()
self._ioutils.wait_named_pipe.assert_called_once_with(
mock.sentinel.pipe_name)
self._ioutils.open.assert_called_once_with(
mock.sentinel.pipe_name,
desired_access=(w_const.GENERIC_READ | w_const.GENERIC_WRITE),
share_mode=(w_const.FILE_SHARE_READ | w_const.FILE_SHARE_WRITE),
creation_disposition=w_const.OPEN_EXISTING,
flags_and_attributes=w_const.FILE_FLAG_OVERLAPPED)
self.assertEqual(self._ioutils.open.return_value,
self._handler._pipe_handle)
def test_close_pipe(self):
self._mock_setup_pipe_handler()
self._handler._close_pipe()
self._ioutils.close_handle.assert_called_once_with(
mock.sentinel.pipe_handle)
self.assertIsNone(self._handler._pipe_handle)
def test_cancel_io(self):
self._mock_setup_pipe_handler()
self._handler._cancel_io()
overlapped_structures = [self._handler._r_overlapped,
self._handler._w_overlapped]
self._ioutils.cancel_io.assert_has_calls(
[mock.call(self._handler._pipe_handle,
overlapped_structure,
ignore_invalid_handle=True)
for overlapped_structure in overlapped_structures])
@mock.patch.object(namedpipe.NamedPipeHandler, '_start_io_worker')
def test_read_from_pipe(self, mock_start_worker):
self._mock_setup_pipe_handler()
self._handler._read_from_pipe()
mock_start_worker.assert_called_once_with(
self._ioutils.read,
self._handler._r_buffer,
self._handler._r_overlapped,
self._handler._r_completion_routine)
@mock.patch.object(namedpipe.NamedPipeHandler, '_start_io_worker')
def test_write_to_pipe(self, mock_start_worker):
self._mock_setup_pipe_handler()
self._handler._write_to_pipe()
mock_start_worker.assert_called_once_with(
self._ioutils.write,
self._handler._w_buffer,
self._handler._w_overlapped,
self._handler._w_completion_routine,
self._handler._get_data_to_write)
@mock.patch.object(namedpipe.NamedPipeHandler, '_cleanup_handles')
def _test_start_io_worker(self, mock_cleanup_handles,
buff_update_func=None, exception=None):
self._handler._stopped.isSet.side_effect = [False, True]
self._handler._pipe_handle = mock.sentinel.pipe_handle
self._handler.stop = mock.Mock()
io_func = mock.Mock(side_effect=exception)
fake_buffer = 'fake_buffer'
self._handler._start_io_worker(io_func, fake_buffer,
mock.sentinel.overlapped_structure,
mock.sentinel.completion_routine,
buff_update_func)
if buff_update_func:
num_bytes = buff_update_func()
else:
num_bytes = len(fake_buffer)
io_func.assert_called_once_with(mock.sentinel.pipe_handle,
fake_buffer, num_bytes,
mock.sentinel.overlapped_structure,
mock.sentinel.completion_routine)
if exception:
self._handler._stopped.set.assert_called_once_with()
mock_cleanup_handles.assert_called_once_with()
def test_start_io_worker(self):
self._test_start_io_worker()
def test_start_io_worker_with_buffer_update_method(self):
self._test_start_io_worker(buff_update_func=mock.Mock())
def test_start_io_worker_exception(self):
self._test_start_io_worker(exception=IOError)
@mock.patch.object(namedpipe.NamedPipeHandler, '_write_to_log')
def test_read_callback(self, mock_write_to_log):
self._mock_setup_pipe_handler()
fake_data = self._ioutils.get_buffer_data.return_value
self._handler._read_callback(mock.sentinel.num_bytes)
self._ioutils.get_buffer_data.assert_called_once_with(
self._handler._r_buffer, mock.sentinel.num_bytes)
self._mock_output_queue.put.assert_called_once_with(fake_data)
mock_write_to_log.assert_called_once_with(fake_data)
@mock.patch.object(namedpipe, 'time')
def test_get_data_to_write(self, mock_time):
self._mock_setup_pipe_handler()
self._handler._stopped.isSet.side_effect = [False, False]
self._mock_client_connected.isSet.side_effect = [False, True]
fake_data = 'fake input data'
self._mock_input_queue.get.return_value = fake_data
num_bytes = self._handler._get_data_to_write()
mock_time.sleep.assert_called_once_with(1)
self._ioutils.write_buffer_data.assert_called_once_with(
self._handler._w_buffer, fake_data)
self.assertEqual(len(fake_data), num_bytes)
@mock.patch.object(namedpipe.NamedPipeHandler, '_rotate_logs')
def _test_write_to_log(self, mock_rotate_logs, size_exceeded=False):
self._mock_setup_pipe_handler()
self._handler._stopped.isSet.return_value = False
fake_handle = self._handler._log_file_handle
fake_handle.tell.return_value = (constants.MAX_CONSOLE_LOG_FILE_SIZE
if size_exceeded else 0)
fake_data = 'fake_data'
self._handler._write_to_log(fake_data)
if size_exceeded:
mock_rotate_logs.assert_called_once_with()
self._handler._log_file_handle.write.assert_called_once_with(
fake_data)
def test_write_to_log(self):
self._test_write_to_log()
def test_write_to_log_size_exceeded(self):
self._test_write_to_log(size_exceeded=True)
def test_flush_log_file(self):
self._handler._log_file_handle = None
self._handler.flush_log_file()
self._handler._log_file_handle = mock.Mock()
self._handler.flush_log_file()
self._handler._log_file_handle.flush.side_effect = ValueError
self._handler.flush_log_file()
@mock.patch.object(namedpipe.NamedPipeHandler, '_retry_if_file_in_use')
@mock.patch.object(builtins, 'open')
@mock.patch.object(namedpipe, 'os')
def test_rotate_logs(self, mock_os, mock_open, mock_exec_retry):
fake_archived_log_path = self._FAKE_LOG_PATH + '.1'
mock_os.path.exists.return_value = True
self._mock_setup_pipe_handler()
fake_handle = self._handler._log_file_handle
self._handler._rotate_logs()
fake_handle.flush.assert_called_once_with()
fake_handle.close.assert_called_once_with()
mock_os.path.exists.assert_called_once_with(
fake_archived_log_path)
mock_exec_retry.assert_has_calls([mock.call(mock_os.remove,
fake_archived_log_path),
mock.call(mock_os.rename,
self._FAKE_LOG_PATH,
fake_archived_log_path)])
mock_open.assert_called_once_with(self._FAKE_LOG_PATH, 'ab', 1)
self.assertEqual(mock_open.return_value,
self._handler._log_file_handle)
@mock.patch.object(namedpipe, 'time')
def test_retry_if_file_in_use_exceeded_retries(self, mock_time):
class FakeWindowsException(Exception):
errno = errno.EACCES
raise_count = self._handler._MAX_LOG_ROTATE_RETRIES + 1
mock_func_side_eff = [FakeWindowsException] * raise_count
mock_func = mock.Mock(side_effect=mock_func_side_eff)
with mock.patch.object(namedpipe, 'WindowsError',
FakeWindowsException, create=True):
self.assertRaises(FakeWindowsException,
self._handler._retry_if_file_in_use,
mock_func, mock.sentinel.arg)
mock_time.sleep.assert_has_calls(
[mock.call(1)] * self._handler._MAX_LOG_ROTATE_RETRIES)
|
{
"content_hash": "31b21cf05f03e17dbabab5522ff10186",
"timestamp": "",
"source": "github",
"line_count": 364,
"max_line_length": 77,
"avg_line_length": 39.90934065934066,
"alnum_prop": 0.6078336889929098,
"repo_name": "openstack/os-win",
"id": "b28ca1ae7b45a0d7d68371a378e92e5df05d6003",
"size": "15166",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "os_win/tests/unit/utils/io/test_namedpipe.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1114520"
}
],
"symlink_target": ""
}
|
__version__ = '0.1'
import pep8
def check_helloworld(physical_line):
if pep8.noqa(physical_line):
return
pos = physical_line.find('Hello World')
if -1 != pos:
return pos, 'HW01 use "Hello World"'
check_helloworld.name = name = 'flake8-helloworld'
check_helloworld.version = __version__
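# Registration sketch (illustrative, assumed packaging metadata): flake8
# discovers checks through setuptools entry points, so a setup.py for this
# plugin would plausibly declare
#
#     entry_points={
#         'flake8.extension': [
#             'HW01 = flake8_helloworld:check_helloworld',
#         ],
#     },
#
# after which `flake8 somefile.py` reports HW01 wherever "Hello World" occurs.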
|
{
"content_hash": "ac96d19973f3f4975f115f8f192c9c54",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 50,
"avg_line_length": 21.266666666666666,
"alnum_prop": 0.64576802507837,
"repo_name": "hhatto/flake8-helloworld",
"id": "6709f2df83b293dd88543471c008de8e34b2102e",
"size": "320",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "flake8_helloworld.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1702"
}
],
"symlink_target": ""
}
|
import itertools
from oslo_log import log as logging
from nova.cells import opts as cells_opts
from nova.cells import rpcapi as cells_rpcapi
from nova.db import api as db
from nova import exception
from nova import objects
from nova.objects import base
from nova.objects import fields
LOG = logging.getLogger(__name__)
# TODO(berrange): Remove NovaObjectDictCompat
@base.NovaObjectRegistry.register
class InstanceFault(base.NovaPersistentObject, base.NovaObject,
base.NovaObjectDictCompat):
# Version 1.0: Initial version
# Version 1.1: String attributes updated to support unicode
# Version 1.2: Added create()
VERSION = '1.2'
fields = {
'id': fields.IntegerField(),
'instance_uuid': fields.UUIDField(),
'code': fields.IntegerField(),
'message': fields.StringField(nullable=True),
'details': fields.StringField(nullable=True),
'host': fields.StringField(nullable=True),
}
@staticmethod
def _from_db_object(context, fault, db_fault):
# NOTE(danms): These are identical right now
for key in fault.fields:
fault[key] = db_fault[key]
fault._context = context
fault.obj_reset_changes()
return fault
@base.remotable_classmethod
def get_latest_for_instance(cls, context, instance_uuid):
db_faults = db.instance_fault_get_by_instance_uuids(context,
[instance_uuid])
if instance_uuid in db_faults and db_faults[instance_uuid]:
return cls._from_db_object(context, cls(),
db_faults[instance_uuid][0])
@base.remotable
def create(self):
if self.obj_attr_is_set('id'):
raise exception.ObjectActionError(action='create',
reason='already created')
values = {
'instance_uuid': self.instance_uuid,
'code': self.code,
'message': self.message,
'details': self.details,
'host': self.host,
}
db_fault = db.instance_fault_create(self._context, values)
self._from_db_object(self._context, self, db_fault)
self.obj_reset_changes()
# Cells should only try sending a message over to nova-cells
# if cells is enabled and we're not the API cell. Otherwise,
# if the API cell is calling this, we could end up with
# infinite recursion.
if cells_opts.get_cell_type() == 'compute':
try:
cells_rpcapi.CellsAPI().instance_fault_create_at_top(
self._context, db_fault)
except Exception:
LOG.exception("Failed to notify cells of instance fault")
@base.NovaObjectRegistry.register
class InstanceFaultList(base.ObjectListBase, base.NovaObject):
# Version 1.0: Initial version
# InstanceFault <= version 1.1
# Version 1.1: InstanceFault version 1.2
# Version 1.2: Added get_latest_by_instance_uuids() method
VERSION = '1.2'
fields = {
'objects': fields.ListOfObjectsField('InstanceFault'),
}
@base.remotable_classmethod
def get_latest_by_instance_uuids(cls, context, instance_uuids):
db_faultdict = db.instance_fault_get_by_instance_uuids(context,
instance_uuids,
latest=True)
db_faultlist = itertools.chain(*db_faultdict.values())
return base.obj_make_list(context, cls(context), objects.InstanceFault,
db_faultlist)
@base.remotable_classmethod
def get_by_instance_uuids(cls, context, instance_uuids):
db_faultdict = db.instance_fault_get_by_instance_uuids(context,
instance_uuids)
db_faultlist = itertools.chain(*db_faultdict.values())
return base.obj_make_list(context, cls(context), objects.InstanceFault,
db_faultlist)
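# Usage sketch (illustrative comment; `ctxt` and `instance` are assumed to be
# a RequestContext and an instance object from the caller's scope):
#
#     fault = objects.InstanceFault(context=ctxt)
#     fault.instance_uuid = instance.uuid
#     fault.code = 500
#     fault.message = 'boom'
#     fault.details = 'traceback...'
#     fault.host = 'compute-1'
#     fault.create()
#     latest = InstanceFault.get_latest_for_instance(ctxt, instance.uuid)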
|
{
"content_hash": "514273d178041033d9dbd7cfd90699f9",
"timestamp": "",
"source": "github",
"line_count": 106,
"max_line_length": 79,
"avg_line_length": 39.24528301886792,
"alnum_prop": 0.5903846153846154,
"repo_name": "mikalstill/nova",
"id": "3010145e7e3e43158cfbf626b76761693e18a1ce",
"size": "4765",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "nova/objects/instance_fault.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "PHP",
"bytes": "3325"
},
{
"name": "Python",
"bytes": "22797282"
},
{
"name": "Shell",
"bytes": "32969"
},
{
"name": "Smarty",
"bytes": "418399"
}
],
"symlink_target": ""
}
|
"""Tests for stream_slice."""
import string
import StringIO
import unittest2
from apitools.base.py import exceptions
from apitools.base.py import stream_slice
class StreamSliceTest(unittest2.TestCase):
def setUp(self):
self.stream = StringIO.StringIO(string.letters)
self.value = self.stream.getvalue()
self.stream.seek(0)
def testSimpleSlice(self):
ss = stream_slice.StreamSlice(self.stream, 10)
self.assertEqual('', ss.read(0))
self.assertEqual(self.value[0:3], ss.read(3))
self.assertIn('7/10', str(ss))
self.assertEqual(self.value[3:10], ss.read())
self.assertEqual('', ss.read())
self.assertEqual('', ss.read(10))
self.assertEqual(10, self.stream.tell())
def testEmptySlice(self):
ss = stream_slice.StreamSlice(self.stream, 0)
self.assertEqual('', ss.read(5))
self.assertEqual('', ss.read())
self.assertEqual(0, self.stream.tell())
def testOffsetStream(self):
self.stream.seek(26)
ss = stream_slice.StreamSlice(self.stream, 26)
self.assertEqual(self.value[26:36], ss.read(10))
self.assertEqual(self.value[36:], ss.read())
self.assertEqual('', ss.read())
def testTooShortStream(self):
ss = stream_slice.StreamSlice(self.stream, 1000)
self.assertEqual(self.value, ss.read())
self.assertEqual('', ss.read(0))
with self.assertRaises(exceptions.StreamExhausted) as e:
ss.read()
with self.assertRaises(exceptions.StreamExhausted) as e:
ss.read(10)
self.assertIn('exhausted after %d' % len(self.value), str(e.exception))
if __name__ == '__main__':
unittest2.main()
|
{
"content_hash": "aaefa1d210d07841598f527a0fdef0a5",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 75,
"avg_line_length": 29.685185185185187,
"alnum_prop": 0.6818465377417342,
"repo_name": "craigcitro/original-apitools",
"id": "900ba8169a6eb6636db0d8974426fc2cd357d464",
"size": "1625",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apitools/base/py/stream_slice_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "315950"
}
],
"symlink_target": ""
}
|
DOCUMENTATION = '''
---
module: ecs_service
short_description: create, terminate, start or stop a service in ecs
description:
- Creates or terminates ecs services.
notes:
- the service role specified must be assumable (i.e. have a trust relationship for the ecs service, ecs.amazonaws.com)
- for details of the parameters and returns see U(http://boto3.readthedocs.org/en/latest/reference/services/ecs.html)
dependencies:
- An IAM role must have been created
version_added: "2.1"
author: Mark Chance (@java1guy)
options:
state:
description:
- The desired state of the service
required: true
choices: ["present", "absent", "deleting", "update"]
name:
description:
- The name of the service
required: true
cluster:
description:
- The name of the cluster in which the service exists
required: false
task_definition:
description:
- The task definition the service will run
required: false
load_balancers:
description:
- The list of ELBs defined for this service
required: false
desired_count:
description:
      - The desired number of instances of the service
required: false
deployment_config:
description:
      - A deployment configuration dictionary describing the minimumHealthyPercent and maximumPercent deployment parameters for the service
required: false
client_token:
description:
- Unique, case-sensitive identifier you provide to ensure the idempotency of the request. Up to 32 ASCII characters are allowed.
required: false
role:
description:
- The name or full Amazon Resource Name (ARN) of the IAM role that allows your Amazon ECS container agent to make calls to your load balancer on your behalf. This parameter is only required if you are using a load balancer with your service.
required: false
delay:
description:
- The time to wait before checking that the service is available
required: false
default: 10
repeat:
description:
- The number of times to check that the service is available
required: false
default: 10
wait:
description:
      - Wait for the service to reach a stable state before returning.
    required: false
    default: yes
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
- ecs_service:
state: present
name: console-test-service
cluster: new_cluster
task_definition: new_cluster-task:1
desired_count: 0
# Basic provisioning example
- ecs_service:
name: default
state: present
cluster: new_cluster
# Simple example to delete
- ecs_service:
name: default
state: absent
cluster: new_cluster
# Setting the deployment configuration
- ecs_service:
state: present
name: console-test-service
cluster: new_cluster
task_definition: my_task_definition
desired_count: 1
deployment_config:
minimumHealthyPercent: 50
maximumPercent: 200
'''
# Disabled the RETURN as it was breaking docs building. Someone needs to fix
# this
RETURN = '''# '''
'''
# Create service
service: On create service, it returns the new values; on delete service, it returns the values for the service being deleted.
    clusterArn: The Amazon Resource Name (ARN) of the cluster that hosts the service.
desiredCount: The desired number of instantiations of the task definition to keep running on the service.
loadBalancers: A list of load balancer objects
        loadBalancerName: The name of the load balancer.
containerName: The name of the container to associate with the load balancer.
containerPort: The port on the container to associate with the load balancer.
pendingCount: The number of tasks in the cluster that are in the PENDING state.
runningCount: The number of tasks in the cluster that are in the RUNNING state.
serviceArn: The Amazon Resource Name (ARN) that identifies the service. The ARN contains the arn:aws:ecs namespace, followed by the region of the service, the AWS account ID of the service owner, the service namespace, and then the service name. For example, arn:aws:ecs:region :012345678910 :service/my-service .
serviceName: A user-generated string used to identify the service
status: The valid values are ACTIVE, DRAINING, or INACTIVE.
taskDefinition: The ARN of a task definition to use for tasks in the service.
# Delete service
ansible_facts: When deleting a service, the values described above for the service prior to its deletion are returned.
'''
try:
import boto
import botocore
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
try:
import boto3
HAS_BOTO3 = True
except ImportError:
HAS_BOTO3 = False
class EcsServiceManager:
"""Handles ECS Services"""
def __init__(self, module):
self.module = module
try:
# self.ecs = boto3.client('ecs')
region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
if not region:
module.fail_json(msg="Region must be specified as a parameter, in EC2_REGION or AWS_REGION environment variables or in boto configuration file")
self.ecs = boto3_conn(module, conn_type='client', resource='ecs', region=region, endpoint=ec2_url, **aws_connect_kwargs)
except boto.exception.NoAuthHandlerFound, e:
self.module.fail_json(msg="Can't authorize connection - "+str(e))
# def list_clusters(self):
# return self.client.list_clusters()
    # {'failures': [],
    #  'ResponseMetadata': {'HTTPStatusCode': 200, 'RequestId': 'ce7b5880-1c41-11e5-8a31-47a93a8a98eb'},
    #  'clusters': [{'activeServicesCount': 0, 'clusterArn': 'arn:aws:ecs:us-west-2:777110527155:cluster/default', 'status': 'ACTIVE', 'pendingTasksCount': 0, 'runningTasksCount': 0, 'registeredContainerInstancesCount': 0, 'clusterName': 'default'}]}
    # {'failures': [{'arn': 'arn:aws:ecs:us-west-2:777110527155:cluster/bogus', 'reason': 'MISSING'}],
    #  'ResponseMetadata': {'HTTPStatusCode': 200, 'RequestId': '0f66c219-1c42-11e5-8a31-47a93a8a98eb'},
    #  'clusters': []}
def find_in_array(self, array_of_services, service_name, field_name='serviceArn'):
for c in array_of_services:
if c[field_name].endswith(service_name):
return c
return None
def describe_service(self, cluster_name, service_name):
response = self.ecs.describe_services(
cluster=cluster_name,
services=[
service_name
])
msg = ''
if len(response['failures'])>0:
c = self.find_in_array(response['failures'], service_name, 'arn')
msg += ", failure reason is "+c['reason']
if c and c['reason']=='MISSING':
return {}
# fall thru and look through found ones
if len(response['services'])>0:
c = self.find_in_array(response['services'], service_name)
if c:
return c
raise StandardError("Unknown problem describing service %s." % service_name)
    def create_service(self, service_name, cluster_name, task_definition,
                       load_balancers, desired_count, client_token, role,
                       deployment_config, wait):
response = self.ecs.create_service(
cluster=cluster_name,
serviceName=service_name,
taskDefinition=task_definition,
loadBalancers=load_balancers,
desiredCount=desired_count,
clientToken=client_token,
role=role,
deploymentConfiguration=deployment_config)
if wait:
"""Waits for service to become stable"""
waiter = self.ecs.get_waiter('services_stable')
waiter.wait(cluster=cluster_name, services=[ service_name ])
response['service'] = self.describe_service(cluster_name, service_name)
service = response['service']
return service
def update_service(self, service_name, cluster_name, task_definition, desired_count, deployment_config, wait):
response = self.ecs.update_service(
cluster=cluster_name,
service=service_name,
taskDefinition=task_definition,
desiredCount=desired_count,
deploymentConfiguration=deployment_config)
if wait:
"""Waits for service to become stable"""
waiter = self.ecs.get_waiter('services_stable')
waiter.wait(cluster=cluster_name, services=[ service_name ])
response['service'] = self.describe_service(cluster_name, service_name)
service = response['service']
return service
def delete_service(self, service, cluster=None):
return self.ecs.delete_service(cluster=cluster, service=service)
def json_serial(obj):
"""JSON serializer for objects not serializable by default json code"""
if isinstance(obj, datetime.datetime):
serial = obj.isoformat()
return serial
raise TypeError ("Type not serializable")
def fix_datetime(result):
"""Temporary fix to convert datetime fields from Boto3 to datetime string."""
"""See https://github.com/ansible/ansible-modules-extras/issues/1348."""
"""Not required for Ansible 2.1"""
return json.loads(json.dumps(result, default=json_serial))
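# Illustrative sketch (not part of the original module): json_serial lets
# json.dumps() stringify datetime objects, so the dumps/loads round trip in
# fix_datetime turns e.g.
#   {'createdAt': datetime.datetime(2016, 1, 1)}
# into
#   {'createdAt': '2016-01-01T00:00:00'}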
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
state=dict(required=True, choices=['present', 'absent', 'deleting', 'update'] ),
name=dict(required=True, type='str' ),
cluster=dict(required=False, type='str' ),
task_definition=dict(required=False, type='str' ),
load_balancers=dict(required=False, type='list' ),
deployment_config=dict(required=False, type='dict'),
desired_count=dict(required=False, type='int' ),
client_token=dict(required=False, type='str' ),
role=dict(required=False, type='str' ),
delay=dict(required=False, type='int', default=10),
repeat=dict(required=False, type='int', default=10),
wait=dict(required=False, type='bool', default=True)
))
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
if not HAS_BOTO:
module.fail_json(msg='boto is required.')
if not HAS_BOTO3:
module.fail_json(msg='boto3 is required.')
if module.params['state'] == 'update':
update_params = [module.params.get(key) for key in ['task_definition', 'desired_count', 'deployment_config']]
if update_params.count(None) == len(update_params):
module.fail_json(msg="To update a service, you must specify one of task_definition, desired_count or deployment_config")
else:
        if not module.params.get('task_definition'):
            module.fail_json(msg="To create a service, a task_definition must be specified")
        if not module.params.get('desired_count'):
            module.fail_json(msg="To create a service, a desired_count must be specified")
service_mgr = EcsServiceManager(module)
try:
existing = service_mgr.describe_service(module.params['cluster'], module.params['name'])
except Exception, e:
module.fail_json(msg="Exception describing service '"+module.params['name']+"' in cluster '"+module.params['cluster']+"': "+str(e))
# Check service exists and is active for updates
if (not existing or existing.get('status') != "ACTIVE") and module.params['state'] == 'update':
module.fail_json(msg="Service was not found or is not active.")
results = dict(changed=False)
if module.params['state'] == 'present':
if existing and existing.get('status') == "ACTIVE":
results['service']=existing
else:
if not module.check_mode:
if existing.get('status') != "ACTIVE":
existing={}
loadBalancers = module.params.get('load_balancers') or []
role = module.params.get('role') or ''
desiredCount = module.params.get('desired_count')
taskDefinition = module.params.get('task_definition')
deploymentConfig = module.params.get('deployment_config')
clientToken = module.params.get('client_token') or ''
wait = module.params.get('wait')
# Service doesn't exist or is inactive so create the service
response = fix_datetime(service_mgr.create_service(module.params['name'],
module.params['cluster'],
taskDefinition,
loadBalancers,
desiredCount,
clientToken,
role,
deploymentConfig,
wait))
results['service'] = response
results['changed'] = True
elif module.params['state'] == 'update':
if not module.check_mode:
loadBalancers = module.params.get('load_balancers') or []
desiredCount = module.params.get('desired_count') or existing.get('desiredCount')
taskDefinition = module.params.get('task_definition') or existing.get('taskDefinition')
deploymentConfig = module.params.get('deployment_config') or existing.get('deploymentConfiguration')
wait = module.params.get('wait')
response = fix_datetime(service_mgr.update_service(module.params['name'],
module.params['cluster'],
taskDefinition,
desiredCount,
deploymentConfig,
wait))
results['service'] = response
results['changed'] = True
elif module.params['state'] == 'absent':
if not existing:
pass
else:
# it exists, so we should delete it and mark changed.
# return info about the cluster deleted
del existing['deployments']
del existing['events']
results['ansible_facts'] = existing
if 'status' in existing and existing['status']=="INACTIVE":
results['changed'] = False
else:
if not module.check_mode:
try:
fix_datetime(service_mgr.delete_service(
module.params['name'],
module.params['cluster']
))
except botocore.exceptions.ClientError, e:
module.fail_json(msg=e.message)
results['changed'] = True
elif module.params['state'] == 'deleting':
if not existing:
module.fail_json(msg="Service '"+module.params['name']+" not found.")
return
# it exists, so we should delete it and mark changed.
# return info about the cluster deleted
delay = module.params['delay']
repeat = module.params['repeat']
time.sleep(delay)
for i in range(repeat):
existing = service_mgr.describe_service(module.params['cluster'], module.params['name'])
status = existing['status']
if status == "INACTIVE":
results['changed'] = True
break
time.sleep(delay)
            if i == repeat-1:
module.fail_json(msg="Service still not deleted after "+str(repeat)+" tries of "+str(delay)+" seconds each.")
return
module.exit_json(**results)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
if __name__ == '__main__':
main()
|
{
"content_hash": "8ef8fabf1bed53b4ed4156f8031c4178",
"timestamp": "",
"source": "github",
"line_count": 378,
"max_line_length": 317,
"avg_line_length": 41.939153439153436,
"alnum_prop": 0.6295338421749826,
"repo_name": "mikepolyak/bowling-kata",
"id": "4683d4f2aebdd2b9d4ec8bc35047dbddb522e262",
"size": "16524",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docker/deploy/library/aws_ecs_service.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Groovy",
"bytes": "292"
},
{
"name": "Java",
"bytes": "12848"
},
{
"name": "Makefile",
"bytes": "6333"
},
{
"name": "Python",
"bytes": "40532"
},
{
"name": "Shell",
"bytes": "3195"
}
],
"symlink_target": ""
}
|
import distributions as dist
import random
import json
class person:
def __init__(self):
        # avg_inc_change measures how much an individual can be expected to move
        # around in the percentile spread of incomes -- in other words, if
        # avg_inc_change = .04, then the average person can be expected to
        # go from the 50th to the 54th, or the 46th, percentile in a given year.
        # This should eventually be replaced with something tied to educational
        # achievement.
        self.avg_inc_change = .04
self.networth = 0
def from_womb(self):
random.seed()
self.gender = self.roll_gender()
        self.job = dist.get_job()
self.race = self.roll_race()
self.name = self.roll_name()
self.income = 0
self.age = 0
self.alive = True
self.income_percentile = random.uniform(0, 1)
def from_midlife(self, data):
self.gender = data["gender"]
self.race = data["race"]
self.name = data["name"]
self.income = float(data["income"])
self.age = int(data["age"])
self.networth = float(data["networth"])
self.job = data["job"]
self.alive = True
self.income_percentile = random.uniform(0, 1)
def roll_race(self):
# These stats come from the wiki page for "Demographics of the United States"
# -- note that the Hispanic population is over-represented by about 3% because
# of our lack of categories for Native Hawaiian, Pacific Islander, and Native
# American populations
dice = random.uniform(0, 1)
if(dice <= .637):
return "White"
elif(dice <= .637 + .122):
return "Black"
elif(dice <= .637 + .122 + .047):
return "Asian"
else:
return "Hispanic"
def roll_gender(self):
        # Wikipedia claims that there are 1.048 males per female at birth, meaning the
        # proportion of males is 1.048/(1 + 1.048) = .51171875
dice = random.uniform(0, 1)
if(dice <= .51171875):
return "Male"
else:
return "Female"
def roll_name(self):
return dist.get_name(self.gender)
def roll_birthday(self):
if(random.uniform(0, 1) < dist.death_prob(self.gender, self.age)):
return False
else:
return True
def roll_income(self):
# If we've got no income, hang tight
if(self.age < 25):
return
income_f = dist.income_cumulative_prob_inverse(self.gender, self.race, self.age)
income_percentile_f = dist.income_cumulative_prob_func(self.gender, self.race, self.age)
# Compute new income if we've had none so far
if(self.age == 25):
self.income = income_f(self.income_percentile)
# Else compute continuation income
else:
# Figure out our current income percentile and jiggle it
self.income_percentile = income_percentile_f(self.income)
self.income_percentile += random.gauss(0, self.avg_inc_change)
if(self.income_percentile > 1):
self.income_percentile = .99
elif(self.income_percentile < 0):
self.income_percentile = 0
# Compute new income
self.income = income_f(self.income_percentile)
def step_year(self):
self.print_state()
if(not self.roll_birthday()):
self.alive = False
else:
self.age = self.age + 1
self.roll_income()
self.networth += self.income * 0.1
def life_data(self):
data = []
while(self.alive):
data.append({
"age":self.age,
"gender":self.gender,
"race":self.race,
"name":self.name,
"income":self.income,
"networth":self.networth,
"job":self.job,
})
self.step_year()
return data
def print_state(self):
print("At age " + str(self.age) + ", I am in the " + str(100*self.income_percentile) + \
"th percentile. I make " + str(self.income) + " a year")
|
{
"content_hash": "f320c540fed527bf2a8e8136ad1761a2",
"timestamp": "",
"source": "github",
"line_count": 110,
"max_line_length": 90,
"avg_line_length": 32.127272727272725,
"alnum_prop": 0.6745897000565931,
"repo_name": "polpolion/TheFatalisticGameOfLife",
"id": "37cc88fb3ba306e660e1107c5091d0f208de90c0",
"size": "3534",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/TFGOL/tfgol_web/new_person.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "15846"
},
{
"name": "JavaScript",
"bytes": "1387121"
},
{
"name": "PHP",
"bytes": "26972"
},
{
"name": "Python",
"bytes": "40897"
},
{
"name": "Shell",
"bytes": "632"
}
],
"symlink_target": ""
}
|
"""Helper functions for SSD models meta architecture tests."""
import functools
import tensorflow.compat.v1 as tf
from google.protobuf import text_format
from object_detection.builders import post_processing_builder
from object_detection.core import anchor_generator
from object_detection.core import balanced_positive_negative_sampler as sampler
from object_detection.core import box_list
from object_detection.core import losses
from object_detection.core import post_processing
from object_detection.core import region_similarity_calculator as sim_calc
from object_detection.core import target_assigner
from object_detection.meta_architectures import ssd_meta_arch
from object_detection.protos import calibration_pb2
from object_detection.protos import model_pb2
from object_detection.utils import ops
from object_detection.utils import test_case
from object_detection.utils import test_utils
from object_detection.utils import tf_version
# pylint: disable=g-import-not-at-top
try:
import tf_slim as slim
except ImportError:
# TF 2.0 doesn't ship with contrib.
pass
# pylint: enable=g-import-not-at-top
keras = tf.keras.layers
class FakeSSDFeatureExtractor(ssd_meta_arch.SSDFeatureExtractor):
"""Fake ssd feature extracture for ssd meta arch tests."""
def __init__(self):
super(FakeSSDFeatureExtractor, self).__init__(
is_training=True,
depth_multiplier=0,
min_depth=0,
pad_to_multiple=1,
conv_hyperparams_fn=None)
def preprocess(self, resized_inputs):
return tf.identity(resized_inputs)
def extract_features(self, preprocessed_inputs):
with tf.variable_scope('mock_model'):
features = slim.conv2d(
inputs=preprocessed_inputs,
num_outputs=32,
kernel_size=1,
scope='layer1')
return [features]
class FakeSSDKerasFeatureExtractor(ssd_meta_arch.SSDKerasFeatureExtractor):
"""Fake keras based ssd feature extracture for ssd meta arch tests."""
def __init__(self):
with tf.name_scope('mock_model'):
super(FakeSSDKerasFeatureExtractor, self).__init__(
is_training=True,
depth_multiplier=0,
min_depth=0,
pad_to_multiple=1,
conv_hyperparams=None,
freeze_batchnorm=False,
inplace_batchnorm_update=False,
)
self._conv = keras.Conv2D(filters=32, kernel_size=1, name='layer1')
def preprocess(self, resized_inputs):
return tf.identity(resized_inputs)
def _extract_features(self, preprocessed_inputs, **kwargs):
with tf.name_scope('mock_model'):
return [self._conv(preprocessed_inputs)]
class MockAnchorGenerator2x2(anchor_generator.AnchorGenerator):
"""A simple 2x2 anchor grid on the unit square used for test only."""
def name_scope(self):
return 'MockAnchorGenerator'
def num_anchors_per_location(self):
return [1]
def _generate(self, feature_map_shape_list, im_height, im_width):
return [
box_list.BoxList(
tf.constant(
[
[0, 0, .5, .5],
[0, .5, .5, 1],
[.5, 0, 1, .5],
[1., 1., 1.5, 1.5] # Anchor that is outside clip_window.
],
tf.float32))
]
def num_anchors(self):
return 4
class SSDMetaArchTestBase(test_case.TestCase):
"""Base class to test SSD based meta architectures."""
def _create_model(
self,
model_fn=ssd_meta_arch.SSDMetaArch,
apply_hard_mining=True,
normalize_loc_loss_by_codesize=False,
add_background_class=True,
random_example_sampling=False,
expected_loss_weights=model_pb2.DetectionModel().ssd.loss.NONE,
min_num_negative_samples=1,
desired_negative_sampling_ratio=3,
predict_mask=False,
use_static_shapes=False,
nms_max_size_per_class=5,
calibration_mapping_value=None,
return_raw_detections_during_predict=False):
is_training = False
num_classes = 1
mock_anchor_generator = MockAnchorGenerator2x2()
use_keras = tf_version.is_tf2()
if use_keras:
mock_box_predictor = test_utils.MockKerasBoxPredictor(
is_training, num_classes, add_background_class=add_background_class)
else:
mock_box_predictor = test_utils.MockBoxPredictor(
is_training, num_classes, add_background_class=add_background_class)
mock_box_coder = test_utils.MockBoxCoder()
if use_keras:
fake_feature_extractor = FakeSSDKerasFeatureExtractor()
else:
fake_feature_extractor = FakeSSDFeatureExtractor()
mock_matcher = test_utils.MockMatcher()
region_similarity_calculator = sim_calc.IouSimilarity()
encode_background_as_zeros = False
def image_resizer_fn(image):
return [tf.identity(image), tf.shape(image)]
classification_loss = losses.WeightedSigmoidClassificationLoss()
localization_loss = losses.WeightedSmoothL1LocalizationLoss()
non_max_suppression_fn = functools.partial(
post_processing.batch_multiclass_non_max_suppression,
score_thresh=-20.0,
iou_thresh=1.0,
max_size_per_class=nms_max_size_per_class,
max_total_size=nms_max_size_per_class,
use_static_shapes=use_static_shapes)
score_conversion_fn = tf.identity
calibration_config = calibration_pb2.CalibrationConfig()
if calibration_mapping_value:
calibration_text_proto = """
function_approximation {
x_y_pairs {
x_y_pair {
x: 0.0
y: %f
}
x_y_pair {
x: 1.0
y: %f
}}}""" % (calibration_mapping_value, calibration_mapping_value)
text_format.Merge(calibration_text_proto, calibration_config)
score_conversion_fn = (
post_processing_builder._build_calibrated_score_converter( # pylint: disable=protected-access
tf.identity, calibration_config))
classification_loss_weight = 1.0
localization_loss_weight = 1.0
negative_class_weight = 1.0
normalize_loss_by_num_matches = False
hard_example_miner = None
if apply_hard_mining:
# This hard example miner is expected to be a no-op.
hard_example_miner = losses.HardExampleMiner(
num_hard_examples=None, iou_threshold=1.0)
random_example_sampler = None
if random_example_sampling:
random_example_sampler = sampler.BalancedPositiveNegativeSampler(
positive_fraction=0.5)
target_assigner_instance = target_assigner.TargetAssigner(
region_similarity_calculator,
mock_matcher,
mock_box_coder,
negative_class_weight=negative_class_weight)
model_config = model_pb2.DetectionModel()
if expected_loss_weights == model_config.ssd.loss.NONE:
expected_loss_weights_fn = None
else:
raise ValueError('Not a valid value for expected_loss_weights.')
code_size = 4
kwargs = {}
if predict_mask:
kwargs.update({
'mask_prediction_fn': test_utils.MockMaskHead(num_classes=1).predict,
})
model = model_fn(
is_training=is_training,
anchor_generator=mock_anchor_generator,
box_predictor=mock_box_predictor,
box_coder=mock_box_coder,
feature_extractor=fake_feature_extractor,
encode_background_as_zeros=encode_background_as_zeros,
image_resizer_fn=image_resizer_fn,
non_max_suppression_fn=non_max_suppression_fn,
score_conversion_fn=score_conversion_fn,
classification_loss=classification_loss,
localization_loss=localization_loss,
classification_loss_weight=classification_loss_weight,
localization_loss_weight=localization_loss_weight,
normalize_loss_by_num_matches=normalize_loss_by_num_matches,
hard_example_miner=hard_example_miner,
target_assigner_instance=target_assigner_instance,
add_summaries=False,
normalize_loc_loss_by_codesize=normalize_loc_loss_by_codesize,
freeze_batchnorm=False,
inplace_batchnorm_update=False,
add_background_class=add_background_class,
random_example_sampler=random_example_sampler,
expected_loss_weights_fn=expected_loss_weights_fn,
return_raw_detections_during_predict=(
return_raw_detections_during_predict),
**kwargs)
return model, num_classes, mock_anchor_generator.num_anchors(), code_size
def _get_value_for_matching_key(self, dictionary, suffix):
for key in dictionary.keys():
if key.endswith(suffix):
return dictionary[key]
raise ValueError('key not found {}'.format(suffix))
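# Illustrative sketch of a concrete test built on this base class
# (assumed names, not part of this helper module):
#
#   class SsdMetaArchTest(SSDMetaArchTestBase):
#
#     def test_create_model_defaults(self):
#       model, num_classes, num_anchors, code_size = self._create_model()
#       self.assertEqual(num_classes, 1)
#       self.assertEqual(num_anchors, 4)  # MockAnchorGenerator2x2
#       self.assertEqual(code_size, 4)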
|
{
"content_hash": "5727acf628f12c593afd24b92e5563ff",
"timestamp": "",
"source": "github",
"line_count": 245,
"max_line_length": 104,
"avg_line_length": 35.29795918367347,
"alnum_prop": 0.6751850138760407,
"repo_name": "tombstone/models",
"id": "0991388b31ac6a5974c9297e50b9630b3966e489",
"size": "9337",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "research/object_detection/meta_architectures/ssd_meta_arch_test_lib.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "1365199"
},
{
"name": "GLSL",
"bytes": "976"
},
{
"name": "HTML",
"bytes": "147010"
},
{
"name": "JavaScript",
"bytes": "33208"
},
{
"name": "Jupyter Notebook",
"bytes": "1858048"
},
{
"name": "Makefile",
"bytes": "4763"
},
{
"name": "Python",
"bytes": "7241242"
},
{
"name": "Shell",
"bytes": "102270"
},
{
"name": "TypeScript",
"bytes": "6515"
}
],
"symlink_target": ""
}
|
from corehq.apps.fixtures.dbaccessors import get_fixture_data_types_in_domain
from corehq.apps.fixtures.models import FixtureDataType, FixtureTypeField, \
FixtureDataItem, FieldList, FixtureItemField, FixtureOwnership
from corehq.apps.locations.tests.util import LocationHierarchyTestCase
from corehq.apps.users.models import CommCareUser
class TestLocationOwnership(LocationHierarchyTestCase):
domain = 'fixture-location-ownership-testing'
location_type_names = ['state', 'county', 'city']
location_structure = [
('Massachusetts', [
('Middlesex', [
('Cambridge', []),
('Somerville', []),
]),
('Suffolk', [
('Boston', []),
])
])
]
@classmethod
def setUpClass(cls):
super(TestLocationOwnership, cls).setUpClass()
cls.tag = "big-mac-index"
data_type = FixtureDataType(
domain=cls.domain,
tag=cls.tag,
name="Big Mac Index",
fields=[
FixtureTypeField(field_name="cost", properties=[]),
FixtureTypeField(field_name="region", properties=[]),
],
item_attributes=[],
)
data_type.save()
def make_data_item(location_name, cost):
"""Make a fixture data item and assign it to location_name"""
data_item = FixtureDataItem(
domain=cls.domain,
data_type_id=data_type.get_id,
fields={
"cost": FieldList(
field_list=[FixtureItemField(
field_value=cost,
properties={},
)]
),
"location_name": FieldList(
field_list=[FixtureItemField(
field_value=location_name,
properties={},
)]
),
},
item_attributes={},
)
data_item.save()
FixtureOwnership(
domain=cls.domain,
owner_id=cls.locations[location_name].location_id,
owner_type='location',
data_item_id=data_item.get_id
).save()
make_data_item('Suffolk', '8')
make_data_item('Boston', '10')
make_data_item('Somerville', '7')
get_fixture_data_types_in_domain.clear(cls.domain)
cls.no_location_user = CommCareUser.create(cls.domain, 'no_location', '***')
cls.suffolk_user = CommCareUser.create(cls.domain, 'guy-from-suffolk', '***')
cls.suffolk_user.set_location(cls.locations['Suffolk'])
cls.boston_user = CommCareUser.create(cls.domain, 'guy-from-boston', '***')
cls.boston_user.set_location(cls.locations['Boston'])
cls.middlesex_user = CommCareUser.create(cls.domain, 'guy-from-middlesex', '***')
cls.middlesex_user.set_location(cls.locations['Middlesex'])
@staticmethod
def _get_value(fixture_item, field_name):
return fixture_item.fields[field_name].field_list[0].field_value
def test_sees_fixture_at_own_location(self):
fixture_items = FixtureDataItem.by_user(self.suffolk_user)
self.assertEqual(len(fixture_items), 1)
self.assertEqual(self._get_value(fixture_items[0], 'cost'), '8')
self.assertEqual(self._get_value(fixture_items[0], 'location_name'), 'Suffolk')
def test_sees_own_fixture_and_parent_fixture(self):
fixture_items = FixtureDataItem.by_user(self.boston_user)
self.assertItemsEqual(
[(self._get_value(item, 'cost'), self._get_value(item, 'location_name'))
for item in fixture_items],
[('8', 'Suffolk'), ('10', 'Boston')]
)
def test_has_no_assigned_fixture(self):
fixture_items = FixtureDataItem.by_user(self.middlesex_user)
self.assertEqual(len(fixture_items), 0)
|
{
"content_hash": "24aa646110793a283cc6e6c822aa5bea",
"timestamp": "",
"source": "github",
"line_count": 104,
"max_line_length": 89,
"avg_line_length": 38.875,
"alnum_prop": 0.5547860499628988,
"repo_name": "qedsoftware/commcare-hq",
"id": "a6b0b048c71e8292de458496bbee877c1136f5d1",
"size": "4043",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "corehq/apps/fixtures/tests/test_location_ownership.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ActionScript",
"bytes": "15950"
},
{
"name": "CSS",
"bytes": "508392"
},
{
"name": "HTML",
"bytes": "2869325"
},
{
"name": "JavaScript",
"bytes": "2395360"
},
{
"name": "PHP",
"bytes": "2232"
},
{
"name": "PLpgSQL",
"bytes": "125298"
},
{
"name": "Python",
"bytes": "14670713"
},
{
"name": "Shell",
"bytes": "37514"
}
],
"symlink_target": ""
}
|
"""
Compute angular separation in the sky using haversine
Note:
decimal points on constants made 0 difference in `%timeit` execution time
The Meeus algorithm is about 9.5% faster than Astropy/Vincenty on my PC,
and gives a virtually identical result
within double precision arithmetic limitations
"""
try:
from astropy.coordinates.angle_utilities import angular_separation
except ImportError:
pass
from .mathfun import asin, cos, degrees, radians, sqrt
__all__ = ["anglesep", "anglesep_meeus", "haversine"]
def anglesep_meeus(lon0: float, lat0: float, lon1: float, lat1: float, deg: bool = True) -> float:
"""
Parameters
----------
lon0 : float
longitude of first point
lat0 : float
latitude of first point
lon1 : float
longitude of second point
lat1 : float
latitude of second point
deg : bool, optional
degrees input/output (False: radians in/out)
Returns
-------
sep_rad : float
angular separation
Meeus p. 109
from "Astronomical Algorithms" by Jean Meeus Ch. 16 p. 111 (16.5)
gives angular distance in degrees between two rightAscension,Declination
points in the sky. Neglecting atmospheric effects, of course.
Meeus haversine method is stable all the way to exactly 0 deg.
either the arrays must be the same size, or one of them must be a scalar
"""
if deg:
lon0 = radians(lon0)
lat0 = radians(lat0)
lon1 = radians(lon1)
lat1 = radians(lat1)
sep_rad = 2 * asin(
sqrt(haversine(lat0 - lat1) + cos(lat0) * cos(lat1) * haversine(lon0 - lon1))
)
return degrees(sep_rad) if deg else sep_rad
def anglesep(lon0: float, lat0: float, lon1: float, lat1: float, deg: bool = True) -> float:
"""
Parameters
----------
lon0 : float
longitude of first point
lat0 : float
latitude of first point
lon1 : float
longitude of second point
lat1 : float
latitude of second point
deg : bool, optional
degrees input/output (False: radians in/out)
Returns
-------
sep_rad : float
angular separation
For reference, this is from astropy astropy/coordinates/angle_utilities.py
Angular separation between two points on a sphere.
"""
if deg:
lon0 = radians(lon0)
lat0 = radians(lat0)
lon1 = radians(lon1)
lat1 = radians(lat1)
try:
sep_rad = angular_separation(lon0, lat0, lon1, lat1)
except NameError:
sep_rad = anglesep_meeus(lon0, lat0, lon1, lat1, deg=False)
return degrees(sep_rad) if deg else sep_rad
def haversine(theta: float) -> float:
"""
Compute haversine
Parameters
----------
theta : float
angle (radians)
    Returns
-------
htheta : float
haversine of `theta`
https://en.wikipedia.org/wiki/Haversine
Meeus p. 111
"""
return (1 - cos(theta)) / 2.0
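# Illustrative usage (not part of the original module):
#   from pymap3d.haversine import anglesep, anglesep_meeus
#   anglesep_meeus(35.0, 23.0, 84.0, 20.0)  # ~45.5 degrees
#   anglesep(35.0, 23.0, 84.0, 20.0)        # agrees within double precision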
|
{
"content_hash": "719ca3dd49dd12b3107b48996b3c35ad",
"timestamp": "",
"source": "github",
"line_count": 128,
"max_line_length": 98,
"avg_line_length": 23.328125,
"alnum_prop": 0.6259209645010047,
"repo_name": "geospace-code/pymap3d",
"id": "a6dd4937cdfa44ae455f1f6363cfe331f437195b",
"size": "2986",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "src/pymap3d/haversine.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "MATLAB",
"bytes": "512"
},
{
"name": "Python",
"bytes": "166232"
}
],
"symlink_target": ""
}
|
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Font(_BaseTraceHierarchyType):
# class properties
# --------------------
_parent_path_str = "table.legendgrouptitle"
_path_str = "table.legendgrouptitle.font"
_valid_props = {"color", "family", "size"}
# color
# -----
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
# family
# ------
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser will only be able to apply a font if
it is available on the system which it operates. Provide
multiple font families, separated by commas, to indicate the
preference in which to apply fonts if they aren't available on
the system. The Chart Studio Cloud (at https://chart-
studio.plotly.com or on-premise) generates images on a server,
where only a select number of fonts are installed and
supported. These include "Arial", "Balto", "Courier New",
"Droid Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
One", "Old Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
The 'family' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
# size
# ----
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
Returns
-------
int|float
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
apply a font if it is available on the system which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The Chart
Studio Cloud (at https://chart-studio.plotly.com or on-
premise) generates images on a server, where only a
select number of fonts are installed and supported.
These include "Arial", "Balto", "Courier New", "Droid
Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
One", "Old Standard TT", "Open Sans", "Overpass", "PT
Sans Narrow", "Raleway", "Times New Roman".
size
"""
def __init__(self, arg=None, color=None, family=None, size=None, **kwargs):
"""
Construct a new Font object
Sets this legend group's title font.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.table.legendgrouptitle.Font`
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
apply a font if it is available on the system which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The Chart
Studio Cloud (at https://chart-studio.plotly.com or on-
premise) generates images on a server, where only a
select number of fonts are installed and supported.
These include "Arial", "Balto", "Courier New", "Droid
Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
One", "Old Standard TT", "Open Sans", "Overpass", "PT
Sans Narrow", "Raleway", "Times New Roman".
size
Returns
-------
Font
"""
super(Font, self).__init__("font")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.table.legendgrouptitle.Font
constructor must be a dict or
an instance of :class:`plotly.graph_objs.table.legendgrouptitle.Font`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("color", None)
_v = color if color is not None else _v
if _v is not None:
self["color"] = _v
_v = arg.pop("family", None)
_v = family if family is not None else _v
if _v is not None:
self["family"] = _v
_v = arg.pop("size", None)
_v = size if size is not None else _v
if _v is not None:
self["size"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
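# Illustrative usage (assumed figure setup, not part of the generated module):
#   import plotly.graph_objects as go
#   fig = go.Figure(go.Table(header=dict(values=["a"]), cells=dict(values=[[1]])))
#   fig.update_traces(legendgrouptitle=dict(
#       text="My group", font=dict(family="Arial", size=14, color="#444")))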
|
{
"content_hash": "41881730f4aa59a1e1bfddf1dbc3b4b3",
"timestamp": "",
"source": "github",
"line_count": 227,
"max_line_length": 82,
"avg_line_length": 37.23348017621145,
"alnum_prop": 0.5597491717936584,
"repo_name": "plotly/plotly.py",
"id": "d48cf0853ff67d59f360fc31d255c88234109232",
"size": "8452",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/graph_objs/table/legendgrouptitle/_font.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "545"
},
{
"name": "JavaScript",
"bytes": "2074"
},
{
"name": "PostScript",
"bytes": "565328"
},
{
"name": "Python",
"bytes": "31506317"
},
{
"name": "TypeScript",
"bytes": "71337"
}
],
"symlink_target": ""
}
|
from shellbot import Command
class Next(Command):
"""
Displays the next item to do
>>>command = Next(store=store)
>>>shell.load_command(command)
"""
keyword = u'next'
information_message = u'Display next item to do'
def execute(self, bot, arguments=None, **kwargs):
"""
Displays the next item to do
"""
if self.engine.factory is None:
raise AttributeError(u'Todo factory has not been initialised')
item = self.engine.factory.read()
if item is not None:
bot.say(u"Coming next: {}".format(item))
else:
bot.say(u"Nothing to do yet.")
|
{
"content_hash": "7f01d7fc3ee7841a8f9df669a113fc0f",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 74,
"avg_line_length": 25.423076923076923,
"alnum_prop": 0.583963691376702,
"repo_name": "bernard357/shellbot",
"id": "66cd92db03e6e532ec86faf651a9ce55e6442bb1",
"size": "1468",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/todos/next.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "2233"
},
{
"name": "Python",
"bytes": "807558"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals, division, absolute_import
from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
import argparse
import cgi
import copy
from datetime import datetime, timedelta
from json import JSONEncoder
from flask import jsonify, Response, request
from flask_restplus import inputs
from queue import Queue, Empty
from flexget.api import api, APIResource
from flexget.api.app import (
APIError,
NotFoundError,
Conflict,
BadRequest,
success_response,
base_message_schema,
etag,
)
from flexget.config_schema import process_config
from flexget.entry import Entry
from flexget.event import event
from flexget.options import get_parser
from flexget.task import task_phases
from flexget.utils import json
from flexget.utils import requests
from flexget.utils.lazy_dict import LazyLookup
# Tasks API
tasks_api = api.namespace('tasks', description='Manage Tasks')
class ObjectsContainer(object):
tasks_list_object = {
'oneOf': [
{'type': 'array', 'items': {'$ref': '#/definitions/tasks.task'}},
{'type': 'array', 'items': {'type': 'string'}},
]
}
task_input_object = {
'type': 'object',
'properties': {'name': {'type': 'string'}, 'config': {'$ref': '/schema/plugins'}},
'required': ['name', 'config'],
'additionalProperties': False,
}
task_return_object = copy.deepcopy(task_input_object)
task_return_object['properties']['config'] = {'type': 'object'}
task_queue_schema = {
'type': 'array',
'items': {
'type': 'object',
'properties': {
'id': {'type': 'integer'},
'name': {'type': 'string'},
'current_phase': {'type': ['string', 'null']},
'current_plugin': {'type': ['string', 'null']},
},
},
}
task_execution_results_schema = {
'type': 'object',
'properties': {
'task': {
'type': 'object',
'properties': {
'id': {'type': 'integer'},
'name': {'type': 'string'},
'stream': {
'type': 'array',
'items': {
'progress': {
'type': 'object',
'properties': {
'status': {
'type': 'string',
'enum': ['pending', 'running', 'complete'],
},
'phase': {'type': 'string', 'enum': task_phases},
'plugin': {'type': 'string'},
                                    'percent': {'type': 'number'},
},
},
'summary': {
'type': 'object',
'properties': {
'accepted': {'type': 'integer'},
'rejected': {'type': 'integer'},
'failed': {'type': 'integer'},
'undecided': {'type': 'integer'},
'aborted': {'type': 'boolean'},
'abort_reason': {'type': 'string'},
},
},
'entry_dump': {'type': 'array', 'items': {'type': 'object'}},
'log': {'type': 'string'},
},
},
},
}
},
}
inject_input = {
'type': 'object',
'properties': {
'title': {
'type': 'string',
                'description': 'Title of the entry. If not supplied, an attempt will be made to retrieve it '
                'from the URL headers',
},
'url': {'type': 'string', 'format': 'url', 'description': 'URL of the entry'},
'force': {
'type': 'boolean',
'description': 'Prevent any plugins from rejecting this entry',
},
'accept': {
'type': 'boolean',
'description': 'Accept this entry immediately upon injection (disregard task filters)',
},
'fields': {
'type': 'object',
                'description': 'An object that can contain any other values for the entry',
},
},
'required': ['url'],
}
task_execution_input = {
'type': 'object',
'properties': {
'tasks': {
'type': 'array',
'items': {'type': 'string'},
'minItems': 1,
'uniqueItems': True,
},
'progress': {
'type': 'boolean',
'default': True,
'description': 'Include task progress updates',
},
'summary': {'type': 'boolean', 'default': True, 'description': 'Include task summary'},
'entry_dump': {
'type': 'boolean',
'default': True,
'description': 'Include dump of entries including fields',
},
'inject': {
'type': 'array',
'items': inject_input,
'description': 'A List of entry objects',
},
'loglevel': {
'type': 'string',
'description': 'Specify log level',
'enum': ['critical', 'error', 'warning', 'info', 'verbose', 'debug', 'trace'],
},
},
'required': ['tasks'],
}
params_return_schema = {'type': 'array', 'items': {'type': 'object'}}
tasks_list_schema = api.schema_model('tasks.list', ObjectsContainer.tasks_list_object)
task_input_schema = api.schema_model('tasks.task', ObjectsContainer.task_input_object)
task_return_schema = api.schema_model('tasks.task', ObjectsContainer.task_return_object)
task_api_queue_schema = api.schema_model('task.queue', ObjectsContainer.task_queue_schema)
task_api_execute_schema = api.schema_model(
'task.execution', ObjectsContainer.task_execution_results_schema
)
task_execution_schema = api.schema_model(
'task_execution_input', ObjectsContainer.task_execution_input
)
task_execution_params = api.schema_model(
'tasks.execution_params', ObjectsContainer.params_return_schema
)
task_api_desc = (
'Task config schema too large to display, you can view the schema using the schema API'
)
tasks_parser = api.parser()
tasks_parser.add_argument(
'include_config', type=inputs.boolean, default=True, help='Include task config'
)
@tasks_api.route('/')
@api.doc(description=task_api_desc)
class TasksAPI(APIResource):
@etag
@api.response(200, model=tasks_list_schema)
@api.doc(parser=tasks_parser)
def get(self, session=None):
""" List all tasks """
active_tasks = {
task: task_data
for task, task_data in self.manager.user_config.get('tasks', {}).items()
if not task.startswith('_')
}
args = tasks_parser.parse_args()
if not args.get('include_config'):
return jsonify(list(active_tasks))
tasks = [{'name': name, 'config': config} for name, config in active_tasks.items()]
return jsonify(tasks)
@api.validate(task_input_schema, description='New task object')
@api.response(201, description='Newly created task', model=task_return_schema)
@api.response(Conflict)
@api.response(APIError)
def post(self, session=None):
""" Add new task """
data = request.json
task_name = data['name']
if task_name in self.manager.user_config.get('tasks', {}):
raise Conflict('task already exists')
if 'tasks' not in self.manager.user_config:
self.manager.user_config['tasks'] = {}
if 'tasks' not in self.manager.config:
self.manager.config['tasks'] = {}
task_schema_processed = copy.deepcopy(data)
errors = process_config(
task_schema_processed, schema=task_input_schema.__schema__, set_defaults=True
)
if errors:
raise APIError('problem loading config, raise a BUG as this should not happen!')
self.manager.user_config['tasks'][task_name] = data['config']
self.manager.config['tasks'][task_name] = task_schema_processed['config']
self.manager.save_config()
self.manager.config_changed()
rsp = jsonify({'name': task_name, 'config': self.manager.user_config['tasks'][task_name]})
rsp.status_code = 201
return rsp
@tasks_api.route('/<task>/')
@api.doc(params={'task': 'task name'}, description=task_api_desc)
@api.response(APIError, description='unable to read config')
class TaskAPI(APIResource):
@etag
@api.response(200, model=task_return_schema)
@api.response(NotFoundError, description='task not found')
def get(self, task, session=None):
""" Get task config """
if task not in self.manager.user_config.get('tasks', {}):
raise NotFoundError('task `%s` not found' % task)
return jsonify({'name': task, 'config': self.manager.user_config['tasks'][task]})
@api.validate(task_input_schema)
@api.response(200, model=task_return_schema)
@api.response(NotFoundError)
@api.response(BadRequest)
def put(self, task, session=None):
""" Update tasks config """
data = request.json
new_task_name = data['name']
if task not in self.manager.user_config.get('tasks', {}):
raise NotFoundError('task `%s` not found' % task)
if 'tasks' not in self.manager.user_config:
self.manager.user_config['tasks'] = {}
if 'tasks' not in self.manager.config:
self.manager.config['tasks'] = {}
if task != new_task_name:
# Rename task
if new_task_name in self.manager.user_config['tasks']:
                raise BadRequest('cannot rename task as it already exists')
del self.manager.user_config['tasks'][task]
del self.manager.config['tasks'][task]
# Process the task config
task_schema_processed = copy.deepcopy(data)
errors = process_config(
task_schema_processed, schema=task_return_schema.__schema__, set_defaults=True
)
if errors:
raise APIError('problem loading config, raise a BUG as this should not happen!')
self.manager.user_config['tasks'][new_task_name] = data['config']
self.manager.config['tasks'][new_task_name] = task_schema_processed['config']
self.manager.save_config()
self.manager.config_changed()
rsp = jsonify(
{'name': new_task_name, 'config': self.manager.user_config['tasks'][new_task_name]}
)
rsp.status_code = 200
return rsp
@api.response(200, model=base_message_schema, description='deleted task')
@api.response(NotFoundError)
def delete(self, task, session=None):
""" Delete a task """
try:
self.manager.config['tasks'].pop(task)
self.manager.user_config['tasks'].pop(task)
except KeyError:
raise NotFoundError('task does not exist')
self.manager.save_config()
self.manager.config_changed()
return success_response('successfully deleted task')
default_start_date = (datetime.now() - timedelta(weeks=1)).strftime('%Y-%m-%d')
status_parser = api.parser()
status_parser.add_argument(
'succeeded', type=inputs.boolean, default=True, help='Filter by success status'
)
status_parser.add_argument(
'produced',
type=inputs.boolean,
default=True,
store_missing=False,
help='Filter by tasks that produced entries',
)
status_parser.add_argument(
'start_date',
type=inputs.datetime_from_iso8601,
default=default_start_date,
help='Filter by minimal start date. Example: \'2012-01-01\'',
)
status_parser.add_argument(
'end_date',
type=inputs.datetime_from_iso8601,
help='Filter by maximal end date. Example: \'2012-01-01\'',
)
status_parser.add_argument(
'limit',
default=100,
type=int,
help='Limit return of executions per task, as that number can be huge',
)
def _task_info_dict(task):
return {
'id': int(task.id),
'name': task.name,
'current_phase': task.current_phase,
'current_plugin': task.current_plugin,
}
@tasks_api.route('/queue/')
class TaskQueueAPI(APIResource):
@api.response(200, model=task_api_queue_schema)
def get(self, session=None):
""" List task(s) in queue for execution """
tasks = [_task_info_dict(task) for task in self.manager.task_queue.run_queue.queue]
if self.manager.task_queue.current_task:
tasks.insert(0, _task_info_dict(self.manager.task_queue.current_task))
return jsonify(tasks)
class ExecuteLog(Queue):
""" Supports task log streaming by acting like a file object """
def write(self, s):
self.put(json.dumps({'log': s}))
_streams = {}
# Another namespace for the same endpoint
inject_api = api.namespace('inject', description='Entry injection API')
@inject_api.route('/params/')
@tasks_api.route('/execute/params/')
@api.doc(description='Available payload parameters for task execute')
class TaskExecutionParams(APIResource):
@etag(cache_age=3600)
@api.response(200, model=task_execution_params)
def get(self, session=None):
""" Execute payload parameters """
return jsonify(ObjectsContainer.task_execution_input)
@inject_api.route('/')
@tasks_api.route('/execute/')
@api.doc(description='For details on available parameters query /params/ endpoint')
class TaskExecutionAPI(APIResource):
@api.response(NotFoundError)
@api.response(BadRequest)
@api.response(200, model=task_api_execute_schema)
@api.validate(task_execution_schema)
def post(self, session=None):
""" Execute task and stream results """
data = request.json
for task in data.get('tasks'):
if task.lower() not in [
t.lower() for t in self.manager.user_config.get('tasks', {}).keys()
]:
raise NotFoundError('task %s does not exist' % task)
queue = ExecuteLog()
output = queue if data.get('loglevel') else None
stream = (
True
if any(
arg[0] in ['progress', 'summary', 'loglevel', 'entry_dump']
for arg in data.items()
if arg[1]
)
else False
)
loglevel = data.pop('loglevel', None)
        # This emulates the CLI options `--now` and `--no-cache`
options = {
'interval_ignore': data.pop('now', None),
'nocache': data.pop('no_cache', None),
'allow_manual': True,
}
for option, value in data.items():
options[option] = value
if data.get('inject'):
entries = []
for item in data.get('inject'):
entry = Entry()
entry['url'] = item['url']
if not item.get('title'):
try:
value, params = cgi.parse_header(
requests.head(item['url']).headers['Content-Disposition']
)
entry['title'] = params['filename']
except KeyError:
raise BadRequest(
'No title given, and couldn\'t get one from the URL\'s HTTP response'
)
else:
entry['title'] = item.get('title')
if item.get('force'):
entry['immortal'] = True
if item.get('accept'):
entry.accept(reason='accepted by API inject')
if item.get('fields'):
for key, value in item.get('fields').items():
entry[key] = value
entries.append(entry)
options['inject'] = entries
executed_tasks = self.manager.execute(options=options, output=output, loglevel=loglevel)
tasks_queued = []
for task_id, task_name, task_event in executed_tasks:
tasks_queued.append({'id': task_id, 'name': task_name, 'event': task_event})
_streams[task_id] = {'queue': queue, 'last_update': datetime.now(), 'args': data}
if not stream:
return jsonify(
{'tasks': [{'id': task['id'], 'name': task['name']} for task in tasks_queued]}
)
def stream_response():
# First return the tasks to execute
yield '{"stream": ['
yield json.dumps(
{'tasks': [{'id': task['id'], 'name': task['name']} for task in tasks_queued]}
) + ',\n'
while True:
try:
yield queue.get(timeout=1) + ',\n'
continue
except Empty:
pass
if queue.empty() and all([task['event'].is_set() for task in tasks_queued]):
for task in tasks_queued:
del _streams[task['id']]
break
yield '{}]}'
return Response(stream_response(), mimetype='text/event-stream')
@event('manager.daemon.started')
def setup_params(mgr):
parser = get_parser('execute')
for action in parser._optionals._actions:
# Ignore list for irrelevant actions
ignore = ['help', 'verbose', 'silent', 'try-regexp', 'dump-config', 'dump']
name = action.option_strings[-1].strip('--')
if name in ignore or action.help == '==SUPPRESS==':
continue
name = name.replace('-', '_')
property_data = {'description': action.help.capitalize()}
if isinstance(action, argparse._StoreConstAction):
property_data['type'] = 'boolean'
elif isinstance(action, argparse._StoreAction):
if action.nargs in ['+', '*']:
property_data['type'] = 'array'
property_data['items'] = {'type': 'string'}
property_data['minItems'] = 1
else:
property_data['type'] = 'string'
else:
# Unknown actions should not be added to schema
property_data = None
# Some options maybe pre-added to schema with additional options, don't override them
if property_data and name not in ObjectsContainer.task_execution_input['properties']:
ObjectsContainer.task_execution_input['properties'][name] = property_data
ObjectsContainer.task_execution_input['additionalProperties'] = False
class EntryDecoder(JSONEncoder):
def default(self, o):
if isinstance(o, LazyLookup):
return '<LazyField>'
try:
return JSONEncoder.default(self, o)
except TypeError:
return str(o)
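# Illustrative note (not part of the original module): encoding an entry whose
# field is still a LazyLookup yields the placeholder string instead of failing,
# e.g. EntryDecoder().encode({'title': <a LazyLookup instance>})
# -> '{"title": "<LazyField>"}'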
_phase_percents = {
'input': 5,
'metainfo': 10,
'filter': 30,
'download': 40,
'modify': 65,
'output': 75,
'exit': 100,
}
def update_stream(task, status='pending'):
if task.current_phase in _phase_percents:
task.stream['percent'] = _phase_percents[task.current_phase]
progress = {
'status': status,
'phase': task.current_phase,
'plugin': task.current_plugin,
'percent': task.stream.get('percent', 0),
}
task.stream['queue'].put(json.dumps({'progress': progress}))
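# Illustrative example (not part of the original module): while the 'filter'
# phase of a task is running, update_stream() puts a JSON message like
#   {"progress": {"status": "running", "phase": "filter",
#                 "plugin": "<current plugin>", "percent": 30}}
# on the stream queue.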
@event('task.execute.started')
def start_task(task):
task.stream = _streams.get(task.id)
if task.stream and task.stream['args'].get('progress'):
update_stream(task, status='running')
@event('task.execute.completed')
def finish_task(task):
if task.stream:
if task.stream['args'].get('progress'):
update_stream(task, status='complete')
if task.stream['args'].get('entry_dump'):
entries = [entry.store for entry in task.entries]
task.stream['queue'].put(EntryDecoder().encode({'entry_dump': entries}))
if task.stream['args'].get('summary'):
task.stream['queue'].put(
json.dumps(
{
'summary': {
'accepted': len(task.accepted),
'rejected': len(task.rejected),
'failed': len(task.failed),
'undecided': len(task.undecided),
'aborted': task.aborted,
'abort_reason': task.abort_reason,
}
}
)
)
@event('task.execute.before_plugin')
def track_progress(task, plugin_name):
if task.stream and task.stream['args'].get('progress'):
update_stream(task, status='running')
|
{
"content_hash": "fb9304dcf0a7e0a398206b84e1867cf3",
"timestamp": "",
"source": "github",
"line_count": 621,
"max_line_length": 110,
"avg_line_length": 34.35748792270532,
"alnum_prop": 0.537870266216723,
"repo_name": "JorisDeRieck/Flexget",
"id": "470aeda4ff2d02c8130e6faed6c7ccbfac92c800",
"size": "21336",
"binary": false,
"copies": "4",
"ref": "refs/heads/develop",
"path": "flexget/api/core/tasks.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "11875"
},
{
"name": "Dockerfile",
"bytes": "2338"
},
{
"name": "HTML",
"bytes": "79800"
},
{
"name": "JavaScript",
"bytes": "263723"
},
{
"name": "Python",
"bytes": "3512234"
},
{
"name": "SRecode Template",
"bytes": "3"
},
{
"name": "Shell",
"bytes": "1576"
}
],
"symlink_target": ""
}
|