hexsha stringlengths 40 40 | size int64 1 1.03M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 239 | max_stars_repo_name stringlengths 5 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 239 | max_issues_repo_name stringlengths 5 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 239 | max_forks_repo_name stringlengths 5 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.03M | avg_line_length float64 1 958k | max_line_length int64 1 1.03M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
acf5c72fb860b9b7683b68c87e38a37eb90ec0d6 | 20,898 | py | Python | checkk2fov/fov.py | danxhuber/k2epic | 743587da578f187a6c069fbe02e5d4a5cadd3a98 | [
"MIT"
] | 2 | 2015-11-25T05:03:05.000Z | 2016-02-09T03:56:05.000Z | checkk2fov/fov.py | danxhuber/k2epic | 743587da578f187a6c069fbe02e5d4a5cadd3a98 | [
"MIT"
] | null | null | null | checkk2fov/fov.py | danxhuber/k2epic | 743587da578f187a6c069fbe02e5d4a5cadd3a98 | [
"MIT"
] | null | null | null | try:
import matplotlib.pyplot as mp
import matplotlib
except ImportError:
pass
import projection as proj
import numpy as np
import rotate as r
import greatcircle as gcircle
import definefov
__version__ = "$Id: fov.py 35 2013-12-19 22:27:34Z fergalm $"
__URL__ = "$URL: http://svn.code.sf.net/p/keplertwowheel/code/py/fov.py $"
"""
According to Instrument Handbook page 47, mod3 is in the +y direction
coordinates (+x is in the direction of the telescope pointing).
According to Flight Segment Users' Manual, p92 the sun shield is in
the +y directions.
So in the model used in KeplerFov, at ra,dec=0, a roll angle of 0
means pointing the solar array due North.
In two-wheel mode, we will want the roll angle equal to the solar
angle.
"""
def getFovAngleFromSpacecraftRoll(yaxis_deg):
    """Convert a roll angle measured relative to the spacecraft y-axis
    into the equivalent roll angle in the field-of-view frame.

    The y-axis (perpendicular to the solar arrays) lies 193 degrees
    clockwise of the direction from the centre of the FOV to the centre
    of module 3, so the conversion is a fixed angular offset:

        4  3  2
     9  8  7  6  5
    14 13 12 11 10
          \\
           \\
            \\
             \\ y-axis of S/C in this direction

    Inverse of getSpacecraftRollAngleFromFovAngle().
    """
    # Apply the offset term by term (13 + 180 - 90 degrees) so the
    # floating-point result matches the historical behaviour exactly.
    angle = yaxis_deg + 13.0
    angle = angle + 180
    return angle - 90
def getSpacecraftRollAngleFromFovAngle(fovAngle_deg):
    """Convert a roll angle in the FOV frame back to a spacecraft-y-axis
    roll angle.  Inverse of getFovAngleFromSpacecraftRoll(); see the
    frame-offset notes there.
    """
    # Undo the fixed offset term by term to keep float behaviour identical.
    angle = fovAngle_deg - 13.0
    angle = angle - 180
    return angle + 90
class KeplerFov():
    """Model of the Kepler field of view for a given boresight pointing.

    Converts between sky coordinates (ra/dec, degrees) and focal-plane
    positions (channel, column, row), and provides plotting helpers.
    All channel-corner positions are cached in self.currentRaDec and
    refreshed by setPointing().
    """
    def __init__(self, ra_deg, dec_deg, roll_deg):
        """
        A representation of the Kepler Field of View designed
        for planning target observations.

        Inputs:
        ra_deg, dec_deg (floats) The direction to point the boresight
                        of the telescope. Note that while any
                        legal ra/dec is accepted by this class,
                        allowed values in the two wheel
                        mission are tightly constrained. Values
                        are in degrees

        roll_deg        Roll of the boresight. a roll of zero
                        orients the FOV so that mod 3 is due North.
                        Use getFovAngleFromSpacecraftRoll()
                        to convert from spacecraft roll values

        Values for the prime mission are:
        ra_deg = 290.66666667
        dec_deg = +44.5
        rollAngle_deg = 33.0 + n*90, where n is the season number
        """
        #default map is set by setPointing()
        #This is used for calculations of where objects lie within
        #a channel, and is always a Gnomic projection centred on
        #the boresight
        self.defaultMap = None
        # Angular size of one pixel on the sky, arcsec.
        self.plateScale_arcsecPerPix = 3.98

        # Module numbers 1..24 (Python 2: range() returns a list).
        self.mods = range(1, 25)
        #Remove the 4 FGS mods
        # (pops remove mods 1, 5 and 21; mod 25 is already outside the
        # range above, so only three pops are needed)
        self.mods.pop(0)
        self.mods.pop(3)
        self.mods.pop(-4)

        #Relative vectors to the module corners.
        #If the spacecraft was pointed at (ra, dec) = (0,0) with mod3
        #pointed north, r.raDecFromVec() of these values would give
        #The ra and decs of the corners of the modules.
        self.origin = definefov.loadOriginVectors()

        self.ra0_deg = ra_deg
        self.dec0_deg = dec_deg
        self.roll0_deg = roll_deg
        # Cache of channel-corner ra/decs; filled in by setPointing().
        self.currentRaDec = None
        self.setPointing(ra_deg, dec_deg, roll_deg)

    ###
    # Code related to pointing the spacecraft
    ###
    def getOrigin(self, cartesian=False):
        """Return the ra/decs of the channel corners if the S/C
        is pointed at the origin (ra,dec = 0,0)

        Inputs:
        cartesian   (bool) If True, return each channel corner
                    as a unit vector

        Returns:
        A 2d numpy array. Each row represents a channel corner
        The columns are module, output, channel, ra, dec
        If cartesian is True, ra, and dec are replaced by the
        coordinates of a 3 vector
        """
        out = self.origin.copy()
        if cartesian is False:
            out = self.getRaDecs(out)
        return out

    def setPointing(self, ra_deg, dec_deg, roll_deg):
        # Recompute the cached channel-corner positions and the default
        # boresight-centred projection for the new pointing.
        t = self.getPointing(ra_deg, dec_deg, roll_deg)
        self.currentRaDec = t
        self.defaultMap = proj.Gnomic(ra_deg, dec_deg)
        self.ra0_deg = ra_deg
        self.dec0_deg = dec_deg
        self.roll0_deg = roll_deg

    def getPointing(self, ra_deg, dec_deg, roll_deg, cartesian=False):
        """Compute a pointing model without changing the internal object pointing"""
        #Roll FOV
        Rrotate = r.rotateAboutVectorMatrix([1,0,0], roll_deg) #Roll

        #Slew to ra/dec of zero
        Ra = r.rightAscensionRotationMatrix(ra_deg)
        Rd = r.declinationRotationMatrix(dec_deg)
        Rslew = np.dot(Ra, Rd)
        # Combined rotation: roll first, then slew.
        R = np.dot(Rslew, Rrotate)

        slew = self.origin*1  # multiply-by-one makes a cheap copy
        for i, row in enumerate(self.origin):
            slew[i, 3:6] = np.dot(R, row[3:6])

        if cartesian is False:
            slew = self.getRaDecs(slew)
        return slew

    def getRaDecs(self, mods):
        """Internal function converting cartesian coords to
        ra dec"""
        raDecOut = np.empty( (len(mods), 5))
        # Columns 0-2 (module, output, channel) pass through unchanged.
        raDecOut[:,0:3] = mods[:,0:3]

        for i, row in enumerate(mods):
            raDecOut[i, 3:5] = r.raDecFromVec(row[3:6])
        return raDecOut

    def getCoordsOfChannelCorners(self):
        """Get ra/decs of corners of channels.

        Input:
        (none)

        Returns:
        A 2d numpy array.
        Each row represents a single corner of a channel.
        The columns are:
        module, output, channel, ra (degrees), dec (degrees)

        Note that the locations of the FGS channels are
        included in this output. FGS channels are 85-88
        inclusive
        """
        return self.currentRaDec

    ###
    # Sky -> pixel code
    ###
    def getChannelColRow(self, ra, dec, \
            wantZeroOffset=False, allowIllegalReturnValues=True):
        # Map a sky position to (channel, column, row).  Returns the
        # sentinel (0,0,0) with a warning when the position falls on no
        # channel, rather than raising.
        try:
            ch = self.pickAChannel(ra, dec)
        except ValueError:
            print "WARN: %.7f %.7f not on any channel" %(ra, dec)
            return (0,0,0)

        col, row = self.getColRowWithinChannel(ra, dec, ch, \
            wantZeroOffset, allowIllegalReturnValues)
        return (ch, col, row)

    def pickAChannel(self, ra_deg, dec_deg):
        # Return the channel number whose corner polygon (in the default
        # projection) contains the given sky position.
        x,y = self.defaultMap.skyToPix(ra_deg, dec_deg)

        for ch in np.unique(self.currentRaDec[:,2]):
            poly = self.getChannelAsPolygon(ch)
            if poly.isPointInside(x,y):
                return ch
        raise ValueError("Requested coords %.7f %.7f are not on any channel" %(ra_deg, dec_deg))

    def getColRowWithinChannel(self, ra, dec, ch, \
            wantZeroOffset=False, allowIllegalReturnValues=True):
        """How close is a given ra/dec to the origin of a KeplerModule
        """
        x, y = self.defaultMap.skyToPix(ra, dec)
        kepModule = self.getChannelAsPolygon(ch)

        # Vector from the channel's first corner to the requested point.
        # NOTE(review): this local name r shadows the rotate module
        # imported as r at file scope.
        r = np.array([x[0],y[0]]) - kepModule.polygon[0,:]
        v1 = kepModule.polygon[1,:] - kepModule.polygon[0,:]
        v3 = kepModule.polygon[3,:] - kepModule.polygon[0,:]

        #Divide by |v|^2 because you're normalising v and r
        colFrac = np.dot(r, v1) / np.linalg.norm(v1)**2
        rowFrac = np.dot(r, v3) / np.linalg.norm(v3)**2

        #This is where it gets a little hairy. The channel "corners"
        #supplied to me actually represent points 5x5 pixels inside
        #the science array. Which isn't what you'd expect.
        #These magic numbers are the pixel numbers of the corner
        #edges given in fov.txt
        col = colFrac*(1106-17) + 17
        row = rowFrac*(1038-25) + 25

        if not allowIllegalReturnValues:
            if not self.colRowIsOnSciencePixel(col, row):
                msg = "Request position %7f %.7f " %(ra, dec)
                msg += "does not lie on science pixels for channel %i " %(ch)
                msg += "[ %.1f %.1f]" %(col, row)
                raise ValueError(msg)

        #Convert from zero-offset to one-offset coords
        if not wantZeroOffset:
            col += 1
            row += 1
        return (col, row)

    def colRowIsOnSciencePixel(self, col, row):
        """Is col row on a science pixel?

        Ranges taken from Fig 25 of Instrument Handbook (p50)
        """
        padding = 00  # slack in pixels; zero means strict science-array limits
        if col < 12.-padding or col > 1111+padding:
            return False

        if row < 20-padding or row > 1043+padding:
            return False
        return True

    def getColRowWithinFgsCh(self, ra, dec, ch, \
            wantZeroOffset=False, allowIllegalReturnValues=True):
        """How close is a given ra/dec to the origin of an FGS mod

        Returns col and row of the position.
        """
        x, y = self.defaultMap.skyToPix(ra, dec)
        kepModule = self.getChannelAsPolygon(ch)
        # Same edge-basis decomposition as getColRowWithinChannel(),
        # but scaled to the FGS chip dimensions.
        r = np.array([x[0],y[0]]) - kepModule.polygon[0,:]
        v1 = kepModule.polygon[1,:] - kepModule.polygon[0,:]
        v3 = kepModule.polygon[3,:] - kepModule.polygon[0,:]

        colFrac = np.dot(r, v1) / np.linalg.norm(v1)**2
        rowFrac = np.dot(r, v3) / np.linalg.norm(v3)**2

        col = colFrac*(547)
        row = rowFrac*(527)

        if not allowIllegalReturnValues:
            if not self.colRowIsOnFgsPixel(col, row):
                msg = "Request position %7f %.7f " %(ra, dec)
                msg += "does not lie on FGS pixels for channel %i " %(ch)
                msg += "[ %.1f %.1f]" %(col, row)
                raise ValueError(msg)

        #Convert from zero-offset to one-offset coords
        if not wantZeroOffset:
            col += 1
            row += 1
        return (col, row)

    def colRowIsOnFgsPixel(self, col, row):
        """Is col row on an FGS pixel?

        Ranges taken from Fig 25 of Instrument Handbook (p50)
        """
        if col < 12. or col > 547:
            return False
        if row < 0 or row > 527:
            return False
        return True

    ###
    # Pixel --> sky
    ###
    def getRaDecForChannelColRow(self, ch, col, row, oneOffsetPixels=True):
        # Inverse of getColRowWithinChannel(): map a (channel, col, row)
        # back to [ra, dec] in degrees.
        if oneOffsetPixels:
            col -= 1
            row -= 1

        #Convert col row to colFrac, rowFrac
        #See notes in getColRowWithinChannel
        padding = 00
        colFrac = (col-(17.-padding)) / ((1106.+padding)-(17.-padding))
        rowFrac = (row-(25.-padding)) / ((1038.+padding)-(25.-padding))

        #Get basis vectors for channel. vZero is vector close
        #to readout of chip (c,r) = (0,0)
        #vCol is a vector in increasing column direction
        kepModule = self.getChannelAsPolygon(ch)
        vZero = kepModule.polygon[0,:]
        vCol = kepModule.polygon[1,:] - vZero
        vRow = kepModule.polygon[3,:] - vZero

        #Where on the projected plane does col,row lie?
        projectionXy = vZero + (colFrac*vCol) + (rowFrac*vRow)

        #Call pixToSky
        x, y = projectionXy
        a, d = self.defaultMap.pixToSky(x, y)
        return [a[0], d[0]]

    ###
    # Polygon code: sky <--> pix and other functions use
    # these polygons to represent a single channel on the FOV
    ###
    def getAllChannelsAsPolygons(self, maptype=None):
        """Return the corners of the channels as Polygon objects.

        If a projection is supplied, the ras and
        decs are mapped onto x, y using that projection
        """
        polyList = []
        # NOTE(review): origin[:,2] lists each channel once per corner,
        # so every channel's polygon appears four times in this list --
        # confirm whether that duplication is intended.
        for ch in self.origin[:,2]:
            poly = self.getChannelAsPolygon(ch, maptype)
            polyList.append(poly)
        return polyList

    def getChannelAsPolygon(self, chNumber, maptype=None):
        # Build a KeplerModOut polygon (in projected x,y) from the four
        # corners of the requested channel.
        if maptype is None:
            maptype=self.defaultMap

        radec = self.currentRaDec
        idx = np.where(radec[:,2].astype(np.int) == chNumber)[0]
        # NOTE(review): np.any(idx) is False for an empty match;
        # len(idx) == 0 would be the more direct emptiness test.
        if not np.any(idx):
            raise ValueError("%i is not a valid channel number" %(chNumber))

        x,y = maptype.skyToPix(radec[idx,3], radec[idx,4])
        return KeplerModOut(chNumber, x=x, y=y)

    ###
    # Plotting code
    ###
    def plotPointing(self, maptype=None, colour='b', mod3='r', showOuts=True, **kwargs):
        """Plot the FOV

        mod3 is for mod 3 and mod 7
        """
        if maptype is None:
            maptype=self.defaultMap

        radec = self.currentRaDec
        mods = self.mods  # (unused here)
        # Every 4th row of radec is the first corner of a new channel.
        for ch in radec[:,2][::4]:
            idx = np.where(radec[:,2].astype(np.int) == ch)[0]
            idx = np.append(idx, idx[0]) #% points to draw a box

            c = colour
            #mod3 variable now include mod 3 and mod 7
            if ch in [5,6,7,8,17,18,19,20]:
                c = mod3
            maptype.plot(radec[idx, 3], radec[idx, 4], '-', color=c, **kwargs)

            #Show the origin of the col and row coords for this ch
            if showOuts:
                maptype.plot(radec[idx[0], 3], radec[idx[0],4], 'o', color=c)

    def plotOutline(self, maptype=None, colour='#AAAAAA', **kwargs):
        """Plot an outline of the FOV.
        """
        if maptype is None:
            maptype=self.defaultMap

        xarr = []
        yarr = []
        radec = self.currentRaDec
        # Channels around the perimeter of the focal plane, in drawing order.
        for ch in [20,4,11,28,32, 71,68, 84, 75, 60, 56, 15 ]:
            idx = np.where(radec[:,2].astype(np.int) == ch)[0]
            idx = idx[0] #Take only the first one
            x, y = maptype.skyToPix(radec[idx][3], radec[idx][4])
            xarr.append(x)
            yarr.append(y)

        verts = np.empty( (len(xarr), 2))
        verts[:,0] = xarr
        verts[:,1] = yarr

        p = matplotlib.patches.Polygon(verts, fill=True, ec="none", fc=colour)
        mp.gca().add_patch(p)

    def plotSpacecraftYAxis(self, maptype=None):
        """Plot a line pointing in the direction of the spacecraft
        y-axis (i.e normal to the solar panel
        """
        if maptype is None:
            maptype=self.defaultMap

        #Plot direction of spacecraft +y axis. The subtraction of
        #90 degrees accounts for the different defintions of where
        #zero roll is.
        yAngle_deg = getSpacecraftRollAngleFromFovAngle(self.roll0_deg)
        yAngle_deg -=90
        # Walk 12 degrees along a great circle in that direction.
        a,d = gcircle.sphericalAngDestination(self.ra0_deg, self.dec0_deg, -yAngle_deg, 12.0)

        x0, y0 = maptype.skyToPix(self.ra0_deg, self.dec0_deg)
        x1, y1 = maptype.skyToPix(a, d)
        mp.plot([x0, x1], [y0, y1], 'k-')

    def plotChIds(self, maptype=None, modout=False):
        """Print the channel numbers on the plotting display"""
        if maptype is None:
            maptype = self.defaultMap
        polyList = self.getAllChannelsAsPolygons(maptype)
        for p in polyList:
            p.identifyModule(modout=modout, maptype=maptype)

    def getWcsForChannel1(self, ch):
        # Build a rough FITS-style WCS (crval, crpix, CD matrix) for a
        # channel from its orientation in the projected plane.
        crpix =np.array( [500, 500]) #Rough guess at centre
        a,d = self.getRaDecForChannelColRow(ch, crpix[0], crpix[1])
        crval = np.array([a,d])

        #Get rotation of channel relative to FOV
        kepModule = self.getChannelAsPolygon(ch)
        vZero = kepModule.polygon[0,:]
        vCol = kepModule.polygon[1,:] - vZero
        vRow = kepModule.polygon[3,:] - vZero

        ang_rad = np.arctan2(vCol[1], vCol[0])

        # Handedness of the (col, row) basis decides the sign flip below.
        if np.cross(vCol, vRow) >= 0:
            sign = +1
        else:
            sign = -1

        CD = np.empty( (2,2))
        CD[0,0] = np.cos(ang_rad)
        CD[0,1] = np.sin(ang_rad)
        CD[1,0] = -np.sin(ang_rad)
        CD[1,1] = np.cos(ang_rad)

        if sign < 0:
            CD[1,:] *= -1

        # Scale from pixels to degrees.
        CD *= self.plateScale_arcsecPerPix/3600.
        return crval, crpix, CD
###############################################
# Polygon and KepModule code
################################################
class Polygon():
    """A closed polygon in the plane, stored as an (n, 2) vertex array.

    Vertices are taken in order: each one connects to the next, and the
    last connects back to the first, so ordering matters.
    """
    def __init__(self, x=None, y=None, pointList=None):
        """Build a polygon from parallel coordinate sequences *x* and
        *y*, or from *pointList*, a sequence of (x, y) pairs, e.g.
        [(0,0), (1,0), (1,1), (0,1)].

        Raises ValueError if neither form is supplied.
        """
        if x is not None and y is not None:
            pointList = [(xi, yi) for xi, yi in zip(x, y)]
        if pointList is None:
            raise ValueError("Must supply x,y or pointList")
        self.polygon = np.array(pointList)

    def __str__(self):
        return str(self.polygon)

    def __repr__(self):
        return repr(self.polygon)

    def isPointInside(self, xp, yp):
        """Return True if (xp, yp) lies strictly inside the polygon.

        Works for convex polygons: the point is inside when the cross
        product of every edge vector with the vector from that edge's
        start to the point has a consistent sign.
        """
        queryPoint = np.array([xp, yp]).transpose()
        verts = self.polygon
        # Edge vectors: vertex i -> vertex i+1, wrapping around.
        edgeVecs = np.roll(verts, -1, 0) - verts
        # Vectors from each vertex to the query point.
        pointVecs = queryPoint - verts
        signs = np.cross(edgeVecs, pointVecs)
        return bool(np.all(signs < 0) or np.all(signs > 0))

    def draw(self, **kwargs):
        """Add this polygon to the current matplotlib axes as a patch."""
        axes = mp.gca()
        patch = matplotlib.patches.Polygon(self.polygon, **kwargs)
        axes.add_artist(patch)
class KeplerModOut(Polygon):
    """A single Kepler module/output (channel): the polygon of its
    corners in the current projection, tagged with its channel number."""
    def __init__(self, channel, x=None, y=None, pointList=None):
        Polygon.__init__(self, x, y, pointList)
        self.channel = channel

    def getChannel(self):
        """Return this mod/out's channel number."""
        return self.channel

    def identifyModule(self, maptype=mp, modout=False):
        """Label this channel at its polygon centroid on the current plot.

        With modout=True the label is "module-output" (looked up from the
        channel number); otherwise it is the channel number itself.
        """
        cx, cy = np.mean(self.polygon, 0)
        if modout:
            mod, out = modOutFromChannel(self.channel)
            label = "%i-%i" % (mod, out)
        else:
            label = "%i" % (self.channel)
        mp.text(cx, cy, label)
#########################################################
# channel <--> mod out
#########################################################
def channelFromModOut(mod, out):
    """Return the channel number for a given (module, output) pair."""
    return loadChannelModOutLookup()[mod, out]
def modOutFromChannel(ch):
    """Return the (module, output) pair for a given channel number.

    Raises ValueError for channel numbers not present in the lookup
    table; channel 0 is rejected explicitly because it would match the
    table's zero padding in several places.
    """
    lookup = loadChannelModOutLookup()
    hits = lookup == ch
    # Column 0 holds module numbers, not channels; never match it.
    hits[:,0] = False
    if not np.any(hits):
        raise ValueError("Illegal channel request")
    if np.sum(hits) > 1:
        raise ValueError("Channel number begins at 1, not zero")

    rows, cols = np.where(hits)
    return (rows[0], cols[0])
def loadChannelModOutLookup():
    """Build the module/output -> channel lookup table.

    Returns a (26, 5) integer array.  Row m describes module m: column 0
    repeats the module number and columns 1-4 hold the channel numbers
    of its four outputs.  Row 0 is padding so the table can be indexed
    directly by one-offset module number.  The four FGS modules
    (1, 5, 21, 25) have a single channel (85-88) in column 1 and zeros
    elsewhere.
    """
    fgsChannel = {1: 85, 5: 86, 21: 87, 25: 88}
    lookup = np.zeros((26, 5), dtype=int)
    nextChannel = 1
    for mod in range(1, 26):
        lookup[mod, 0] = mod
        if mod in fgsChannel:
            lookup[mod, 1] = fgsChannel[mod]
        else:
            # Science modules take the next four channel numbers in order.
            lookup[mod, 1:5] = np.arange(nextChannel, nextChannel + 4)
            nextChannel += 4
    return lookup
#####################################################################
#####################################################################
#####################################################################
#def getRaDecOut(vectors):
#raDecOut = np.empty( (len(vectors), 2))
#for i, row in enumerate(vectors):
#raDecOut[i] = r.raDecFromVec(row)
#return raDecOut
| 29.516949 | 96 | 0.54967 |
acf5c7aa5a20348d70fccd1f9d26ad6463903e01 | 9,483 | py | Python | data/clipimages.py | ChmarsLuo/Charms-Semantic-Segmentation-Models | 4a8cdf82a218c3d3e1c8d10ef6a9118c8e6f3f89 | [
"Apache-2.0"
] | 5 | 2021-03-09T22:56:03.000Z | 2021-06-18T12:20:34.000Z | data/clipimages.py | ChmarsLuo/Charms-Semantic-Segmentation-Models | 4a8cdf82a218c3d3e1c8d10ef6a9118c8e6f3f89 | [
"Apache-2.0"
] | null | null | null | data/clipimages.py | ChmarsLuo/Charms-Semantic-Segmentation-Models | 4a8cdf82a218c3d3e1c8d10ef6a9118c8e6f3f89 | [
"Apache-2.0"
] | 1 | 2021-01-23T08:32:46.000Z | 2021-01-23T08:32:46.000Z | # -*- coding: utf-8 -*-
import os
import cv2
from osgeo import gdal
import numpy as np
def read_img(filename):
    """Open a raster with GDAL and return its projection string,
    geotransform, width, height and pixel data as a numpy array
    (band-first for multi-band rasters)."""
    dataset = gdal.Open(filename)
    width = dataset.RasterXSize
    height = dataset.RasterYSize
    geotrans = dataset.GetGeoTransform()
    proj = dataset.GetProjection()
    data = dataset.ReadAsArray(0, 0, width, height)
    # Drop the reference so GDAL flushes/closes the dataset.
    del dataset
    return proj, geotrans, width, height, data
def write_img(filename, im_proj, im_geotrans, im_data):
    """Write a numpy array to a GeoTIFF with the given projection and
    geotransform.  Accepts single-band (2-d) and multi-band (3-d,
    band-first) arrays."""
    # Pick the GDAL sample type from the numpy dtype name.  Note the
    # 'int8' substring test also matches 'uint8'.
    dtype_name = im_data.dtype.name
    if 'int8' in dtype_name:
        datatype = gdal.GDT_Byte
    elif 'int16' in dtype_name:
        datatype = gdal.GDT_UInt16
    else:
        datatype = gdal.GDT_Float32

    if len(im_data.shape) == 3:
        im_bands, im_height, im_width = im_data.shape
    else:
        im_bands = 1
        im_height, im_width = im_data.shape

    driver = gdal.GetDriverByName("GTiff")
    dataset = driver.Create(filename, im_width, im_height, im_bands, datatype)

    dataset.SetGeoTransform(im_geotrans)
    dataset.SetProjection(im_proj)

    if im_bands == 1:
        dataset.GetRasterBand(1).WriteArray(im_data)
    else:
        for band in range(im_bands):
            dataset.GetRasterBand(band + 1).WriteArray(im_data[band])
def gdal_image_clip(inpath, outpath, new_width=500, stride=200):
    """Tile every .png image in *inpath* into new_width x new_width
    patches and write them to *outpath* as <name>_<n>.png.

    Patches are taken on a regular grid with the given stride; a patch
    that would run past the right or bottom edge is snapped back so it
    ends exactly on the border (edge patches therefore overlap their
    neighbours).
    """
    test_im_dir = os.listdir(inpath)
    for name in test_im_dir:
        if name[-4:] == '.png':
            print("dealing the ",name," ...")
            img = os.path.join(inpath, name)
            # NOTE(review): assumes ReadAsArray returns a (bands, h, w)
            # array; a single-band image would break the transpose below.
            im_proj,im_geotrans,im_width, im_height,im_data = read_img(img)
            new_w = im_width
            new_h = im_height
            extent_data = im_data
            count = 0  # (unused)
            i = 0      # top row of the current patch
            num_ = 0   # running patch index used in output filenames
            filename = name[:-4]
            # "i in range(new_h)" is just a bounds check (0 <= i < new_h);
            # the loops exit by setting i/j past the range.
            while i in range(new_h):
                j=0
                if (new_h-i) >=new_width:
                    # Full-height row of patches.
                    while j in range(new_w):
                        if (new_w-j) >=new_width:
                            num_=num_+1
                            im_data_m=extent_data[:,i:i+new_width,j:j+new_width]
                            patch_path = os.path.join(outpath, filename + '_' + str(num_) + '.png')
                            # Band-first -> height, width, bands for OpenCV.
                            im_data_m = im_data_m.transpose(1,2,0)
                            # NOTE(review): cv2.cv2 relies on the wrapper
                            # re-exporting itself; cv2.IMWRITE_PNG_COMPRESSION
                            # is the usual spelling.
                            cv2.imwrite(patch_path, im_data_m, [int(cv2.cv2.IMWRITE_PNG_COMPRESSION),0])
                            j=j+stride
                        if (new_w-j) <new_width:
                            # Last patch of the row: snap to the right edge.
                            num_=num_+1
                            im_data_m=extent_data[:,i:i+new_width,new_w-new_width:new_w]
                            patch_path = os.path.join(outpath, filename + '_' + str(num_) + '.png')
                            im_data_m = im_data_m.transpose(1,2,0)
                            cv2.imwrite(patch_path, im_data_m, [int(cv2.cv2.IMWRITE_PNG_COMPRESSION),0])
                            j=new_w+1  # exit the inner loop
                    i=i+stride
                else :
                    # Bottom row of patches: snap to the bottom edge.
                    while j in range(new_w):
                        if (new_w-j) >=new_width:
                            num_=num_+1
                            im_data_m=extent_data[:,new_h-new_width:new_h,j:j+new_width]
                            patch_path = os.path.join(outpath, filename + '_' + str(num_) + '.png')
                            im_data_m = im_data_m.transpose(1,2,0)
                            cv2.imwrite(patch_path, im_data_m, [int(cv2.cv2.IMWRITE_PNG_COMPRESSION),0])
                            j=j+stride
                        if (new_w-j) <new_width:
                            # Bottom-right corner patch.
                            num_=num_+1
                            im_data_m=extent_data[:,new_h-new_width:new_h,new_w-new_width:new_w]
                            patch_path = os.path.join(outpath, filename + '_' + str(num_) + '.png')
                            im_data_m = im_data_m.transpose(1,2,0)
                            cv2.imwrite(patch_path, im_data_m, [int(cv2.cv2.IMWRITE_PNG_COMPRESSION),0])
                            j=new_w+1  # exit the inner loop
                    i=new_h+1  # exit the outer loop
def gdal_label_clip(inpath, outpath, new_width=500, stride=200):
    """Tile every .png label mask in *inpath* into new_width x new_width
    patches and write them to *outpath* as <name>_<n>.png.

    Same tiling scheme as gdal_image_clip(), but for single-band (2-d)
    label rasters, so no band transpose is needed.  Edge patches are
    snapped back to end exactly on the image border.
    """
    test_im_dir = os.listdir(inpath)
    for name in test_im_dir:
        if name[-4:] == '.png':
            print("dealing the ",name," ...")
            img = os.path.join(inpath, name)
            im_proj,im_geotrans,im_width, im_height,im_data = read_img(img)
            new_w = im_width
            new_h = im_height
            extent_data = im_data
            count = 0  # (unused)
            i = 0      # top row of the current patch
            num_ = 0   # running patch index used in output filenames
            filename = name[:-4]
            # Bounds-check-as-membership; loops exit by pushing i/j past
            # the range, as in gdal_image_clip().
            while i in range(new_h):
                j=0
                if (new_h-i) >=new_width:
                    # Full-height row of patches.
                    while j in range(new_w):
                        if (new_w-j) >=new_width:
                            num_=num_+1
                            im_data_m = extent_data[ i:i + new_width, j:j + new_width]
                            patch_path = os.path.join(outpath, filename + '_' + str(num_) + '.png')
                            print(patch_path)  # leftover debug output (only this branch prints)
                            cv2.imwrite(patch_path, im_data_m, [int(cv2.cv2.IMWRITE_PNG_COMPRESSION),0])
                            j=j+stride
                        if (new_w-j) <new_width:
                            # Last patch of the row: snap to the right edge.
                            num_=num_+1
                            im_data_m = extent_data[ i:i + new_width, new_w - new_width:new_w]
                            patch_path = os.path.join(outpath, filename + '_' + str(num_) + '.png')
                            cv2.imwrite(patch_path, im_data_m, [int(cv2.cv2.IMWRITE_PNG_COMPRESSION),0])
                            j=new_w+1  # exit the inner loop
                    i=i+stride
                else :
                    # Bottom row of patches: snap to the bottom edge.
                    while j in range(new_w):
                        if (new_w-j) >=new_width:
                            num_=num_+1
                            im_data_m = extent_data[ new_h - new_width:new_h, j:j + new_width]
                            patch_path = os.path.join(outpath, filename + '_' + str(num_) + '.png')
                            cv2.imwrite(patch_path, im_data_m, [int(cv2.cv2.IMWRITE_PNG_COMPRESSION),0])
                            j=j+stride
                        if (new_w-j) <new_width:
                            # Bottom-right corner patch.
                            num_=num_+1
                            im_data_m = extent_data[ new_h - new_width:new_h, new_w - new_width:new_w]
                            patch_path = os.path.join(outpath, filename + '_' + str(num_) + '.png')
                            cv2.imwrite(patch_path, im_data_m, [int(cv2.cv2.IMWRITE_PNG_COMPRESSION),0])
                            j=new_w+1  # exit the inner loop
                    i=new_h+1  # exit the outer loop
if __name__ == '__main__':
    # Script entry point: tile the validation images and their label
    # masks into non-overlapping 256x256 patches (stride == patch size).
    print('Clip image...')
    in_img_path = 'C:\\Users\\Charm Luo\\Desktop\\my-data\\duofenlei\\DEN-SENET\\camvid\\valid_images\\'
    out_img_path = 'C:\\Users\\Charm Luo\\Desktop\\my-data\\duofenlei\\DEN-SENET\\camvid\\valid_images_cut\\'
    gdal_image_clip(in_img_path, out_img_path, new_width=256, stride=256)

    print('Clip label...')
    in_label_path = 'C:\\Users\\Charm Luo\\Desktop\\my-data\\duofenlei\\DEN-SENET\\camvid\\valid_labels\\'
    out_label_path = 'C:\\Users\\Charm Luo\\Desktop\\my-data\\duofenlei\\DEN-SENET\\camvid\\valid_labels_cut\\'
    gdal_label_clip(in_label_path, out_label_path, new_width=256, stride=256)
acf5ca1a37b69389256227c570c65eed96e3228e | 2,251 | py | Python | venv/Lib/site-packages/pycparser/ply/ygen.py | gilbertekalea/booking.com_crawler | 71e52c87cd72a77f80a3e5fc0af0e1a68a5712ae | [
"MIT"
] | 9,953 | 2019-04-03T23:41:04.000Z | 2022-03-31T11:54:44.000Z | venv/Lib/site-packages/pycparser/ply/ygen.py | gilbertekalea/booking.com_crawler | 71e52c87cd72a77f80a3e5fc0af0e1a68a5712ae | [
"MIT"
] | 4,640 | 2015-07-08T16:19:08.000Z | 2019-12-02T15:01:27.000Z | lib/python2.7/site-packages/pycparser/ply/ygen.py | anish03/weather-dash | d517fa9da9028d1fc5d8fd71d77cee829ddee87b | [
"MIT"
] | 2,803 | 2019-04-06T13:15:33.000Z | 2022-03-31T07:42:01.000Z | # ply: ygen.py
#
# This is a support program that auto-generates different versions of the YACC parsing
# function with different features removed for the purposes of performance.
#
# Users should edit the method LParser.parsedebug() in yacc.py. The source code
# for that method is then used to create the other methods. See the comments in
# yacc.py for further details.
import os.path
import shutil
def get_source_range(lines, tag):
    """Locate the region delimited by '#--! <tag>-start' and
    '#--! <tag>-end' marker lines in a list of source lines.

    Returns (start, end) such that lines[start:end] is the text strictly
    between the two marker lines.
    """
    numbered = enumerate(lines)
    start_marker = '#--! %s-start' % tag
    end_marker = '#--! %s-end' % tag
    for start_index, text in numbered:
        if text.strip().startswith(start_marker):
            break
    # Keep consuming the same enumerator, so the end marker is only
    # searched for after the start marker.
    for end_index, text in numbered:
        if text.strip().endswith(end_marker):
            break
    return (start_index + 1, end_index)
def filter_section(lines, tag):
    """Drop the regions delimited by '#--! <tag>' marker lines.

    Marker lines toggle inclusion and are themselves always dropped, so
    text between an opening and closing marker is removed while all
    other lines pass through unchanged.
    """
    marker = '#--! %s' % tag
    kept = []
    include = True
    for text in lines:
        if text.strip().startswith(marker):
            include = not include
        elif include:
            kept.append(text)
    return kept
def main():
    # Regenerate yacc.py in place: the parseopt and parseopt-notrack
    # sections are rebuilt from the master parsedebug source by
    # stripping its DEBUG and TRACKING sections.
    dirname = os.path.dirname(__file__)
    # Keep a backup of the original before modifying it.
    shutil.copy2(os.path.join(dirname, 'yacc.py'), os.path.join(dirname, 'yacc.py.bak'))
    with open(os.path.join(dirname, 'yacc.py'), 'r') as f:
        lines = f.readlines()

    parse_start, parse_end = get_source_range(lines, 'parsedebug')
    parseopt_start, parseopt_end = get_source_range(lines, 'parseopt')
    parseopt_notrack_start, parseopt_notrack_end = get_source_range(lines, 'parseopt-notrack')

    # Get the original source
    orig_lines = lines[parse_start:parse_end]

    # Filter the DEBUG sections out
    parseopt_lines = filter_section(orig_lines, 'DEBUG')

    # Filter the TRACKING sections out
    parseopt_notrack_lines = filter_section(parseopt_lines, 'TRACKING')

    # Replace the parser source sections with updated versions
    # (presumably the notrack section comes later in yacc.py, so
    # replacing it first keeps the earlier indices valid -- depends on
    # the section order in yacc.py).
    lines[parseopt_notrack_start:parseopt_notrack_end] = parseopt_notrack_lines
    lines[parseopt_start:parseopt_end] = parseopt_lines

    # Normalise trailing whitespace before writing back.
    lines = [line.rstrip()+'\n' for line in lines]

    with open(os.path.join(dirname, 'yacc.py'), 'w') as f:
        f.writelines(lines)

    print('Updated yacc.py')
main()
| 30.013333 | 94 | 0.684585 |
acf5ca8501fea344a2ab947a0a5895d4d7fcfe95 | 5,176 | py | Python | graphs/reorder.py | vishalbelsare/graphs | 4fbeb025dfe33340335f34300f58dd3809228822 | [
"MIT"
] | 15 | 2015-12-31T21:48:56.000Z | 2020-11-09T13:34:41.000Z | graphs/reorder.py | perimosocordiae/graphs | 4fbeb025dfe33340335f34300f58dd3809228822 | [
"MIT"
] | null | null | null | graphs/reorder.py | perimosocordiae/graphs | 4fbeb025dfe33340335f34300f58dd3809228822 | [
"MIT"
] | 7 | 2015-09-18T14:26:00.000Z | 2018-10-21T11:46:11.000Z | '''Sparse symmetric matrix reordering to reduce bandwidth/diagonalness.
Methods:
- cuthill_mckee
- node_centroid_hill_climbing
- laplacian_reordering
References:
- ftp://ftp.numerical.rl.ac.uk/pub/talks/jas.ala06.24VII06.pdf
- http://www.jstor.org/stable/2156090 (profile defn, NYI RCM improvements)
- https://www.cs.purdue.edu/homes/apothen/env3.pdf (laplacian, NYI sloan alg)
'''
from __future__ import absolute_import, print_function
from collections import deque
import numpy as np
import scipy.sparse.csgraph as ssc
from graphs import Graph
from .mini_six import range
__all__ = [
'permute_graph', 'cuthill_mckee', 'node_centroid_hill_climbing',
'laplacian_reordering'
]
def permute_graph(G, order):
    """Reorder the graph's vertices, returning a copy of the input graph.

    order : integer array-like, some permutation of range(G.num_vertices()).
    """
    dense = G.matrix('dense')
    # Apply the permutation to both rows and columns at once.
    reordered = dense[np.ix_(order, order)]
    return Graph.from_adj_matrix(reordered)
def _cuthill_mckee(G):
    # Pure-python Cuthill-McKee ordering; used as a fallback when scipy
    # does not provide reverse_cuthill_mckee (see the hasattr check below).
    # NOTE(review): unlike scipy's version, this does not reverse the
    # final order -- confirm the two variants are interchangeable here.
    n = G.num_vertices()
    queue = deque([])
    result = []
    degree = G.degree()
    # Vertices not yet placed, mapped to their degree (for min-degree
    # selection and tie-breaking).
    remaining = dict(enumerate(degree))
    adj = G.matrix('dense', 'csr')
    while len(result) != n:
        # Start each connected component from its minimum-degree vertex.
        queue.append(min(remaining, key=remaining.get))
        while queue:
            p = queue.popleft()
            if p not in remaining:
                continue  # already placed via an earlier queue entry
            result.append(p)
            del remaining[p]
            # Enqueue unvisited neighbors in order of increasing degree.
            nbrs = [c for c in np.where(adj[p])[0] if c in remaining]
            queue.extend(sorted(nbrs, key=remaining.get))
    return permute_graph(G, np.array(result))
# Prefer scipy's implementation of reverse Cuthill-McKee when this scipy
# version provides it; otherwise fall back to the pure-python version.
if hasattr(ssc, 'reverse_cuthill_mckee'):  # pragma: no cover
  def cuthill_mckee(G):
    sG = G.matrix('csr')
    order = ssc.reverse_cuthill_mckee(sG, symmetric_mode=True)
    return permute_graph(G, order)
else:  # pragma: no cover
  cuthill_mckee = _cuthill_mckee
# Either implementation is exposed under the same name and docstring.
cuthill_mckee.__doc__ = 'Reorder vertices using the Cuthill-McKee algorithm.'
def laplacian_reordering(G):
    """Reorder vertices using the eigenvector of the graph Laplacian
    corresponding to the first positive eigenvalue."""
    laplacian = G.laplacian()
    eigvals, eigvecs = np.linalg.eigh(laplacian)
    # Index of the smallest strictly-positive eigenvalue.
    idx = np.argmax(eigvals == eigvals[eigvals > 0].min())
    fiedler = eigvecs[:, idx]
    # Sorting vertices by their component in that eigenvector gives the
    # new ordering.
    return permute_graph(G, np.argsort(fiedler))
def node_centroid_hill_climbing(G, relax=1, num_centerings=20, verbose=False):
    """Iterative reordering method based on alternating rounds of
    node-centering and hill-climbing search."""
    # Seed with a breadth-first ordering from a random start vertex.
    order = _breadth_first_order(G)
    for _ in range(num_centerings):
        bw = permute_graph(G, order).bandwidth()
        centered = _node_center(G, order, relax=relax)
        centered_bw = permute_graph(G, centered).bandwidth()
        # Keep the node-centered order only when it tightens the bandwidth.
        if centered_bw < bw:
            if verbose:  # pragma: no cover
                print('post-center', bw, centered_bw)
            order = centered
        order = _hill_climbing(G, order, verbose=verbose)
    return permute_graph(G, order)
def _breadth_first_order(G):
    """Order vertices by repeated breadth-first search from random start
    vertices, so every (possibly disconnected) component is covered."""
    unvisited = np.arange(G.num_vertices())
    adj = G.matrix('dense', 'csr')
    total_order = []
    while len(unvisited) > 0:
        component = ssc.breadth_first_order(adj, np.random.choice(unvisited),
                                            return_predecessors=False)
        # Remove the vertices just visited and append them to the order.
        unvisited = np.setdiff1d(unvisited, component, assume_unique=True)
        total_order = np.append(total_order, component)
    return total_order.astype(int)
def _critical_vertices(G, order, relax=1, bw=None):
  # Yield (i, j) position pairs (in the permuted ordering) joined by an
  # edge whose span |i - j| is "critical": exactly the bandwidth when
  # relax == 1, or at least relax * bandwidth otherwise.
  go = permute_graph(G, order)
  if bw is None:
    bw = go.bandwidth()
  adj = go.matrix('dense')
  if relax == 1:
    # Fast path: only edges exactly on the bw-th subdiagonal qualify.
    for i in np.where(np.diag(adj, -bw))[0]:
      yield bw + i, i
  else:
    crit = relax * bw
    # All lower-triangle edges at or beyond the relaxed threshold.
    for u, v in np.transpose(np.where(np.tril(adj, -np.floor(crit)))):
      if np.abs(u-v) >= crit:
        yield u, v
def _node_center(G, order, relax=0.99):
  '''Produce a new order by pulling each endpoint of a near-critical edge
  toward its partner's position (averaging positions), then re-sorting.'''
  weights = order.copy().astype(float)
  counts = np.ones_like(order)
  # inv_order maps a position back to the vertex occupying it.
  inv_order = np.argsort(order)
  for i, j in _critical_vertices(G, order, relax):
    u = inv_order[i]
    v = inv_order[j]
    weights[u] += j  # order[v]: partner's position
    counts[u] += 1
    weights[v] += i  # order[u]: partner's position
    counts[v] += 1
  # Average accumulated positions; argsort turns them into a permutation.
  weights /= counts
  return np.argsort(weights)
def _hill_climbing(G, order, verbose=False):
  '''Greedy local search: swap a critical-edge endpoint with a vertex placed
  outside the critical span whenever that lowers the bandwidth, or keeps it
  equal while reducing the number of critical edges. Repeats to a local
  optimum.'''
  B = permute_graph(G, order).bandwidth()
  while True:
    inv_order = np.argsort(order)
    for i, j in _critical_vertices(G, order, bw=B):
      u = inv_order[i]
      v = inv_order[j]
      for w,k in enumerate(order):
        # Only consider swap partners positioned strictly outside [i, j].
        if not (k < i or k > j):
          continue
        new_order = order.copy()
        # Swap whichever endpoint is on the same side as the partner.
        if k < i:
          new_order[[u,w]] = new_order[[w,u]]
        elif k > j:
          new_order[[v,w]] = new_order[[w,v]]
        new_B = permute_graph(G, new_order).bandwidth()
        if new_B < B:
          order = new_order
          if verbose:  # pragma: no cover
            print('improved B', B, new_B)
          B = new_B
          break
        elif new_B == B:
          # Tie on bandwidth: accept only if the critical-edge count drops.
          nc = sum(1 for _ in _critical_vertices(G, order, bw=B))
          new_nc = sum(1 for _ in _critical_vertices(G, new_order, bw=B))
          if new_nc < nc:
            order = new_order
            if verbose:  # pragma: no cover
              print('improved nc', nc, new_nc)
            break
      else:
        # No improving swap for this critical edge; try the next one.
        continue
      # A swap was accepted: restart the critical-edge scan from scratch.
      break
    else:
      # No critical edge admits any improving swap: local optimum reached.
      break
  return order
| 30.447059 | 80 | 0.648957 |
acf5ca9396c6e80a75509585d9abecc5dbb2cdb0 | 5,519 | py | Python | tests/st/networks/test_gpu_lstm.py | unseenme/mindspore | 4ba052f0cd9146ac0ccc4880a778706f1b2d0af8 | [
"Apache-2.0"
] | 2 | 2020-04-28T03:49:10.000Z | 2020-04-28T03:49:13.000Z | tests/st/networks/test_gpu_lstm.py | unseenme/mindspore | 4ba052f0cd9146ac0ccc4880a778706f1b2d0af8 | [
"Apache-2.0"
] | null | null | null | tests/st/networks/test_gpu_lstm.py | unseenme/mindspore | 4ba052f0cd9146ac0ccc4880a778706f1b2d0af8 | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import pytest
import numpy as np
import mindspore.context as context
import mindspore.nn as nn
from mindspore import Tensor
from mindspore.nn.optim import Momentum
from mindspore.ops import operations as P
from mindspore.nn import TrainOneStepCell, WithLossCell
from mindspore.nn import Dense
from mindspore.common.initializer import initializer
from mindspore.common.parameter import Parameter
context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
def InitialLstmWeight(input_size, hidden_size, num_layers, bidirectional, has_bias=False):
    """Build the flattened LSTM weight parameter plus initial hidden/cell
    state parameters for P.LSTM.

    Returns (h, c, w): initial hidden state, initial cell state, and the
    flat weight blob, all as mindspore Parameters.

    NOTE(review): this reads the module-level `batch_size` defined further
    down in the file, not a parameter — works only because callers run after
    that assignment.
    """
    num_directions = 1
    if bidirectional:
        num_directions = 2
    weight_size = 0
    gate_size = 4 * hidden_size  # 4 gates (i, f, g, o) per cell
    # Accumulate the flat size of input-hidden + hidden-hidden weights for
    # every layer/direction; layers after the first consume the (possibly
    # doubled) hidden output of the previous layer.
    for layer in range(num_layers):
        for d in range(num_directions):
            input_layer_size = input_size if layer == 0 else hidden_size * num_directions
            weight_size += gate_size * input_layer_size
            weight_size += gate_size * hidden_size
            if has_bias:
                weight_size += 2 * gate_size
    # All weights initialized to the constant 0.01.
    w_np = np.ones([weight_size, 1, 1]).astype(np.float32) * 0.01
    w = Parameter(initializer(Tensor(w_np), w_np.shape), name='w')
    # Initial hidden/cell states are all-ones, shape
    # (num_layers * num_directions, batch_size, hidden_size).
    h = Parameter(initializer(
        Tensor(np.ones((num_layers * num_directions, batch_size, hidden_size)).astype(np.float32)),
        [num_layers * num_directions, batch_size, hidden_size]), name='h')
    c = Parameter(initializer(
        Tensor(np.ones((num_layers * num_directions, batch_size, hidden_size)).astype(np.float32)),
        [num_layers * num_directions, batch_size, hidden_size]), name='c')
    return h, c, w
class SentimentNet(nn.Cell):
    """Sentiment classifier: frozen embedding -> LSTM encoder -> dense head.

    The decoder input concatenates the LSTM outputs at the first and last
    time steps.
    """
    def __init__(self, vocab_size, embed_size, num_hiddens, num_layers,
                 bidirectional, weight, labels, batch_size):
        super(SentimentNet, self).__init__()
        self.num_hiddens = num_hiddens
        self.num_layers = num_layers
        self.bidirectional = bidirectional
        self.batch_size = batch_size
        # Embedding table is supplied externally and kept frozen.
        self.embedding = nn.Embedding(vocab_size, embed_size, use_one_hot=False, embedding_table=Tensor(weight))
        self.embedding.embedding_table.requires_grad = False
        self.trans = P.Transpose()
        # LSTM expects time-major input: (seq_len, batch, embed).
        self.perm = (1, 0, 2)
        self.h, self.c, self.w = InitialLstmWeight(embed_size, num_hiddens, num_layers, bidirectional)
        self.encoder = P.LSTM(input_size=embed_size, hidden_size=self.num_hiddens,
                              num_layers=num_layers, has_bias=False,
                              bidirectional=self.bidirectional, dropout=0.0)
        self.concat = P.Concat(2)
        # Decoder input is first+last step concat, hence the factor of 2
        # (and another factor of 2 when bidirectional).
        if self.bidirectional:
            self.decoder = nn.Dense(num_hiddens * 4, labels)
        else:
            self.decoder = nn.Dense(num_hiddens * 2, labels)
        self.slice1 = P.Slice()
        self.slice2 = P.Slice()
        self.reshape = P.Reshape()
        self.num_direction = 1
        if bidirectional:
            self.num_direction = 2

    def construct(self, inputs):
        embeddings = self.embedding(inputs)
        embeddings = self.trans(embeddings, self.perm)
        output, hidden = self.encoder(embeddings, self.h, self.c, self.w)
        # First time step and last time step of the sequence.
        # NOTE(review): the slice sizes (1, 64, 200) and start index 499
        # hard-code batch=64, 2*hidden=200, seq_len=500 — matches the test
        # below but breaks for other shapes; verify before reuse.
        output0 = self.slice1(output, (0, 0, 0), (1, 64, 200))
        output1 = self.slice2(output, (499, 0, 0), (1, 64, 200))
        encoding = self.concat((output0, output1))
        encoding = self.reshape(encoding, (self.batch_size, self.num_hiddens * self.num_direction * 2))
        outputs = self.decoder(encoding)
        return outputs
batch_size = 64
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_LSTM():
    """Train SentimentNet for a few steps on constant fake data and assert
    the loss converges below 0.01 (smoke test for the GPU LSTM kernel)."""
    num_epochs = 5
    embed_size = 100
    num_hiddens = 100
    num_layers = 2
    bidirectional = True
    labels = 2
    vocab_size = 252193
    max_len = 500
    # Constant (all-ones) embedding table; the net freezes it anyway.
    weight = np.ones((vocab_size+1, embed_size)).astype(np.float32)
    net = SentimentNet(vocab_size=(vocab_size+1), embed_size=embed_size,
                       num_hiddens=num_hiddens, num_layers=num_layers,
                       bidirectional=bidirectional, weight=weight,
                       labels=labels, batch_size=batch_size)
    learning_rate = 0.1
    momentum = 0.9
    # Optimize only trainable parameters (embedding table is excluded).
    optimizer = Momentum(filter(lambda x: x.requires_grad, net.get_parameters()), learning_rate, momentum)
    criterion = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True)
    net_with_criterion = WithLossCell(net, criterion)
    train_network = TrainOneStepCell(net_with_criterion, optimizer)  # optimizer
    train_network.set_train()
    # Fixed fake batch: identical tokens/labels every epoch, so the loss
    # should drop quickly if gradients flow correctly.
    train_features = Tensor(np.ones([64, max_len]).astype(np.int32))
    train_labels = Tensor(np.ones([64, ]).astype(np.int32)[0:64])
    losses = []
    for epoch in range(num_epochs):
        loss = train_network(train_features, train_labels)
        losses.append(loss)
        print("loss:", loss.asnumpy())
    assert(losses[-1].asnumpy() < 0.01)
| 38.326389 | 112 | 0.671498 |
acf5cb899d4263ae6a7789132cb58e18e4eea786 | 388 | py | Python | inference/src/ops/gpu_ops.py | Sergio0694/sepconv-gan | 82d908ed5c3dd55d7b2f8603450dac5108751a3b | [
"MIT"
] | 1 | 2021-08-07T16:30:05.000Z | 2021-08-07T16:30:05.000Z | inference/src/ops/gpu_ops.py | Sergio0694/sepconv-gan | 82d908ed5c3dd55d7b2f8603450dac5108751a3b | [
"MIT"
] | null | null | null | inference/src/ops/gpu_ops.py | Sergio0694/sepconv-gan | 82d908ed5c3dd55d7b2f8603450dac5108751a3b | [
"MIT"
] | null | null | null | import os
import tensorflow as tf
SEPCONV_SO_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'sepconv.so')
NEAREST_SHADER_SO_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'nearest_shader.so')
def load_ops():
    '''Loads the custom GPU ops used in the network.

    Registers the compiled sepconv and nearest-shader kernels (.so files next
    to this module) with the TensorFlow runtime; call before building any
    graph that uses these ops.
    '''
    tf.load_op_library(SEPCONV_SO_PATH)
    tf.load_op_library(NEAREST_SHADER_SO_PATH)
| 32.333333 | 102 | 0.762887 |
acf5ce9307ab864eb265bbbc56d885268d21c6f1 | 2,926 | py | Python | mmedit/datasets/generation_unpaired_dataset.py | Yshuo-Li/mmediting-test | ff8349a183b3d266495a53be0c8ad8e342e8b461 | [
"Apache-2.0"
] | 2 | 2021-04-20T11:31:37.000Z | 2021-05-27T13:04:40.000Z | mmedit/datasets/generation_unpaired_dataset.py | Yshuo-Li/mmediting-test | ff8349a183b3d266495a53be0c8ad8e342e8b461 | [
"Apache-2.0"
] | 1 | 2021-08-05T16:20:39.000Z | 2021-08-05T16:20:39.000Z | mmedit/datasets/generation_unpaired_dataset.py | Yshuo-Li/mmediting-test | ff8349a183b3d266495a53be0c8ad8e342e8b461 | [
"Apache-2.0"
] | 2 | 2021-04-22T12:10:14.000Z | 2021-05-19T02:09:48.000Z | import os.path as osp
import numpy as np
from .base_generation_dataset import BaseGenerationDataset
from .registry import DATASETS
@DATASETS.register_module()
class GenerationUnpairedDataset(BaseGenerationDataset):
    """General unpaired image folder dataset for image generation.

    It assumes that the training directory of images from domain A is
    '/path/to/data/trainA', and that from domain B is '/path/to/data/trainB',
    respectively. '/path/to/data' can be initialized by args 'dataroot'.
    During test time, the directory is '/path/to/data/testA' and
    '/path/to/data/testB', respectively.

    Args:
        dataroot (str | :obj:`Path`): Path to the folder root of unpaired
            images.
        pipeline (List[dict | callable]): A sequence of data transformations.
        test_mode (bool): Store `True` when building test dataset.
            Default: `False`.
    """

    def __init__(self, dataroot, pipeline, test_mode=False):
        super().__init__(pipeline, test_mode)
        phase = 'test' if test_mode else 'train'
        self.dataroot_a = osp.join(str(dataroot), phase + 'A')
        self.dataroot_b = osp.join(str(dataroot), phase + 'B')
        self.data_infos_a = self.load_annotations(self.dataroot_a)
        self.data_infos_b = self.load_annotations(self.dataroot_b)
        self.len_a = len(self.data_infos_a)
        self.len_b = len(self.data_infos_b)

    def load_annotations(self, dataroot):
        """Load unpaired image paths of one domain.

        Args:
            dataroot (str): Path to the folder root for unpaired images of
                one domain.

        Returns:
            list[dict]: List that contains unpaired image paths of one domain.
        """
        data_infos = []
        # Sort for a deterministic ordering across runs/filesystems.
        paths = sorted(self.scan_folder(dataroot))
        for path in paths:
            data_infos.append(dict(path=path))
        return data_infos

    def prepare_train_data(self, idx):
        """Prepare unpaired training data.

        Pairs the idx-th image of domain A (modulo len_a) with a *randomly*
        chosen image of domain B, as is standard for unpaired training.

        Args:
            idx (int): Index of current batch.

        Returns:
            dict: Prepared training data batch.
        """
        img_a_path = self.data_infos_a[idx % self.len_a]['path']
        idx_b = np.random.randint(0, self.len_b)
        img_b_path = self.data_infos_b[idx_b]['path']
        results = dict(img_a_path=img_a_path, img_b_path=img_b_path)
        return self.pipeline(results)

    def prepare_test_data(self, idx):
        """Prepare unpaired test data.

        Unlike training, both domains are indexed deterministically so test
        results are reproducible.

        Args:
            idx (int): Index of current batch.

        Returns:
            dict: Prepared test data batch.
        """
        img_a_path = self.data_infos_a[idx % self.len_a]['path']
        img_b_path = self.data_infos_b[idx % self.len_b]['path']
        results = dict(img_a_path=img_a_path, img_b_path=img_b_path)
        return self.pipeline(results)

    def __len__(self):
        # One epoch covers the larger domain; the smaller one wraps around.
        return max(self.len_a, self.len_b)
| 34.833333 | 78 | 0.642174 |
acf5ceb97dd2e90767a2b920e53e113f45fb5e83 | 2,978 | py | Python | bistory.py | sivel/bistory | 6652bcc027962e6a997f760a04228e78e575f90c | [
"Apache-2.0"
] | 15 | 2018-06-28T21:58:28.000Z | 2021-08-30T18:02:05.000Z | bistory.py | sivel/bistory | 6652bcc027962e6a997f760a04228e78e575f90c | [
"Apache-2.0"
] | null | null | null | bistory.py | sivel/bistory | 6652bcc027962e6a997f760a04228e78e575f90c | [
"Apache-2.0"
] | 7 | 2018-06-28T22:09:54.000Z | 2021-09-27T02:36:38.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2018 Matt Martz
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import unicode_literals
import fcntl
import os
import re
import sys
import termios
from prompt_toolkit import PromptSession
from prompt_toolkit.completion import Completer, Completion
from prompt_toolkit.enums import DEFAULT_BUFFER
from prompt_toolkit.filters import has_focus
from prompt_toolkit.key_binding import KeyBindings
__version__ = '1.1.0'
class HistoryCompleter(Completer):
    """prompt_toolkit completer that fuzzy-matches against the bash history
    file (``$HISTFILE`` or ``~/.bash_history``), most recent entries first."""

    def __init__(self):
        self._hist_file = os.getenv('HISTFILE', '~/.bash_history')
        # Lazily-loaded, cached history bytes (newest line first).
        self._history = None

    @property
    def history(self):
        """History file contents as bytes, reversed line order, with
        timestamp/comment lines (starting with '#') stripped. Cached after
        the first read."""
        if self._history:
            return self._history
        with open(os.path.expanduser(self._hist_file), 'rb') as f:
            self._history = b''.join(l for l in f.readlines()[::-1]
                                     if not l.startswith(b'#'))
        return self._history

    def _search(self, text):
        """Yield up to 25 history lines matching the query words in order,
        case-insensitively, with anything between them ('.*' join)."""
        line = '.*'.join(re.escape(w) for w in text.split())
        _text = b'^(?<!#)(.*)(%s)(.*)$' % line.encode()
        matches = re.finditer(_text, self.history, flags=re.I | re.M)
        # Cap results at 25 to keep the completion menu responsive.
        for _ in range(25):
            try:
                match = next(matches)
            except StopIteration:
                break
            else:
                yield match.group().decode()

    def get_completions(self, document, complete_event):
        # Replace the whole typed text (negative start position) with the
        # selected history line.
        for match in self._search(document.text):
            yield Completion(match, -document.cursor_position)
def main():
    """Prompt for a history search and, if a command was chosen, inject it
    into the terminal input queue so it appears as if the user typed it."""
    key_bindings = KeyBindings()
    default_focused = has_focus(DEFAULT_BUFFER)

    # Autocomplete with backspace
    @key_bindings.add('backspace', filter=default_focused)
    def _(event):
        # Deleting a character re-triggers completion via the no-op insert.
        event.current_buffer.delete_before_cursor()
        event.current_buffer.insert_text('')

    session = PromptSession(
        wrap_lines=False,
        completer=HistoryCompleter(),
        key_bindings=key_bindings,
    )
    text = '%s\n' % session.prompt('> ')
    if text.strip():
        # TIOCSTI pushes each character into the controlling terminal's
        # input buffer, simulating keyboard input of the chosen command.
        for c in text:
            fcntl.ioctl(sys.stdout, termios.TIOCSTI, c)
def shell():
    """Entry point: run the prompt and clean up the screen lines it used.

    The ANSI escapes move the cursor up ('\\033[F') and clear the line
    ('\\033[K') so the prompt leaves no residue in the shell.
    """
    sys.stdout.write('\033[F')
    sys.stdout.flush()
    try:
        main()
    except (KeyboardInterrupt, EOFError):
        # Aborted: only one line to erase.
        sys.stdout.write('\033[F\033[K')
    else:
        # Completed: erase both the prompt line and the echoed line.
        sys.stdout.write('\033[F\033[K\033[F\033[K')
if __name__ == '__main__':
shell()
| 28.361905 | 78 | 0.641706 |
acf5cf9ad1351231f8fc3b64341c2a104a4d65e5 | 16,220 | py | Python | botlang/parser/s_expressions.py | BotCenter/botlang2 | 542a8c80846d211f61ba45605c0fb0b221370186 | [
"MIT"
] | 1 | 2020-11-27T14:41:47.000Z | 2020-11-27T14:41:47.000Z | botlang/parser/s_expressions.py | BotCenter/botlang2 | 542a8c80846d211f61ba45605c0fb0b221370186 | [
"MIT"
] | 8 | 2019-01-03T17:33:14.000Z | 2019-07-15T21:16:30.000Z | botlang/parser/s_expressions.py | BotCenter/botlang2 | 542a8c80846d211f61ba45605c0fb0b221370186 | [
"MIT"
] | 1 | 2019-05-01T22:13:07.000Z | 2019-05-01T22:13:07.000Z | import ast as python_ast
from botlang.ast.ast import *
from botlang.evaluation.oop import OopHelper
from botlang.evaluation.values import Nil
class BotLangSyntaxError(Exception):
    """Raised when BotLang source cannot be converted into a valid AST."""
    # The original defined an __init__ that only forwarded `message` to
    # Exception.__init__ — that is exactly what Exception already does, so
    # the override was redundant and has been removed. Construction and
    # str() behavior are unchanged.
    pass
class SExpression(object):
    """
    https://en.wikipedia.org/wiki/S-expression

    Abstract base for the two concrete node kinds, Atom (leaf token) and
    Tree (parenthesized list). Subclasses must implement to_ast / accept /
    copy and override the is_tree / is_atom class predicates.
    """
    # Paren characters treated as interchangeable delimiters by the parser.
    OPENING_PARENS = ['(', '[', '{']
    CLOSING_PARENS = [')', ']', '}']

    def to_ast(self):
        # Convert this s-expression into a BotLang AST node.
        raise NotImplementedError

    def accept(self, visitor):
        # Visitor-pattern dispatch (visit_atom / visit_tree).
        raise NotImplementedError

    def copy(self):
        # Deep copy of this node.
        raise NotImplementedError

    @classmethod
    def is_tree(cls):
        return False

    @classmethod
    def is_atom(cls):
        return False
class Atom(SExpression):
    """Leaf s-expression holding a single token.

    `to_ast` classifies the token, in priority order, as boolean, integer,
    float, string literal, symbol, or identifier, and builds the matching
    Val/Id AST node.
    """
    TRUE_TOKENS = ['#t', 'true']
    FALSE_TOKENS = ['#f', 'false']

    @classmethod
    def is_atom(cls):
        return True

    def __init__(self, token, source_reference):
        self.code = token
        self.source_reference = source_reference

    def __repr__(self):
        return 'Atom({})'.format(self.code)

    def accept(self, visitor):
        return visitor.visit_atom(self)

    def copy(self):
        return Atom(self.code, self.source_reference)

    @property
    def token(self):
        # Alias: for atoms, the raw code *is* the token.
        return self.code

    def to_ast(self, quoted_parent=False):
        """Build the AST node for this token. `quoted_parent` is True when an
        enclosing quoted Tree forces symbol interpretation."""
        try:
            return self.as_boolean_value()
        except ValueError:
            pass
        try:
            return self.as_integer_value()
        except ValueError:
            pass
        try:
            return self.as_float_value()
        except ValueError:
            pass
        if self.is_string():
            return self.as_string_value()
        if self.is_symbol() or quoted_parent:
            return self.as_symbol_value(quoted_parent)
        return self.as_identifier()

    def is_boolean(self):
        return self.code in self.TRUE_TOKENS + self.FALSE_TOKENS

    def is_integer(self):
        try:
            self.as_integer_value()
        except ValueError:
            return False
        else:
            return True

    def is_float(self):
        try:
            self.as_float_value()
        except ValueError:
            return False
        else:
            return True

    def is_number(self):
        return self.is_integer() or self.is_float()

    def is_identifier(self):
        # Identifier = anything that is none of the literal kinds.
        return \
            not self.is_boolean() \
            and not self.is_number() \
            and not self.is_string() \
            and not self.is_symbol()

    def as_boolean_value(self):
        # Raises ValueError when the token is not a boolean literal, so that
        # to_ast's try/except chain can fall through to the next kind.
        if self.code in self.TRUE_TOKENS:
            return Val(True).add_code_reference(self)
        if self.code in self.FALSE_TOKENS:
            return Val(False).add_code_reference(self)
        raise ValueError

    def as_integer_value(self):
        return Val(int(self.code)).add_code_reference(self)

    def as_float_value(self):
        return Val(float(self.code)).add_code_reference(self)

    def as_quoted(self):
        return self.to_ast(quoted_parent=True)

    def as_string_value(self):
        # literal_eval handles escape sequences; raw newlines inside the
        # token are re-escaped first so the literal stays parseable.
        return Val(
            python_ast.literal_eval(self.code.replace('\n', '\\n'))
        ).add_code_reference(self)

    def as_symbol_value(self, quoted_parent):
        # Explicit symbols ('foo) drop the leading quote; tokens forced into
        # symbols by a quoted parent are used verbatim.
        symbol = self.token if quoted_parent else self.token[1:]
        return Val(symbol).add_code_reference(self)

    def as_identifier(self):
        return Id(self.token).add_code_reference(self)

    def is_string(self):
        return self.code.startswith('"') and self.code.endswith('"')

    def is_symbol(self):
        return self.code.startswith("'")
class Tree(SExpression):
    """Non-leaf s-expression: a parenthesized list of children.

    `to_ast` dispatches on the first child's token to recognize BotLang
    special forms (if, cond, define, fun, bot-node, module, ...) and falls
    back to treating the tree as a function application.
    """
    @classmethod
    def is_tree(cls):
        return True

    def __init__(self, children, code, source_reference, quoted=False):
        self.children = children
        self.code = code
        self.source_reference = source_reference
        self.quoted = quoted

    def __repr__(self):
        return 'Tree({})'.format(self.children)

    def accept(self, visitor):
        return visitor.visit_tree(self)

    def copy(self):
        return Tree(
            [child.copy() for child in self.children],
            self.code,
            self.source_reference,
            self.quoted
        )

    def as_quoted(self):
        # A quoted tree becomes a literal list of its quoted children.
        return ListVal([
            child.as_quoted() for child in self.children
        ]).add_code_reference(self)

    def to_ast(self):
        """Dispatch on the head token to build the corresponding AST node."""
        if self.quoted or len(self.children) == 0:
            return self.as_quoted()
        first = self.children[0].code
        if first == 'if':
            return self.if_node()
        if first == 'cond':
            return self.cond_node()
        if first == 'defclass':
            return self.class_definition_node()
        if first == 'and':
            return self.and_node()
        if first == 'or':
            return self.or_node()
        if first == 'define':
            return self.define_node()
        if first == 'local':
            return self.local_node()
        if first == 'begin':
            return self.begin_node()
        if first == 'fun' or first == 'function':
            return self.function_node(self.children)
        if first == 'bot-node':
            return self.bot_node()
        if first == 'slots-node':
            return self.slots_node()
        if first == 'node-result':
            return self.bot_result_node()
        if first == 'module':
            return self.module_definition_node()
        if first == 'provide':
            return self.module_export_node()
        if first == 'require':
            return self.module_import_node()
        if first == 'define-syntax-rule':
            return self.define_syntax_rule_node()
        return self.application_node()

    def module_definition_node(self):
        """(module <name> <body>...) -> ModuleDefinition."""
        try:
            module_body = BodySequence(
                [s_expr.to_ast() for s_expr in self.children[2:]]
            ).add_code_reference(self)
            return ModuleDefinition(
                self.children[1].to_ast(),
                module_body
            ).add_code_reference(self)
        except IndexError:
            raise BotLangSyntaxError("A module definition requires a name and a body")

    def module_export_node(self):
        """(provide <id>...) -> ModuleFunctionExport."""
        return ModuleFunctionExport(
            [identifier.to_ast() for identifier in self.children[1:]]
        ).add_code_reference(self)

    def module_import_node(self):
        """(require <module-name>) -> ModuleImport."""
        try:
            return ModuleImport(
                self.children[1].to_ast()
            ).add_code_reference(self)
        except IndexError:
            raise BotLangSyntaxError("An import requires the name of the module being imported.")

    def if_node(self):
        """(if <cond> <then> [<else>]) — missing else defaults to Nil."""
        try:
            return If(
                self.children[1].to_ast(),
                self.children[2].to_ast(),
                self.children[3].to_ast() if len(self.children) > 3 else Val(Nil)
            ).add_code_reference(self)
        except IndexError:
            raise BotLangSyntaxError('An if statement requires at least 3 parameters')

    def cond_node(self):
        """(cond (<pred> <expr>)... [(else <expr>)]) -> Cond."""
        return Cond(
            [child.to_cond_clause_ast_node() for child in self.children[1:]]
        ).add_code_reference(self)

    def to_cond_clause_ast_node(self):
        # A clause tree is either (else <expr>) or (<pred> <expr>).
        first = self.children[0].code
        if first == 'else':
            return CondElseClause(
                self.children[1].to_ast()
            ).add_code_reference(self)
        return CondPredicateClause(
            self.children[0].to_ast(),
            self.children[1].to_ast()
        ).add_code_reference(self)

    def class_definition_node(self):
        """(defclass <name> [(extends ...)] [(attributes ...)] ...) ->
        ClassDefinition, with Object as the implicit superclass."""
        try:
            properties = self.children[2:]
            superclass = self.get_superclass(properties)
            attributes = self.get_instance_attributes(properties)
            class_attributes = self.get_class_attributes(properties)
            methods = self.get_instance_methods(properties)
            class_methods = self.get_class_methods(properties)
            return ClassDefinition(
                self.children[1].code,
                superclass,
                attributes,
                methods,
                class_attributes,
                class_methods
            ).add_code_reference(self)
        except IndexError:
            raise BotLangSyntaxError("A class definition requires, at the very least, a name")

    @classmethod
    def get_superclass(cls, properties):
        # First (extends <name>) clause, or the root Object class if absent
        # (the IndexError fallback covers the no-extends case).
        try:
            extends = [
                expr.children[1].code
                for expr in properties if expr.children[0].code == 'extends'
            ]
            return extends[0]
        except IndexError:
            return OopHelper.OBJECT_CLASS_NAME

    @classmethod
    def get_attributes(cls, class_properties, attributes_key):
        """Collect AttributeDefinitions from the clause named by
        `attributes_key`; bare names get a None default value."""
        try:
            attributes_def = [
                expr.children[1:] for expr in class_properties
                if expr.children[0].code == attributes_key
            ][0]
            return [
                AttributeDefinition(
                    child.children[0].code,
                    child.children[1].to_ast()
                ) if child.is_tree()
                else AttributeDefinition(child.code, None)
                for child in attributes_def
            ]
        except IndexError:
            return []

    @classmethod
    def get_instance_attributes(cls, class_properties):
        return cls.get_attributes(class_properties, 'attributes')

    @classmethod
    def get_class_attributes(cls, class_properties):
        return cls.get_attributes(class_properties, 'class-attributes')

    @classmethod
    def get_methods(cls, class_properties, methods_key):
        """Collect MethodDefinitions from the clause named by `methods_key`;
        missing clause yields an empty list."""
        try:
            return [
                [
                    MethodDefinition(
                        child.children[0].code,
                        child.children[1].to_ast()
                    )
                    for child in expr.children[1:]
                ]
                for expr in class_properties
                if expr.children[0].code == methods_key
            ][0]
        except IndexError:
            return []

    @classmethod
    def get_instance_methods(cls, class_properties):
        return cls.get_methods(class_properties, 'methods')

    @classmethod
    def get_class_methods(cls, class_properties):
        return cls.get_methods(class_properties, 'class-methods')

    def and_node(self):
        return And(
            [child.to_ast() for child in self.children[1:]]
        ).add_code_reference(self)

    def or_node(self):
        return Or(
            [child.to_ast() for child in self.children[1:]]
        ).add_code_reference(self)

    def define_node(self):
        """(define <name> <expr>) -> Definition."""
        try:
            return Definition(
                self.children[1].code,
                self.children[2].to_ast()
            ).add_code_reference(self)
        except IndexError:
            raise BotLangSyntaxError("A define requires a name and a body")

    def local_node(self):
        """(local ((<name> <expr>)...) <body>) -> Local."""
        return Local(
            [
                Definition(
                    d.children[0].code,
                    d.children[1].to_ast()
                ).add_code_reference(d)
                for d in self.children[1].children
            ],
            self.children[2].to_ast()
        ).add_code_reference(self)

    def begin_node(self):
        """(begin <expr>...) -> BodySequence."""
        return BodySequence(
            [s_expr.to_ast() for s_expr in self.children[1:]]
        ).add_code_reference(self)

    def function_node(self, children):
        """(fun (<param>...) <body>...) -> Fun."""
        try:
            function_body = BodySequence(
                [s_expr.to_ast() for s_expr in children[2:]]
            ).add_code_reference(self)
            return Fun(
                [identifier.code for identifier in children[1].children],
                function_body
            ).add_code_reference(self)
        except IndexError:
            raise BotLangSyntaxError("A function declaration requires a name and a body.")

    def bot_node(self):
        """(bot-node (<context> <message>) <body>...) -> BotNode."""
        try:
            bot_node_body = BodySequence(
                [s_expr.to_ast() for s_expr in self.children[2:]]
            ).add_code_reference(self)
            return BotNode(
                [identifier.code for identifier in self.children[1].children],
                bot_node_body
            ).add_code_reference(self)
        except IndexError:
            raise BotLangSyntaxError("A bot-node must provide identifiers for the received context and message, "
                                     "plus a body")

    def slots_node(self):
        """(slots-node <name> (<args>...) <block>...) -> BotSlotsNode, where
        blocks are before / digress / slot / then clauses."""
        try:
            node_name = self.children[1].token
            args = [identifier.token for identifier in self.children[2].children]
            blocks = self.children[3:]
            self.check_slots_node_blocks(blocks)
            before = self.get_slots_before(blocks)
            digress = self.get_slots_digress(blocks)
            slots = self.get_slots(blocks)
            then = self.get_slots_then(blocks)
            slots_node_body = SlotsNodeBody(
                args, before, digress, slots, then
            ).add_code_reference(self)
            return BotSlotsNode(node_name, args, slots_node_body)\
                .add_code_reference(self)
        except IndexError:
            raise BotLangSyntaxError("A slots-node requires a name, to receive the context and message being passed to "
                                     "it, and a number of slot blocks.")

    @classmethod
    def get_slots_before(cls, blocks):
        # Optional (before <expr>) block.
        for block in blocks:
            if block.children[0].token == 'before':
                return block.children[1].to_ast()
        return None

    @classmethod
    def get_slots_digress(cls, blocks):
        # Optional (digress <expr>) block.
        for block in blocks:
            if block.children[0].token == 'digress':
                return block.children[1].to_ast()
        return None

    @classmethod
    def get_slots(cls, blocks):
        return [
            block.to_slot_ast_node() for block in blocks
            if block.children[0].token == 'slot'
        ]

    @classmethod
    def get_slots_then(cls, blocks):
        # (then <expr>) is mandatory — its absence is a syntax error.
        for block in blocks:
            if block.children[0].token == 'then':
                return block.children[1].to_ast()
        raise BotLangSyntaxError("The 'then' block is required for slot nodes")

    @classmethod
    def check_slots_node_blocks(cls, blocks):
        for block in blocks:
            token = block.children[0].token
            if token not in ['before', 'digress', 'slot', 'then']:
                raise BotLangSyntaxError('Unknown slots node block: %s' % token)

    def to_slot_ast_node(self):
        # (slot <name> <var> <expr> [<expr>]) -> SlotDefinition; the fourth
        # element is optional.
        return SlotDefinition(
            self.children[1].token,
            self.children[2].token,
            self.children[3].to_ast(),
            self.children[4].to_ast() if len(self.children) > 4 else None
        ).add_code_reference(self)

    def bot_result_node(self):
        """(node-result <context> <message> <next-node>) -> BotResult."""
        try:
            return BotResult(
                self.children[1].to_ast(),
                self.children[2].to_ast(),
                self.children[3].to_ast()
            ).add_code_reference(self)
        except IndexError:
            raise BotLangSyntaxError("A node-result requires three parameters (the new context, the message to send "
                                     "and the next node to execute.")

    def application_node(self):
        # Fallback: (f arg...) -> function application.
        return App(
            self.children[0].to_ast(),
            [s_expr.to_ast() for s_expr in self.children[1:]]
        ).add_code_reference(self)

    def define_syntax_rule_node(self):
        """(define-syntax-rule (<id> <args>...) <template>) -> DefineSyntax.
        Note the template child is kept as a raw s-expression, not an AST."""
        try:
            pattern = self.children[1].children
            pattern_node = SyntaxPattern(pattern[0], pattern[1:])
            return DefineSyntax(
                pattern_node.add_code_reference(pattern_node),
                self.children[2]
            ).add_code_reference(self)
        except IndexError:
            raise BotLangSyntaxError("A define-syntax-rule requires two arguments: first, the pattern node, which must "
                                     "have an identifier (and optionally, arguments); second, the corresponding "
                                     "template.")
| 28.013817 | 120 | 0.573428 |
acf5d12df549cc57f2776209eae6dcc193227498 | 2,138 | py | Python | getPILab.py | vsoch/authorSynth | 083541cbd9d662899eb2103c0e8840b44dee714b | [
"MIT"
] | null | null | null | getPILab.py | vsoch/authorSynth | 083541cbd9d662899eb2103c0e8840b44dee714b | [
"MIT"
] | null | null | null | getPILab.py | vsoch/authorSynth | 083541cbd9d662899eb2103c0e8840b44dee714b | [
"MIT"
] | 2 | 2015-04-17T22:28:48.000Z | 2021-01-06T00:05:43.000Z | #!/usr/bin/python
# This script will take in authors and coauthors to return PI lab groups in the form of:
# PIUUID UUIDS PAPERNUMS in order of descending paper numbers
# NOTE: Because last author/paper associations have not been kept,
# it is not possible to distinguish which author was in which labs
# when two authors defined as last author at some point, co-published
# This script was developed by not used in the original authorSynth application
keyfile = open('data\\authors.txt')
keyfile = keyfile.readlines()
header = keyfile.pop(0).strip("\n").split("\t")
pibool = header.index("PI")
uindex = header.index("UUIDS")
coauthfile = open('data\\coauthnet.txt')
coauthfile = coauthfile.readlines()
header = coauthfile.pop(0).strip("\n").split("\t")
couindex = header.index("UUID")
numcopap = header.index("NUMPAPERS")
ids = []
pids = []
for entries in keyfile:
ids.append(entries.strip("\n").split("\t")[uindex])
pids.append(entries.strip("\n").split("\t")[pibool])
piuuids = []
for foo in range(len(ids)):
if int(pids[foo]):
piuuids.append(ids[foo])
couuids = []
numcopapers = []
for entries in coauthfile:
couuids.append(entries.strip("\n").split("\t")[couindex])
numcopapers.append(entries.strip("\n").split("\t")[numcopap])
labmembers = dict()
#membernum = dict()
for pis in piuuids:
labmembers[pis]= []
# membernum[pis] = []
for pis in piuuids:
for indy, coauths in enumerate(couuids):
if pis in coauths.split(","):
labpos = int(not(coauths.split(',').index(pis)))
labmembers[pis].append((coauths.split(',')[labpos],(numcopapers[indy])))
# membernum[pis].append(numcopapers[indy])
for pis in piuuids:
labmembers[pis] = sorted(labmembers[pis], key=lambda t:int(t[1]), reverse=1)
labmembers = labmembers.items()
outfile = open('data\\pilabmembers.txt','w')
outfile.writelines("PIUUID\tUUIDS\tNUMPAPERS\n")
for foo in range(len(labmembers)):
uuid = labmembers[foo][0]
labmem = labmembers[foo][1]
if labmem:
labnames,labpapers = zip(*labmem)
labnames = ",".join(labnames)
labpapers = ",".join(labpapers)
line = uuid + "\t" + labnames + "\t" + labpapers + "\n"
outfile.writelines(line)
| 30.112676 | 88 | 0.70159 |
acf5d179423553c6ec67bba50c0d52f1a94c53a5 | 2,331 | py | Python | HW2/hw2_ilovepdf_split_all/kaggle.py | saifvazir/Machine-Learning | cfbce0f4a15ea90fcc57d53ef82c84c87e6fbe27 | [
"MIT"
] | null | null | null | HW2/hw2_ilovepdf_split_all/kaggle.py | saifvazir/Machine-Learning | cfbce0f4a15ea90fcc57d53ef82c84c87e6fbe27 | [
"MIT"
] | null | null | null | HW2/hw2_ilovepdf_split_all/kaggle.py | saifvazir/Machine-Learning | cfbce0f4a15ea90fcc57d53ef82c84c87e6fbe27 | [
"MIT"
] | null | null | null | import numpy as np
import matplotlib.pyplot as mp
import pandas as pd
from sklearn.linear_model import Ridge
from sklearn.metrics import mean_squared_error
from numpy.linalg import inv
from math import sqrt
from scipy import sparse
def ridgeReg(X, y, l):
    """Closed-form ridge regression with an unpenalized bias term.

    Args:
        X: numpy array of shape (k, n) — features in rows, samples in columns
           (a row of ones is appended internally for the bias).
        y: pandas DataFrame of shape (n, 1) with the targets.
        l: ridge penalty lambda (the bias row is excluded from the penalty).

    Returns:
        (w, obj, b, cvErrs): weight vector (k, 1), objective value, bias
        scalar, and the vector of leave-one-out CV errors (length n).
    """
    print(l)  # parenthesized so this runs on both Python 2 and 3
    # Append the all-ones bias row.
    one = np.ones(shape=(1, X.shape[1]))
    X = np.vstack((X, one))
    X_trans = X.transpose()
    # Regularization matrix: identity on the k feature rows, 0 on the bias
    # row/column so the intercept is not shrunk.
    identity = np.identity(X.shape[0] - 1)
    zero = np.zeros(shape=(X.shape[0] - 1, 1))
    identity = np.hstack((identity, zero))
    identity = np.vstack((identity, np.append(np.transpose(zero), 0)))
    C = np.dot(X, X_trans)
    t = np.multiply(l, identity)
    C += t
    d = np.dot(X, y)
    C_inv = inv(C)
    w = np.dot(C_inv, d)  # ridge solution trained on all data
    temp = np.dot(X_trans, w) - y
    w_trans = np.transpose(w)
    # Objective: l * ||w||^2 + ||Xᵀw - y||^2.
    obj = np.multiply(l, np.dot(w_trans, w)) + np.dot(np.transpose(temp), temp)
    # BUG FIX: the original seeded cvErrs with np.empty(shape=(n, 1)) —
    # n *uninitialized* garbage values that np.append then kept, doubling
    # the array's length and corrupting any statistic computed from it.
    # Start from a genuinely empty array instead.
    cvErrs = np.empty(shape=(0, 1))
    for i in range(0, X.shape[1]):
        x_i = X[:, i]
        # Leave-one-out error via the hat-matrix shortcut:
        # e_i / (1 - h_ii), with h_ii = x_iᵀ C⁻¹ x_i.
        error = (np.dot(w_trans, x_i) - y.iat[i, 0]) / (1 - np.dot(np.transpose(x_i), np.dot(C_inv, x_i)))
        cvErrs = np.append(cvErrs, error)
    # Split the bias term off the weight vector.
    b = w.item(X.shape[0] - 1)
    w = np.delete(w, X.shape[0] - 1, 0)
    return w, obj, b, cvErrs
# Driver script (Python 2: note the print statements below).
# Loads train/test CSVs, drops the leading index column, fits ridge
# regression for each lambda in `l`, and writes test predictions.
X_t=pd.read_csv('trainData.csv')
y_t=pd.read_csv('trainLabels.csv')
# Validation-set loading kept disabled (string literal acts as a comment).
'''X_v=pd.read_csv('valData.csv')
y_v=pd.read_csv('valLabels.csv')'''
# First CSV column is a row index — drop it.
X_t=X_t.drop(X_t.columns[0],axis=1)
y_t=y_t.drop(y_t.columns[0],axis=1)
#X_new = SelectKBest(mutual_info_regression, k=100).fit_transform(X_t, y_t)
X_test=pd.read_csv('testData.csv')
X_test=X_test.drop(X_test.columns[0],axis=1)
print X_test.shape
'''X_v=X_v.drop(X_v.columns[0],axis=1)
y_v=y_v.drop(y_v.columns[0],axis=1)
'''
# Accumulators below are mostly vestigial; only the loop variables are used.
rmvalues_t=[]
rmvalues_v=[]
cverr_t=[]
obj_values=[]
#cverr_v=[]
l=[0.7]  # single chosen lambda; add values to sweep
weight_max=0.0
predictions=np.empty(shape=(1,X_t.shape[0]))
for each in l:
    # ridgeReg expects features in rows, hence the transpose.
    weights_t,obj_cost_t,bias_t,cverror_t=ridgeReg(X_t.transpose(),y_t,each)
    # RMS of the leave-one-out CV errors.
    # NOTE(review): 5000 hard-codes the training-set size — confirm it
    # matches trainData.csv.
    print sqrt(np.sum(np.square(cverror_t))/5000)
    predictions=np.add(np.dot(X_test,weights_t),bias_t)
    weight_max=max(weights_t)
frame=pd.DataFrame(data=predictions)
frame.to_csv('predTestLabels.csv',encoding='utf-8',index=True)
| 29.506329 | 95 | 0.646933 |
acf5d18bfc13c56858a10cf801351b5c89600592 | 7,457 | py | Python | models/claim_breadth/preprocess_test.py | rcmckee/patents-public-data | b9b20d6ad6b18d5547be26b267a2c48ee6b5fa34 | [
"Apache-2.0"
] | 346 | 2017-10-31T17:48:05.000Z | 2022-03-30T23:47:52.000Z | models/claim_breadth/preprocess_test.py | rcmckee/patents-public-data | b9b20d6ad6b18d5547be26b267a2c48ee6b5fa34 | [
"Apache-2.0"
] | 44 | 2018-05-08T12:32:28.000Z | 2022-03-08T02:54:44.000Z | models/claim_breadth/preprocess_test.py | rcmckee/patents-public-data | b9b20d6ad6b18d5547be26b267a2c48ee6b5fa34 | [
"Apache-2.0"
] | 130 | 2017-11-02T10:20:38.000Z | 2022-03-31T04:16:49.000Z | # Copyright 2018 Google Inc. All Rights Reserved. Licensed under the Apache
# License, Version 2.0 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""End-to-end test for the patent claim breadth model preprocessing code."""
import logging
import os
import shutil
import time
import unittest
from apache_beam.metrics.metric import MetricsFilter
from apache_beam.testing.pipeline_verifiers import PipelineStateMatcher
from apache_beam.testing.test_pipeline import TestPipeline
from hamcrest.core.core.allof import all_of
from nose.plugins.attrib import attr
import preprocess
import tensorflow as tf
# Assumes you've set an environmental variable for your GCP project. See README.
PROJECT = os.environ['GCP_PROJECT']
def read_example_proto(test_dir):
filenames = tf.gfile.Glob(os.path.join(test_dir, '*.tfrecord.gz'))
tf_opt = tf.python_io.TFRecordOptions(
tf.python_io.TFRecordCompressionType.GZIP)
record = next(tf.python_io.tf_record_iterator(filenames[0], options=tf_opt))
example = tf.train.Example()
example.ParseFromString(record)
return example
def get_pipeline_metric(results, metric_name, index=0, result_type='counters'):
metric_filter = MetricsFilter().with_name(metric_name)
query_result = results.metrics().query(metric_filter)
try:
return query_result[result_type][index].committed
except IndexError:
logging.info(
'No key in metrics for %s at index %s, returning 0', metric_name, index)
return 0
def get_tf_feature(proto, feature_name, feature_type='float_list'):
"""Helper method to retrieve named features from a TF example proto."""
return getattr(proto.features.feature[feature_name], feature_type).value[0]
def get_test_query(max_records):
return '''
#standardSQL
with fake_applications as (
SELECT
'US-1234567-A1' as publication_number,
substr(claims.text, 0, 2000) as fullclaim,
2000 as priority_yr,
'C08F' as cpc4,
2003 as median_priority_yr
FROM `patents-public-data.patents.publications` p
,UNNEST(claims_localized) claims
WHERE claims.language = 'en'
AND country_code = 'US'
AND claims.text is not null
AND FLOOR(priority_date / 10000) > 2005
limit {half_max}
)
, fake_issued as (
SELECT
'US-1234567-B2' as publication_number,
substr(claims.text, 0, 2000) as fullclaim,
2012 as priority_yr,
'C08F' as cpc4,
2003 as median_priority_yr
FROM `patents-public-data.patents.publications` p
,UNNEST(claims_localized) claims
WHERE claims.language = 'en'
AND country_code = 'US'
AND claims.text is not null
AND FLOOR(priority_date / 10000) > 2005
limit {half_max}
)
select * from fake_applications
union all
select * from fake_issued
'''.format(half_max=(max_records // 2))
class PreProcessE2E(unittest.TestCase):
# Enable nose tests running in parallel
_multiprocess_can_split_ = True
OUTPUT_DIR = os.getcwd()
TOTAL_RECORDS = 500
TEST_QUERY = get_test_query(TOTAL_RECORDS)
@attr('IT')
def test_train_mode(self):
"""Runs pipeline in train mode outputting train, test and eval filesets."""
test_pipeline = TestPipeline()
# Set extra options to the pipeline for test purpose
test_dir = os.path.join(self.OUTPUT_DIR, str(int(time.time())))
self.addCleanup(shutil.rmtree, test_dir)
# Checks that pipeline reaches state "Done"
pipeline_verifiers = [PipelineStateMatcher()]
extra_opts = {
'project': PROJECT,
'output_path': test_dir,
'on_success_matcher': all_of(*pipeline_verifiers),
'runner': 'DirectRunner',
}
res = preprocess.main(
test_pipeline.get_full_options_as_args(**extra_opts),
query=self.TEST_QUERY,
await_completion=True
)
# Check counts coming out of GetFirstClaim step.
parse_first_claim_cnt = get_pipeline_metric(res, 'parse_firstclaim_success')
self.assertEqual(self.TOTAL_RECORDS, parse_first_claim_cnt)
# Check counts coming out of AddFeatures step.
add_features_cnt = get_pipeline_metric(res, 'create_features_success')
self.assertEqual(self.TOTAL_RECORDS, add_features_cnt)
# Check counts coming out of AddLabel step.
broad_cnt = get_pipeline_metric(res, 'add_label_broad')
narrow_cnt = get_pipeline_metric(res, 'add_label_narrow')
self.assertEqual(self.TOTAL_RECORDS, broad_cnt + narrow_cnt)
# Check if the number of records coming out of Train/Test = limit step.
splits = ['train_cnt', 'eval_cnt', 'test_cnt']
train_test_split_cnt = sum(
[get_pipeline_metric(res, m) for m in splits]
)
self.assertEqual(self.TOTAL_RECORDS, train_test_split_cnt)
# Check if number of protos created matched output of train/test split.
create_proto_success = sum(
[get_pipeline_metric(res, 'create_proto_success', index=i)
for i in range(3)]
)
self.assertEqual(self.TOTAL_RECORDS, create_proto_success)
# Open a tf Example and check fields.
example = read_example_proto(test_dir)
for feature_name in preprocess.FEATURE_NAMES:
self.assertGreaterEqual(get_tf_feature(example, feature_name), 0)
# Make sure label feature is present.
labels = ['broad', 'narrow']
self.assertIn(get_tf_feature(example, 'label', 'bytes_list'), labels)
@attr('IT')
def test_inference_mode(self):
"""Runs a pipeline in inference mode which should output one fileset."""
test_pipeline = TestPipeline()
# Set extra options to the pipeline for test purpose
test_dir = os.path.join(self.OUTPUT_DIR, str(int(time.time())))
self.addCleanup(shutil.rmtree, test_dir)
# Checks that pipeline reaches state "Done"
pipeline_verifiers = [PipelineStateMatcher()]
extra_opts = {
'project': PROJECT,
'output_path': test_dir,
'on_success_matcher': all_of(*pipeline_verifiers),
'runner': 'DirectRunner',
'pipeline_mode': 'inference',
}
res = preprocess.main(
test_pipeline.get_full_options_as_args(**extra_opts),
query=self.TEST_QUERY,
await_completion=True
)
# Check counts coming out of GetFirstClaim step.
parse_first_claim_cnt = get_pipeline_metric(res, 'parse_firstclaim_success')
self.assertEqual(self.TOTAL_RECORDS, parse_first_claim_cnt)
# Ensure a proto is created for all input records
create_proto_success = get_pipeline_metric(res, 'create_proto_success')
self.assertEqual(self.TOTAL_RECORDS, create_proto_success)
# Open a tf Example and check fields.
example = read_example_proto(test_dir)
for feature_name in preprocess.FEATURE_NAMES:
self.assertGreaterEqual(get_tf_feature(example, feature_name), 0)
# Make sure label feature is not present since we are in inference.
with self.assertRaises(IndexError):
get_tf_feature(example, 'label', 'bytes_list')
if __name__ == '__main__':
logging.getLogger().setLevel(logging.DEBUG)
unittest.main()
| 36.024155 | 80 | 0.724152 |
acf5d272ff564cc464d02e968880c20c7c94627c | 20,353 | py | Python | PINNTraining/unsteady_Cylinder/case_unCyl_piv_45.py | ls2716/PIV_PINN_data_extraction | 198754c8adeed92eea52e9904a39e993bc475ada | [
"MIT"
] | 2 | 2021-11-19T07:01:08.000Z | 2022-01-09T15:30:18.000Z | PINNTraining/unsteady_Cylinder/case_unCyl_piv_45.py | ls2716/PIV_PINN_data_extraction | 198754c8adeed92eea52e9904a39e993bc475ada | [
"MIT"
] | null | null | null | PINNTraining/unsteady_Cylinder/case_unCyl_piv_45.py | ls2716/PIV_PINN_data_extraction | 198754c8adeed92eea52e9904a39e993bc475ada | [
"MIT"
] | null | null | null | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import matplotlib.pyplot as plt
fs = 20
plt.rc('font', size=fs) #controls default text size
plt.rc('axes', titlesize=fs) #fontsize of the title
plt.rc('axes', labelsize=fs) #fontsize of the x and y labels
plt.rc('xtick', labelsize=fs) #fontsize of the x tick labels
plt.rc('ytick', labelsize=fs) #fontsize of the y tick labels
plt.rc('legend', fontsize=fs) #fontsize of the legend
import numpy as np
import math
import sys
import scipy.io
from copy import deepcopy
import deepxde as dde
from equations import RANSpknown2D, RANSf02D, func_zeros
from utilities import set_directory, plot_train_points
# Additional functions
def rotate_points(x, y, x_0, y_0, dtheta):
x = x - x_0
y = y - y_0
r = np.sqrt(x ** 2 + y ** 2)
theta = np.arccos(x / r)
theta[y < 0] = -theta[y < 0] + 2 * math.pi
theta += dtheta
x = r * np.cos(theta) + x_0
y = r * np.sin(theta) + y_0
return np.hstack((x[:, None], y[:, None]))
# airfoil geometry
def read_airfoil(filename):
with open(filename, "r") as f:
lines = f.readlines()
points = [item.strip().split() for item in lines]
points = [[float(item[0]), float(item[1])] for item in points]
return points
def read_data():
data = scipy.io.loadmat("./Data/unsteadyCylinder_full_field.mat")
data_no_airfoil = scipy.io.loadmat("./Data/unsteadyCylinder_no_cylinder.mat")
x = data["x_data"].T
y = data["y_data"].T
x_no_airfoil = data_no_airfoil["x_data"].T
y_no_airfoil = data_no_airfoil["y_data"].T
u = data["u_data"].T
v = data["v_data"].T
p = data["p_data"].T
uu = data["uu_data"].T
uv = data["uv_data"].T
vv = data["vv_data"].T
return x, y, u, v, p, uu, uv, vv, x_no_airfoil, y_no_airfoil
def generate_domain_points(x, y, geometry):
points = []
rs = []
centre_x = 0
centre_y = 0
r = np.sqrt((x-centre_x)**2 + ((y-centre_y)*7)**2)
r = r/(np.max(r)*1)
r = r**0.3
r = 1-r
for i in range(x.shape[0]):
tmp_u = np.random.random()
tmp_r = np.random.random()
if (tmp_r < r[i, 0]) and (tmp_u < 0.05) and geometry.inside([x[i, 0], y[i, 0]]):
points.append([x[i, 0], y[i, 0]])
print(f'Generated {len(points)} points in the domain')
return points
def generate_PIV_points(x, y, u, v, p, x_stride, y_stride, v_ld, v_ru, geometry, plot=False):
""" Generation of PIV points for training """
x_p = deepcopy(x)
y_p = deepcopy(y)
u_p = deepcopy(u)
v_p = deepcopy(v)
p_p = deepcopy(p)
x_p = x_p.reshape(2001,1501).T
y_p = y_p.reshape(2001,1501).T
u_p = u_p.reshape(2001,1501).T
v_p = v_p.reshape(2001,1501).T
p_p = p_p.reshape(2001,1501).T
start_ind_x = int((x_p.shape[1] % x_stride)/2)
start_ind_y = int((x_p.shape[0] % y_stride)/2)
x_p = x_p[start_ind_y::y_stride, start_ind_x::x_stride]
y_p = y_p[start_ind_y::y_stride, start_ind_x::x_stride]
u_p = u_p[start_ind_y::y_stride, start_ind_x::x_stride]
v_p = v_p[start_ind_y::y_stride, start_ind_x::x_stride]
p_p = p_p[start_ind_y::y_stride, start_ind_x::x_stride]
x_p = x_p.T.reshape(-1, 1)
y_p = y_p.T.reshape(-1, 1)
u_p = u_p.T.reshape(-1, 1)
v_p = v_p.T.reshape(-1, 1)
p_p = p_p.T.reshape(-1, 1)
X = []
for i in range(x_p.shape[0]):
if geometry.inside([x_p[i, 0], y_p[i, 0]]) \
and x_p[i, 0] > v_ld[0] and x_p[i, 0] < v_ru[0] \
and y_p[i, 0] > v_ld[1] and y_p[i, 0] < v_ru[1]:
X.append([x_p[i, 0], y_p[i, 0], u_p[i, 0], v_p[i, 0], p_p[i, 0]])
X = np.array(X)
return np.hsplit(X, 5)
def main(train=True, test=True):
# case name
case_name = "unCylinder_Foures_formulation_with_pressure"
case_name_title = r'PIV stride $0.02 \times 0.02$ Foures'
set_directory(case_name)
x_data, y_data, u_data, v_data, p_data, uu_data, uv_data, vv_data, x_domain, y_domain = read_data()
#domain vertices
v_ld = [-1, -1.5]
v_ru = [3, 1.5]
figsize = (10, 8*(v_ru[1]-v_ld[1])/(v_ru[0]-v_ld[0]))
figsize = (8,5)
Nx = int((v_ru[0]-v_ld[0])*500)+1
Ny = int((v_ru[1]-v_ld[1])*500)+1
print('Nx', Nx, 'Ny', Ny)
# geometry specification
geom1 = dde.geometry.Disk(0,0.5)
geom2 = dde.geometry.Rectangle(v_ld, v_ru)
geom = geom2 - geom1
[x_piv, y_piv, u_piv, v_piv, p_piv] = \
generate_PIV_points(x_data, y_data, u_data, v_data,
p_data, 10, 10, v_ld, v_ru, geom, True)
piv_points = np.hstack((x_piv, y_piv))
for i in range(x_data.shape[0]):
if x_data[i,0]==0 and y_data[i,0]==0.5:
p1 = p_data[i,0]
print(p1)
elif x_data[i,0]==0 and y_data[i,0]==-0.5:
p2 = p_data[i,0]
print(p2)
p_coors = np.array([[0, 0.5], [0,-0.5]])
p_val = np.array([[p1], [p2]])
# BC specification
# boundaries functions
def boundary(x, on_boundary):
return on_boundary and not (
np.isclose(x[0], v_ld[0])
or np.isclose(x[0], v_ru[0])
or np.isclose(x[1], v_ld[1])
or np.isclose(x[1], v_ru[1])
)
# BC objects
u_piv_points = dde.PointSetBC(piv_points, u_piv, component=0)
v_piv_points = dde.PointSetBC(piv_points, v_piv, component=1)
pressure_points = dde.PointSetBC(p_coors, p_val, component=2)
bc_wall_u = dde.DirichletBC(geom, func_zeros, boundary, component=0)
bc_wall_v = dde.DirichletBC(geom, func_zeros, boundary, component=1)
bc_wall_fx = dde.DirichletBC(geom, func_zeros, boundary, component=3)
bc_wall_fy = dde.DirichletBC(geom, func_zeros, boundary, component=4)
# custom domain points
domain_points = generate_domain_points(x_domain, y_domain, geometry=geom)
# pde and physics compilation
pde = RANSf02D(150)
if train:
data = dde.data.PDE(
geom,
pde,
[bc_wall_u, bc_wall_v, bc_wall_fx, bc_wall_fy, pressure_points, u_piv_points, v_piv_points],
100,
1600,
solution=None,
num_test=100,
train_distribution="custom",
custom_train_points=domain_points,
)
plot_train_points(data, [4,5, 7], ["airfoil", "pressure", "piv"],
case_name, title=case_name_title, figsize=(10,5))
else:
data = dde.data.PDE(
geom,
pde,
[bc_wall_u, bc_wall_v, bc_wall_fx, bc_wall_fy, u_piv_points, v_piv_points],
100,
100,
solution=None,
num_test=100
)
# exit(0)
# NN model definition
layer_size = [2] + [100] * 7 + [5]
activation = "tanh"
initializer = "Glorot uniform"
net = dde.maps.FNN(layer_size, activation, initializer)
# PINN definition
model = dde.Model(data, net)
if train:
# Adam optimization
loss_weights = [1, 1, 1, 1, 10, 10, 10, 10, 10, 10, 10]
model.compile("adam", lr=0.001, loss_weights=loss_weights)
checkpointer = dde.callbacks.ModelCheckpoint(
f"{case_name}/models/model_{case_name}.ckpt",
verbose=1,
save_better_only=True,
)
loss_update = dde.callbacks.LossUpdateCheckpoint(
momentum=0.7,
verbose=1, period=1, report_period=100,
base_range=[0, 1, 2, 3],
update_range=[ 4, 5, 6, 7, 8, 9, 10]
)
print('Training for 20000 epochs')
losshistory, train_state = model.train(
epochs=20000, callbacks=[checkpointer, loss_update], display_every=100
)
model.save(f"{case_name}/models/model-adam-last")
# L-BFGS-B optimization
model.compile("L-BFGS-B", loss_weights=loss_weights)
losshistory, train_state = model.train()
model.save(f"{case_name}/models/model-bfgs-last")
if test:
model.compile("adam", lr=0.001)
model.compile("L-BFGS-B")
last_epoch = model.train_state.epoch
if not train:
last_epoch=80001
model.restore(f"{case_name}/models/model-bfgs-last-{last_epoch}")
x_plot = np.linspace(v_ld[0], v_ru[0], Nx)
y_plot = np.linspace(v_ld[1], v_ru[1], Ny)
print(x_plot.shape)
print(y_plot.shape)
# domain data
x_data = x_data.reshape(2001,1501).T
y_data = y_data.reshape(2001,1501).T
u_data = u_data.reshape(2001,1501).T
v_data = v_data.reshape(2001,1501).T
p_data = p_data.reshape(2001,1501).T
x_dom = np.linspace(-1, 3, 2001)
y_dom = np.linspace(-1.5, 1.5, 1501)
x_min = np.argmin(np.abs(x_dom-v_ld[0]))
x_max = np.argmin(np.abs(x_dom-v_ru[0]))
y_min = np.argmin(np.abs(y_dom-v_ld[1]))
y_max = np.argmin(np.abs(y_dom-v_ru[1]))
print(x_min, x_max, y_min, y_max)
x_data = x_data[y_min:y_max+1, x_min:x_max+1]
print(x_data.shape)
x_data = x_data.T.reshape(-1,1)
y_data = y_data[y_min:y_max+1, x_min:x_max+1].T.reshape(-1,1)
u_data = u_data[y_min:y_max+1, x_min:x_max+1].T.reshape(-1,1)
v_data = v_data[y_min:y_max+1, x_min:x_max+1].T.reshape(-1,1)
p_data = p_data[y_min:y_max+1, x_min:x_max+1].T.reshape(-1,1)
z = np.array([np.array([i, j]) for i in x_plot for j in y_plot])
y = model.predict(z)
u_star = y[:, 0][:, None]
v_star = y[:, 1][:, None]
p_star = y[:, 2][:, None]
fx_star = y[:, 3][:,None]
fy_star = y[:, 4][:,None]
data_dict = {
"x_data": x_data,
"y_data": y_data,
"u_star": u_star,
"v_star": v_star,
"p_star": p_star,
"fx_star": fx_star,
"fy_star": fy_star
}
scipy.io.savemat(f"{case_name}/results.mat", data_dict)
zero_index = (x_data < 0) & (x_data > 0)
zero_index = zero_index | ((u_data == 0) & (v_data == 0))
no_data_index = zero_index
u_star_data = deepcopy(u_star)
v_star_data = deepcopy(v_star)
p_star_data = deepcopy(p_star)
fx_star_data = deepcopy(fx_star)
fy_star_data = deepcopy(fy_star)
u_star_data[no_data_index] = u_star[no_data_index]*0
v_star_data[no_data_index] = v_star[no_data_index]*0
p_star_data[no_data_index] = p_star[no_data_index]*0
fx_star_data[no_data_index] = fx_star[no_data_index]*0
fy_star_data[no_data_index] = fy_star[no_data_index]*0
u_star_data = u_star_data.reshape(Nx, Ny).T
v_star_data = v_star_data.reshape(Nx, Ny).T
p_star_data = p_star_data.reshape(Nx, Ny).T
fx_star_data = fx_star_data.reshape(Nx, Ny).T
fy_star_data = fy_star_data.reshape(Nx, Ny).T
X, Y = np.meshgrid(x_plot, y_plot)
plt.figure(figsize=figsize)
# plt.title(f'regressed u field for {case_name_title}')
plt.pcolor(X, Y, u_star_data)
plt.colorbar(label='u')
plt.xlabel('x/c')
plt.ylabel('y/c')
axes=plt.gca()
axes.set_aspect(1)
plt.tight_layout()
plt.savefig(os.path.join(f'{case_name}',
'plots', 'u_plot.png'), dpi=400)
plt.close()
plt.figure(figsize=figsize)
# plt.title(f'regressed v field for {case_name_title}')
plt.pcolor(X, Y, v_star_data)
plt.colorbar(label='v')
plt.xlabel('x/c')
plt.ylabel('y/c')
axes=plt.gca()
axes.set_aspect(1)
plt.tight_layout()
plt.savefig(os.path.join(f'{case_name}',
'plots', 'v_plot.png'), dpi=400)
plt.close()
plt.figure(figsize=figsize)
# plt.title(f'regressed p field for {case_name_title}')
plt.pcolor(X, Y, p_star_data)
plt.colorbar(label='p')
plt.xlabel('x/c')
plt.ylabel('y/c')
axes=plt.gca()
axes.set_aspect(1)
plt.tight_layout()
plt.savefig(os.path.join(f'{case_name}',
'plots', 'p_plot.png'), dpi=400)
plt.close()
plt.figure(figsize=figsize)
# plt.title(f'regressed fx field for {case_name_title}')
plt.pcolor(X, Y, fx_star_data)
plt.colorbar(label='fx')
plt.xlabel('x/c')
plt.ylabel('y/c')
axes=plt.gca()
axes.set_aspect(1)
plt.tight_layout()
plt.savefig(os.path.join(f'{case_name}',
'plots', 'fx_plot.png'), dpi=400)
plt.close()
plt.figure(figsize=figsize)
# plt.title(f'regressed fy field for {case_name_title}')
plt.pcolor(X, Y, fy_star_data)
plt.colorbar(label='fy')
plt.xlabel('x/c')
plt.ylabel('y/c')
axes=plt.gca()
axes.set_aspect(1)
plt.tight_layout()
plt.savefig(os.path.join(f'{case_name}',
'plots', 'fy_plot.png'), dpi=400)
plt.close()
# data error
u_star_data = deepcopy(u_star)
v_star_data = deepcopy(v_star)
p_star_data = deepcopy(p_star)
u_star_data[no_data_index] = u_star[no_data_index]*0
v_star_data[no_data_index] = v_star[no_data_index]*0
p_star_data[no_data_index] = p_star[no_data_index]*0
u_star_data = u_star_data.reshape(Nx, Ny).T
v_star_data = v_star_data.reshape(Nx, Ny).T
p_star_data = p_star_data.reshape(Nx, Ny).T
u_true = None
v_true = None
p_true = None
u_true = deepcopy(u_data)
v_true = deepcopy(v_data)
p_true = deepcopy(p_data)
u_true = u_true.reshape(Nx, Ny).T
v_true = v_true.reshape(Nx, Ny).T
p_true = p_true.reshape(Nx, Ny).T
u_err = np.abs(u_true-u_star_data)
v_err = np.abs(v_true-v_star_data)
p_err = np.abs(p_true-p_star_data)
plt.figure(figsize=figsize)
# plt.title(f'u field abs error for {case_name_title}')
plt.pcolor(X, Y, u_err)
plt.colorbar(label='u')
plt.xlabel('x/c')
plt.ylabel('y/c')
axes=plt.gca()
axes.set_aspect(1)
plt.tight_layout()
plt.savefig(os.path.join(f'{case_name}',
'plots', 'u_err_plot.png'), dpi=400)
plt.close()
plt.figure(figsize=figsize)
# plt.title(f'v field abs error for {case_name_title}')
plt.pcolor(X, Y, v_err)
plt.colorbar(label='v')
plt.xlabel('x/c')
plt.ylabel('y/c')
axes=plt.gca()
axes.set_aspect(1)
plt.tight_layout()
plt.savefig(os.path.join(f'{case_name}',
'plots', 'v_err_plot.png'), dpi=400)
plt.close()
plt.figure(figsize=figsize)
# plt.title(f'p field abs error for {case_name_title}')
plt.pcolor(X, Y, p_err)
plt.colorbar(label='p')
plt.xlabel('x/c')
plt.ylabel('y/c')
axes=plt.gca()
axes.set_aspect(1)
plt.tight_layout()
plt.savefig(os.path.join(f'{case_name}',
'plots', 'p_err_plot.png'), dpi=400)
plt.close()
e = model.predict(z, operator=pde)
e_mass = e[0]
e_u_momentum = e[1]
e_v_momentum = e[2]
f_divergence = e[3]
data_dict.update({
"e_mass": e_mass,
"e_u_momentum": e_u_momentum,
"e_v_momentum": e_v_momentum,
"f_divergence": f_divergence
})
scipy.io.savemat(f"{case_name}/results.mat", data_dict)
e_mass[no_data_index] = e_mass[no_data_index] * 0
e_u_momentum[no_data_index] = e_u_momentum[no_data_index] * 0
e_v_momentum[no_data_index] = e_v_momentum[no_data_index] * 0
f_divergence[no_data_index] = f_divergence[no_data_index] * 0
e_mass = e_mass.reshape(Nx, Ny).T
e_u_momentum = e_u_momentum.reshape(Nx, Ny).T
e_v_momentum = e_v_momentum.reshape(Nx, Ny).T
f_divergence = f_divergence.reshape(Nx, Ny).T
plt.figure(figsize=figsize)
# plt.title(f'mass conservation residual for {case_name_title}')
plt.pcolor(X, Y, e_mass, vmin=-1, vmax=1)
plt.colorbar(label='e_mass')
plt.xlabel('x/c')
plt.ylabel('y/c')
axes=plt.gca()
axes.set_aspect(1)
plt.tight_layout()
plt.savefig(os.path.join(f'{case_name}',
'plots', 'e_mass_plot.png'), dpi=400)
plt.close()
plt.figure(figsize=figsize)
# plt.title(f'u momentum conservation residual for {case_name_title}')
plt.pcolor(X, Y, e_u_momentum, vmin=-1, vmax=1)
plt.colorbar(label='e_u_momentum')
plt.xlabel('x/c')
plt.ylabel('y/c')
axes=plt.gca()
axes.set_aspect(1)
plt.tight_layout()
plt.savefig(os.path.join(
f'{case_name}', 'plots', 'e_u_momentum_plot.png'), dpi=400)
plt.close()
plt.figure(figsize=figsize)
# plt.title(f'v momentum conservation residual for {case_name_title}')
plt.pcolor(X, Y, e_v_momentum, vmin=-1, vmax=1)
plt.colorbar(label='e_v_momentum')
plt.xlabel('x/c')
plt.ylabel('y/c')
axes=plt.gca()
axes.set_aspect(1)
plt.tight_layout()
plt.savefig(os.path.join(
f'{case_name}', 'plots', 'e_v_momentum_plot.png'), dpi=400)
plt.close()
plt.figure(figsize=figsize)
# plt.title(f'fs divergence residual for {case_name_title}')
plt.pcolor(X, Y, f_divergence, vmin=-1, vmax=1)
plt.colorbar(label='f_divergence')
plt.xlabel('x/c')
plt.ylabel('y/c')
axes=plt.gca()
axes.set_aspect(1)
plt.tight_layout()
plt.savefig(os.path.join(
f'{case_name}', 'plots', 'f_divergence_plot.png'), dpi=400)
plt.close()
def curl_f(X,V):
dfsx_y = dde.grad.jacobian(V, X, i=3, j=1)
dfsy_x = dde.grad.jacobian(V, X, i=4, j=0)
return [dfsy_x - dfsx_y]
e = model.predict(z, operator=curl_f)
f_curl = e[0]
data_dict.update({
"curlf": f_curl
})
scipy.io.savemat(f"{case_name}/results.mat", data_dict)
f_curl[no_data_index] = f_curl[no_data_index] * 0
f_curl = f_curl.reshape(Nx, Ny).T
plt.figure(figsize=figsize)
# plt.title(f'curl fs for {case_name_title}')
plt.pcolor(X, Y, f_curl)
plt.colorbar(label=r"$\nabla \times \mathbf{f}$")
plt.xlabel('x/c')
plt.ylabel('y/c')
axes=plt.gca()
axes.set_aspect(1)
plt.tight_layout()
plt.savefig(os.path.join(f'{case_name}',
'plots', 'f_curl_plot.png'), dpi=400)
plt.close()
plt.figure(figsize=figsize)
# plt.title(f'curl fs for {case_name_title}')
plt.pcolor(X, Y, f_curl, vmin=-2.1125, vmax=2.1125)
plt.colorbar(label=r"$\nabla \times \mathbf{f}$")
plt.xlabel('x/c')
plt.ylabel('y/c')
axes=plt.gca()
axes.set_aspect(1)
plt.tight_layout()
plt.savefig(os.path.join(f'{case_name}',
'plots', 'f_curl_plot_rescaled.png'), dpi=400)
plt.close()
if __name__ == "__main__":
train = True
test = True
if "train" in sys.argv and "test" not in sys.argv:
train = True
test = False
if "train" not in sys.argv and "test" in sys.argv:
train = False
test = True
main(train, test)
| 34.732082 | 105 | 0.557756 |
acf5d2c6fcae215ab24c84d2f1025b338eed1ca2 | 16,656 | py | Python | Lib/test/test_format.py | djaldave/laevad-python-2.7.18 | df9aac191d554295db45d638e528880a9ab9a3ec | [
"bzip2-1.0.6"
] | 42 | 2018-12-12T01:00:59.000Z | 2022-03-27T07:32:29.000Z | Lib/test/test_format.py | djaldave/laevad-python-2.7.18 | df9aac191d554295db45d638e528880a9ab9a3ec | [
"bzip2-1.0.6"
] | 13 | 2020-11-06T13:50:45.000Z | 2022-01-25T07:17:37.000Z | Lib/test/test_format.py | djaldave/laevad-python-2.7.18 | df9aac191d554295db45d638e528880a9ab9a3ec | [
"bzip2-1.0.6"
] | 8 | 2020-11-14T04:30:26.000Z | 2021-01-16T17:55:19.000Z | import sys
from test.test_support import verbose, have_unicode, TestFailed
import test.test_support as test_support
import unittest
maxsize = test_support.MAX_Py_ssize_t
# test string formatting operator (I am not sure if this is being tested
# elsewhere but, surely, some of the given cases are *not* tested because
# they crash python)
# test on unicode strings as well
def testformat(formatstr, args, output=None, limit=None, overflowok=False):
if verbose:
if output:
print "%s %% %s =? %s ..." %\
(repr(formatstr), repr(args), repr(output)),
else:
print "%s %% %s works? ..." % (repr(formatstr), repr(args)),
try:
result = formatstr % args
except OverflowError:
if not overflowok:
raise
if verbose:
print 'overflow (this is fine)'
else:
if output and limit is None and result != output:
if verbose:
print 'no'
raise AssertionError("%r %% %r == %r != %r" %
(formatstr, args, result, output))
# when 'limit' is specified, it determines how many characters
# must match exactly; lengths must always match.
# ex: limit=5, '12345678' matches '12345___'
# (mainly for floating point format tests for which an exact match
# can't be guaranteed due to rounding and representation errors)
elif output and limit is not None and (
len(result)!=len(output) or result[:limit]!=output[:limit]):
if verbose:
print 'no'
print "%s %% %s == %s != %s" % \
(repr(formatstr), repr(args), repr(result), repr(output))
else:
if verbose:
print 'yes'
def testboth(formatstr, *args, **kwargs):
testformat(formatstr, *args, **kwargs)
if have_unicode:
testformat(unicode(formatstr), *args, **kwargs)
class FormatTest(unittest.TestCase):
def test_format(self):
testboth("%.1d", (1,), "1")
testboth("%.*d", (sys.maxint,1), overflowok=True) # expect overflow
testboth("%.100d", (1,), '00000000000000000000000000000000000000'
'000000000000000000000000000000000000000000000000000000'
'00000001', overflowok=True)
testboth("%#.117x", (1,), '0x00000000000000000000000000000000000'
'000000000000000000000000000000000000000000000000000000'
'0000000000000000000000000001',
overflowok=True)
testboth("%#.118x", (1,), '0x00000000000000000000000000000000000'
'000000000000000000000000000000000000000000000000000000'
'00000000000000000000000000001',
overflowok=True)
testboth("%f", (1.0,), "1.000000")
# these are trying to test the limits of the internal magic-number-length
# formatting buffer, if that number changes then these tests are less
# effective
testboth("%#.*g", (109, -1.e+49/3.))
testboth("%#.*g", (110, -1.e+49/3.))
testboth("%#.*g", (110, -1.e+100/3.))
# test some ridiculously large precision, expect overflow
testboth('%12.*f', (123456, 1.0))
# check for internal overflow validation on length of precision
# these tests should no longer cause overflow in Python
# 2.7/3.1 and later.
testboth("%#.*g", (110, -1.e+100/3.))
testboth("%#.*G", (110, -1.e+100/3.))
testboth("%#.*f", (110, -1.e+100/3.))
testboth("%#.*F", (110, -1.e+100/3.))
# Formatting of long integers. Overflow is not ok
testboth("%x", 10L, "a")
testboth("%x", 100000000000L, "174876e800")
testboth("%o", 10L, "12")
testboth("%o", 100000000000L, "1351035564000")
testboth("%d", 10L, "10")
testboth("%d", 100000000000L, "100000000000")
big = 123456789012345678901234567890L
testboth("%d", big, "123456789012345678901234567890")
testboth("%d", -big, "-123456789012345678901234567890")
testboth("%5d", -big, "-123456789012345678901234567890")
testboth("%31d", -big, "-123456789012345678901234567890")
testboth("%32d", -big, " -123456789012345678901234567890")
testboth("%-32d", -big, "-123456789012345678901234567890 ")
testboth("%032d", -big, "-0123456789012345678901234567890")
testboth("%-032d", -big, "-123456789012345678901234567890 ")
testboth("%034d", -big, "-000123456789012345678901234567890")
testboth("%034d", big, "0000123456789012345678901234567890")
testboth("%0+34d", big, "+000123456789012345678901234567890")
testboth("%+34d", big, " +123456789012345678901234567890")
testboth("%34d", big, " 123456789012345678901234567890")
testboth("%.2d", big, "123456789012345678901234567890")
testboth("%.30d", big, "123456789012345678901234567890")
testboth("%.31d", big, "0123456789012345678901234567890")
testboth("%32.31d", big, " 0123456789012345678901234567890")
testboth("%d", float(big), "123456________________________", 6)
big = 0x1234567890abcdef12345L # 21 hex digits
testboth("%x", big, "1234567890abcdef12345")
testboth("%x", -big, "-1234567890abcdef12345")
testboth("%5x", -big, "-1234567890abcdef12345")
testboth("%22x", -big, "-1234567890abcdef12345")
testboth("%23x", -big, " -1234567890abcdef12345")
testboth("%-23x", -big, "-1234567890abcdef12345 ")
testboth("%023x", -big, "-01234567890abcdef12345")
testboth("%-023x", -big, "-1234567890abcdef12345 ")
testboth("%025x", -big, "-0001234567890abcdef12345")
testboth("%025x", big, "00001234567890abcdef12345")
testboth("%0+25x", big, "+0001234567890abcdef12345")
testboth("%+25x", big, " +1234567890abcdef12345")
testboth("%25x", big, " 1234567890abcdef12345")
testboth("%.2x", big, "1234567890abcdef12345")
testboth("%.21x", big, "1234567890abcdef12345")
testboth("%.22x", big, "01234567890abcdef12345")
testboth("%23.22x", big, " 01234567890abcdef12345")
testboth("%-23.22x", big, "01234567890abcdef12345 ")
testboth("%X", big, "1234567890ABCDEF12345")
testboth("%#X", big, "0X1234567890ABCDEF12345")
testboth("%#x", big, "0x1234567890abcdef12345")
testboth("%#x", -big, "-0x1234567890abcdef12345")
testboth("%#.23x", -big, "-0x001234567890abcdef12345")
testboth("%#+.23x", big, "+0x001234567890abcdef12345")
testboth("%# .23x", big, " 0x001234567890abcdef12345")
testboth("%#+.23X", big, "+0X001234567890ABCDEF12345")
testboth("%#-+.23X", big, "+0X001234567890ABCDEF12345")
testboth("%#-+26.23X", big, "+0X001234567890ABCDEF12345")
testboth("%#-+27.23X", big, "+0X001234567890ABCDEF12345 ")
testboth("%#+27.23X", big, " +0X001234567890ABCDEF12345")
# next one gets two leading zeroes from precision, and another from the
# 0 flag and the width
testboth("%#+027.23X", big, "+0X0001234567890ABCDEF12345")
# same, except no 0 flag
testboth("%#+27.23X", big, " +0X001234567890ABCDEF12345")
testboth("%x", float(big), "123456_______________", 6)
big = 012345670123456701234567012345670L # 32 octal digits
testboth("%o", big, "12345670123456701234567012345670")
testboth("%o", -big, "-12345670123456701234567012345670")
testboth("%5o", -big, "-12345670123456701234567012345670")
testboth("%33o", -big, "-12345670123456701234567012345670")
testboth("%34o", -big, " -12345670123456701234567012345670")
testboth("%-34o", -big, "-12345670123456701234567012345670 ")
testboth("%034o", -big, "-012345670123456701234567012345670")
testboth("%-034o", -big, "-12345670123456701234567012345670 ")
testboth("%036o", -big, "-00012345670123456701234567012345670")
testboth("%036o", big, "000012345670123456701234567012345670")
testboth("%0+36o", big, "+00012345670123456701234567012345670")
testboth("%+36o", big, " +12345670123456701234567012345670")
testboth("%36o", big, " 12345670123456701234567012345670")
testboth("%.2o", big, "12345670123456701234567012345670")
testboth("%.32o", big, "12345670123456701234567012345670")
testboth("%.33o", big, "012345670123456701234567012345670")
testboth("%34.33o", big, " 012345670123456701234567012345670")
testboth("%-34.33o", big, "012345670123456701234567012345670 ")
testboth("%o", big, "12345670123456701234567012345670")
testboth("%#o", big, "012345670123456701234567012345670")
testboth("%#o", -big, "-012345670123456701234567012345670")
testboth("%#.34o", -big, "-0012345670123456701234567012345670")
testboth("%#+.34o", big, "+0012345670123456701234567012345670")
testboth("%# .34o", big, " 0012345670123456701234567012345670")
testboth("%#+.34o", big, "+0012345670123456701234567012345670")
testboth("%#-+.34o", big, "+0012345670123456701234567012345670")
testboth("%#-+37.34o", big, "+0012345670123456701234567012345670 ")
testboth("%#+37.34o", big, " +0012345670123456701234567012345670")
# next one gets one leading zero from precision
testboth("%.33o", big, "012345670123456701234567012345670")
# base marker shouldn't change that, since "0" is redundant
testboth("%#.33o", big, "012345670123456701234567012345670")
# but reduce precision, and base marker should add a zero
testboth("%#.32o", big, "012345670123456701234567012345670")
# one leading zero from precision, and another from "0" flag & width
testboth("%034.33o", big, "0012345670123456701234567012345670")
# base marker shouldn't change that
testboth("%0#34.33o", big, "0012345670123456701234567012345670")
testboth("%o", float(big), "123456__________________________", 6)
# Some small ints, in both Python int and long flavors).
testboth("%d", 42, "42")
testboth("%d", -42, "-42")
testboth("%d", 42L, "42")
testboth("%d", -42L, "-42")
testboth("%d", 42.0, "42")
testboth("%#x", 1, "0x1")
testboth("%#x", 1L, "0x1")
testboth("%#X", 1, "0X1")
testboth("%#X", 1L, "0X1")
testboth("%#x", 1.0, "0x1")
testboth("%#o", 1, "01")
testboth("%#o", 1L, "01")
testboth("%#o", 0, "0")
testboth("%#o", 0L, "0")
testboth("%o", 0, "0")
testboth("%o", 0L, "0")
testboth("%d", 0, "0")
testboth("%d", 0L, "0")
testboth("%#x", 0, "0x0")
testboth("%#x", 0L, "0x0")
testboth("%#X", 0, "0X0")
testboth("%#X", 0L, "0X0")
testboth("%x", 0x42, "42")
testboth("%x", -0x42, "-42")
testboth("%x", 0x42L, "42")
testboth("%x", -0x42L, "-42")
testboth("%x", float(0x42), "42")
testboth("%o", 042, "42")
testboth("%o", -042, "-42")
testboth("%o", 042L, "42")
testboth("%o", -042L, "-42")
testboth("%o", float(042), "42")
# alternate float formatting
testformat('%g', 1.1, '1.1')
testformat('%#g', 1.1, '1.10000')
# Regression test for http://bugs.python.org/issue15516.
class IntFails(object):
def __int__(self):
raise TestFailed
def __long__(self):
return 0
fst = IntFails()
testformat("%x", fst, '0')
testformat(u"%x", fst, '0')
# Test exception for unknown format characters
if verbose:
print 'Testing exceptions'
def test_exc(formatstr, args, exception, excmsg):
try:
testformat(formatstr, args)
except exception, exc:
if str(exc) == excmsg:
if verbose:
print "yes"
else:
if verbose: print 'no'
print 'Unexpected ', exception, ':', repr(str(exc))
except:
if verbose: print 'no'
print 'Unexpected exception'
raise
else:
raise TestFailed, 'did not get expected exception: %s' % excmsg
test_exc('abc %a', 1, ValueError,
"unsupported format character 'a' (0x61) at index 5")
if have_unicode:
test_exc(unicode('abc %\u3000','raw-unicode-escape'), 1, ValueError,
"unsupported format character '?' (0x3000) at index 5")
test_exc('%d', '1', TypeError, "%d format: a number is required, not str")
test_exc('%g', '1', TypeError, "float argument required, not str")
test_exc('no format', '1', TypeError,
"not all arguments converted during string formatting")
test_exc('no format', u'1', TypeError,
"not all arguments converted during string formatting")
test_exc(u'no format', '1', TypeError,
"not all arguments converted during string formatting")
test_exc(u'no format', u'1', TypeError,
"not all arguments converted during string formatting")
class Foobar(long):
def __oct__(self):
# Returning a non-string should not blow up.
return self + 1
test_exc('%o', Foobar(), TypeError,
"expected string or Unicode object, long found")
if maxsize == 2**31-1:
# crashes 2.2.1 and earlier:
try:
"%*d"%(maxsize, -127)
except MemoryError:
pass
else:
raise TestFailed, '"%*d"%(maxsize, -127) should fail'
def test_invalid_special_methods(self):
tests = []
for f in 'sriduoxXfge':
tests.append(('%' + f, 1, TypeError))
tests.append(('%#' + f, 1, TypeError))
for r in ['', '-', 'L', '-L']:
for f in 'iduoxX':
tests.append(('%' + f, r, ValueError))
tests.append(('%#' + f, r, ValueError))
tests.append(('%o', 'abc', ValueError))
for r in ('abc', '0abc', '0x', '0xL'):
for f in 'xX':
tests.append(('%' + f, r, ValueError))
for r in ('0x', '0xL'):
for f in 'xX':
tests.append(('%#' + f, r, ValueError))
class X(long):
def __repr__(self):
return result
def __str__(self):
return result
def __oct__(self):
return result
def __hex__(self):
return result
def __float__(self):
return result
for fmt, result, exc in tests:
try:
fmt % X()
except exc:
pass
else:
self.fail('%s not raised for %r format of %r' %
(exc.__name__, fmt, result))
def test_main():
test_support.run_unittest(FormatTest)
def test_precision(self):
f = 1.2
self.assertEqual(format(f, ".0f"), "1")
self.assertEqual(format(f, ".3f"), "1.200")
with self.assertRaises(ValueError) as cm:
format(f, ".%sf" % (sys.maxsize + 1))
self.assertEqual(str(cm.exception), "precision too big")
c = complex(f)
self.assertEqual(format(c, ".0f"), "1+0j")
self.assertEqual(format(c, ".3f"), "1.200+0.000j")
with self.assertRaises(ValueError) as cm:
format(c, ".%sf" % (sys.maxsize + 1))
self.assertEqual(str(cm.exception), "precision too big")
@test_support.cpython_only
def test_precision_c_limits(self):
from _testcapi import INT_MAX
f = 1.2
with self.assertRaises(ValueError) as cm:
format(f, ".%sf" % (INT_MAX + 1))
c = complex(f)
with self.assertRaises(ValueError) as cm:
format(c, ".%sf" % (INT_MAX + 1))
if __name__ == "__main__":
unittest.main()
| 44.534759 | 83 | 0.560399 |
acf5d37abe39c2a0f25e9de12d7d16dcc0714deb | 2,779 | py | Python | payparts/use_cases.py | LowerDeez/ok-payparts | 92623deaaeae9a6f321a76ee8dacf1f3911d7cbb | [
"MIT"
] | null | null | null | payparts/use_cases.py | LowerDeez/ok-payparts | 92623deaaeae9a6f321a76ee8dacf1f3911d7cbb | [
"MIT"
] | null | null | null | payparts/use_cases.py | LowerDeez/ok-payparts | 92623deaaeae9a6f321a76ee8dacf1f3911d7cbb | [
"MIT"
] | 1 | 2022-02-03T01:48:14.000Z | 2022-02-03T01:48:14.000Z | from typing import Dict
from django.forms import ValidationError
from django.utils.translation import ugettext_lazy as _
from payparts.payparts import PayPartsAPIAdapter
from payparts.consts import DEFAULT_MERCHANT_TYPE, DEFAULT_PARTS_COUNT
from payparts.exceptions import InvalidTokenError
from payparts.forms import PayloadForm, ProductForm
from payparts.models import Log
from payparts.signals import (
pay_parts_invalid_callback,
pay_parts_success_callback
)
__all__ = (
'GetRedirectUrlUseCase',
'ProcessCallbackUseCase'
)
class GetRedirectUrlUseCase:
"""
Use case to create payment and build redirect url to perform payment
"""
@staticmethod
def raise_errors(form) -> None:
if not form.is_valid():
raise ValidationError(form.errors)
def validate(self, data: Dict) -> None:
products = data.get('products')
if not products:
raise ValidationError(
_('You must provide products to create payment.'))
self.raise_errors(PayloadForm(data=data))
for product in products:
self.raise_errors(ProductForm(data=product))
def execute(self, data) -> str:
data['parts_count'] = (
data.get('parts_count') or
DEFAULT_PARTS_COUNT
)
data['merchant_type'] = (
data.get('merchant_type') or
DEFAULT_MERCHANT_TYPE
)
self.validate(data)
order_data = {
'order_id': data.pop('order_id'),
'amount': data.pop('amount'),
'products': data.pop('products')
}
adapter = PayPartsAPIAdapter(**data)
result = adapter.payment_create(order_data)
adapter.create_log(result, 'payment_create')
token = result.get('token')
if token:
return adapter.get_redirect_url(token)
raise InvalidTokenError(
code='token',
message=(
f'Invalid token. '
f'State: {result.get("state", "")}. '
f'Error: {result.get("message", "")}'
)
)
class ProcessCallbackUseCase:
"""
Use case to process PayParts callback
"""
def execute(self, request, data) -> None:
data['state'] = data.pop('paymentState')
adapter = PayPartsAPIAdapter()
log = adapter.create_log(data, 'callback')
is_valid = adapter.validate_signature(data)
if is_valid:
pay_parts_success_callback.send(
sender=Log,
log=log,
request=request
)
else:
pay_parts_invalid_callback.send(
sender=Log,
log=log,
request=request
)
| 26.721154 | 72 | 0.596977 |
acf5d439d815fe2067d306a42603b1e92813613a | 125 | py | Python | anime33/algo/urls.py | Wingtail/thirty-three-anime | c3719b8228d9aa14a4c31ab23984811d3920e8d5 | [
"MIT"
] | 1 | 2021-02-06T16:26:48.000Z | 2021-02-06T16:26:48.000Z | anime33/algo/urls.py | Wingtail/thirty-three-anime | c3719b8228d9aa14a4c31ab23984811d3920e8d5 | [
"MIT"
] | 1 | 2021-02-20T16:37:16.000Z | 2021-02-20T16:37:16.000Z | anime33/algo/urls.py | Pie31415/thirty-three-anime | c3719b8228d9aa14a4c31ab23984811d3920e8d5 | [
"MIT"
] | 2 | 2021-02-18T03:16:25.000Z | 2021-03-08T02:40:57.000Z | from django.urls import path
from . import views
urlpatterns = [
path('recommend', views.recommend, name='recommend')
] | 17.857143 | 56 | 0.72 |
acf5d4e1a92a42b9472156521a63f0eacbd31ecd | 39,917 | py | Python | bioptim/optimization/optimal_control_program.py | Naassila/bioptim | 511e7ba315de5ca8c3bdcc85decd43bac30676b9 | [
"MIT"
] | null | null | null | bioptim/optimization/optimal_control_program.py | Naassila/bioptim | 511e7ba315de5ca8c3bdcc85decd43bac30676b9 | [
"MIT"
] | null | null | null | bioptim/optimization/optimal_control_program.py | Naassila/bioptim | 511e7ba315de5ca8c3bdcc85decd43bac30676b9 | [
"MIT"
] | null | null | null | from typing import Union, Callable, Any
import os
import pickle
from copy import deepcopy
from math import inf
import biorbd_casadi as biorbd
import casadi
from casadi import MX, SX
import numpy as np
from .non_linear_program import NonLinearProgram as NLP
from .optimization_vector import OptimizationVector
from ..dynamics.configure_problem import DynamicsList, Dynamics
from ..dynamics.ode_solver import OdeSolver, OdeSolverBase
from ..dynamics.configure_problem import ConfigureProblem
from ..gui.plot import CustomPlot, PlotOcp
from ..gui.graph import OcpToConsole, OcpToGraph
from ..interfaces.biorbd_interface import BiorbdInterface
from ..limits.constraints import ConstraintFunction, ConstraintFcn, ConstraintList, Constraint, ContinuityFunctions
from ..limits.phase_transition import PhaseTransitionList
from ..limits.objective_functions import ObjectiveFcn, ObjectiveList, Objective
from ..limits.path_conditions import BoundsList, Bounds
from ..limits.path_conditions import InitialGuess, InitialGuessList
from ..limits.path_conditions import InterpolationType
from ..limits.penalty import PenaltyOption
from ..limits.objective_functions import ObjectiveFunction
from ..misc.__version__ import __version__
from ..misc.enums import ControlType, Solver, Shooting
from ..misc.mapping import BiMappingList, Mapping
from ..misc.utils import check_version
from ..optimization.parameters import ParameterList, Parameter
from ..optimization.solution import Solution
check_version(biorbd, "1.7.1", "1.8.0")
class OptimalControlProgram:
"""
The main class to define an ocp. This class prepares the full program and gives all
the needed interface to modify and solve the program
Attributes
----------
cx: [MX, SX]
The base type for the symbolic casadi variables
g: list
Constraints that are not phase dependent (mostly parameters and continuity constraints)
g_internal: list[list[Constraint]]
All the constraints internally defined by the OCP at each of the node of the phase
J: list
Objective values that are not phase dependent (mostly parameters)
isdef_x_init: bool
If the initial condition of the states are set
isdef_x_bounds: bool
If the bounds of the states are set
isdef_u_init: bool
If the initial condition of the controls are set
isdef_u_bounds: bool
If the bounds of the controls are set
nlp: NLP
All the phases of the ocp
n_phases: Union[int, list, tuple]
The number of phases of the ocp
n_threads: int
The number of thread to use if using multithreading
original_phase_time: list[float]
The time vector as sent by the user
original_values: dict
A copy of the ocp as it is after defining everything
phase_transitions: list[PhaseTransition]
The list of transition constraint between phases
solver: SolverInterface
A reference to the ocp solver
solver_type: Solver
The designated solver to solve the ocp
v: OptimizationVector
The variable optimization holder
version: dict
The version of all the underlying software. This is important when loading a previous ocp
Methods
-------
update_objectives(self, new_objective_function: Union[Objective, ObjectiveList])
The main user interface to add or modify objective functions in the ocp
update_objectives_target(self, target, phase=None, list_index=None)
Fast accessor to update the target of a specific objective function. To update target of global objective
(usually defined by parameters), one can pass 'phase=-1
update_constraints(self, new_constraint: Union[Constraint, ConstraintList])
The main user interface to add or modify constraint in the ocp
update_parameters(self, new_parameters: Union[Parameter, ParameterList])
The main user interface to add or modify parameters in the ocp
update_bounds(self, x_bounds: Union[Bounds, BoundsList], u_bounds: Union[Bounds, BoundsList])
The main user interface to add bounds in the ocp
update_initial_guess(
self,
x_init: Union[InitialGuess, InitialGuessList],
u_init: Union[InitialGuess, InitialGuessList],
param_init: Union[InitialGuess, InitialGuessList],
)
The main user interface to add initial guesses in the ocp
add_plot(self, fig_name: str, update_function: Callable, phase: int = -1, **parameters: Any)
The main user interface to add a new plot to the ocp
prepare_plots(self, automatically_organize: bool, show_bounds: bool,
shooting_type: Shooting) -> PlotOCP
Create all the plots associated with the OCP
solve(self, solver: Solver, show_online_optim: bool, solver_options: dict) -> Solution
Call the solver to actually solve the ocp
save(self, sol: Solution, file_path: str, stand_alone: bool = False)
Save the ocp and solution structure to the hard drive. It automatically create the required
folder if it does not exists. Please note that biorbd is required to load back this structure.
@staticmethod
load(file_path: str) -> list
Reload a previous optimization (*.bo) saved using save
_define_time(self, phase_time: Union[float, tuple], objective_functions: ObjectiveList, constraints: ConstraintList)
Declare the phase_time vector in v. If objective_functions or constraints defined a time optimization,
a sanity check is perform and the values of initial guess and bounds for these particular phases
__modify_penalty(self, new_penalty: Union[PenaltyOption, Parameter])
The internal function to modify a penalty. It is also stored in the original_values, meaning that if one
overrides an objective only the latter is preserved when saved
"""
def __init__(
self,
biorbd_model: Union[str, biorbd.Model, list, tuple],
dynamics: Union[Dynamics, DynamicsList],
n_shooting: Union[int, list, tuple],
phase_time: Union[int, float, list, tuple],
x_init: Union[InitialGuess, InitialGuessList] = None,
u_init: Union[InitialGuess, InitialGuessList] = None,
x_bounds: Union[Bounds, BoundsList] = None,
u_bounds: Union[Bounds, BoundsList] = None,
objective_functions: Union[Objective, ObjectiveList] = None,
constraints: Union[Constraint, ConstraintList] = None,
parameters: Union[Parameter, ParameterList] = None,
external_forces: Union[list, tuple] = None,
ode_solver: Union[list, OdeSolverBase, OdeSolver] = None,
control_type: Union[ControlType, list] = ControlType.CONSTANT,
variable_mappings: BiMappingList = None,
plot_mappings: Mapping = None,
phase_transitions: PhaseTransitionList = None,
n_threads: int = 1,
use_sx: bool = False,
skip_continuity: bool = False,
):
"""
Parameters
----------
biorbd_model: Union[str, biorbd.Model, list, tuple]
The biorbd model. If biorbd_model is an str, a new model is loaded. Otherwise, the references are used
dynamics: Union[Dynamics, DynamicsList]
The dynamics of the phases
n_shooting: Union[int, list[int]]
The number of shooting point of the phases
phase_time: Union[int, float, list, tuple]
The phase time of the phases
x_init: Union[InitialGuess, InitialGuessList]
The initial guesses for the states
u_init: Union[InitialGuess, InitialGuessList]
The initial guesses for the controls
x_bounds: Union[Bounds, BoundsList]
The bounds for the states
u_bounds: Union[Bounds, BoundsList]
The bounds for the controls
objective_functions: Union[Objective, ObjectiveList]
All the objective function of the program
constraints: Union[Constraint, ConstraintList]
All the constraints of the program
parameters: Union[Parameter, ParameterList]
All the parameters to optimize of the program
external_forces: Union[list, tuple]
The external forces acting on the center of mass of the segments specified in the bioMod
ode_solver: OdeSolverBase
The solver for the ordinary differential equations
control_type: ControlType
The type of controls for each phase
variable_mappings: BiMappingList
The mapping to apply on variables
plot_mappings: Mapping
The mapping to apply on the plots
phase_transitions: PhaseTransitionList
The transition types between the phases
n_threads: int
The number of thread to use while solving (multi-threading if > 1)
use_sx: bool
The nature of the casadi variables. MX are used if False.
skip_continuity: bool
This is mainly for internal purposes when creating an OCP not destined to be solved
"""
if isinstance(biorbd_model, str):
biorbd_model = [biorbd.Model(biorbd_model)]
elif isinstance(biorbd_model, biorbd.biorbd.Model):
biorbd_model = [biorbd_model]
elif isinstance(biorbd_model, (list, tuple)):
biorbd_model = [biorbd.Model(m) if isinstance(m, str) else m for m in biorbd_model]
else:
raise RuntimeError("biorbd_model must either be a string or an instance of biorbd.Model()")
self.version = {"casadi": casadi.__version__, "biorbd": biorbd.__version__, "bioptim": __version__}
self.n_phases = len(biorbd_model)
biorbd_model_path = [m.path().relativePath().to_string() for m in biorbd_model]
if isinstance(dynamics, Dynamics):
dynamics_type_tp = DynamicsList()
dynamics_type_tp.add(dynamics)
dynamics = dynamics_type_tp
elif not isinstance(dynamics, DynamicsList):
raise RuntimeError("dynamics should be a Dynamics or a DynamicsList")
self.original_values = {
"biorbd_model": biorbd_model_path,
"dynamics": dynamics,
"n_shooting": n_shooting,
"phase_time": phase_time,
"x_init": x_init,
"u_init": u_init,
"x_bounds": x_bounds,
"u_bounds": u_bounds,
"objective_functions": ObjectiveList(),
"constraints": ConstraintList(),
"parameters": ParameterList(),
"external_forces": external_forces,
"ode_solver": ode_solver,
"control_type": control_type,
"variable_mappings": variable_mappings,
"plot_mappings": plot_mappings,
"phase_transitions": phase_transitions,
"n_threads": n_threads,
"use_sx": use_sx,
}
# Check integrity of arguments
if not isinstance(n_threads, int) or isinstance(n_threads, bool) or n_threads < 1:
raise RuntimeError("n_threads should be a positive integer greater or equal than 1")
ns = n_shooting
if not isinstance(ns, int) or ns < 2:
if isinstance(ns, (tuple, list)):
if sum([True for i in ns if not isinstance(i, int) and not isinstance(i, bool)]) != 0:
raise RuntimeError("n_shooting should be a positive integer (or a list of) greater or equal than 2")
else:
raise RuntimeError("n_shooting should be a positive integer (or a list of) greater or equal than 2")
if not isinstance(phase_time, (int, float)):
if isinstance(phase_time, (tuple, list)):
if sum([True for i in phase_time if not isinstance(i, (int, float))]) != 0:
raise RuntimeError("phase_time should be a number or a list of number")
else:
raise RuntimeError("phase_time should be a number or a list of number")
if x_bounds is None:
x_bounds = BoundsList()
elif isinstance(x_bounds, Bounds):
x_bounds_tp = BoundsList()
x_bounds_tp.add(bounds=x_bounds)
x_bounds = x_bounds_tp
elif not isinstance(x_bounds, BoundsList):
raise RuntimeError("x_bounds should be built from a Bounds or a BoundsList")
if u_bounds is None:
u_bounds = BoundsList()
elif isinstance(u_bounds, Bounds):
u_bounds_tp = BoundsList()
u_bounds_tp.add(bounds=u_bounds)
u_bounds = u_bounds_tp
elif not isinstance(u_bounds, BoundsList):
raise RuntimeError("u_bounds should be built from a Bounds or a BoundsList")
if x_init is None:
x_init = InitialGuessList()
elif isinstance(x_init, InitialGuess):
x_init_tp = InitialGuessList()
x_init_tp.add(x_init)
x_init = x_init_tp
elif not isinstance(x_init, InitialGuessList):
raise RuntimeError("x_init should be built from a InitialGuess or InitialGuessList")
if u_init is None:
u_init = InitialGuessList()
elif isinstance(u_init, InitialGuess):
u_init_tp = InitialGuessList()
u_init_tp.add(u_init)
u_init = u_init_tp
elif not isinstance(u_init, InitialGuessList):
raise RuntimeError("u_init should be built from a InitialGuess or InitialGuessList")
if objective_functions is None:
objective_functions = ObjectiveList()
elif isinstance(objective_functions, Objective):
objective_functions_tp = ObjectiveList()
objective_functions_tp.add(objective_functions)
objective_functions = objective_functions_tp
elif not isinstance(objective_functions, ObjectiveList):
raise RuntimeError("objective_functions should be built from an Objective or ObjectiveList")
if constraints is None:
constraints = ConstraintList()
elif isinstance(constraints, Constraint):
constraints_tp = ConstraintList()
constraints_tp.add(constraints)
constraints = constraints_tp
elif not isinstance(constraints, ConstraintList):
raise RuntimeError("constraints should be built from an Constraint or ConstraintList")
if parameters is None:
parameters = ParameterList()
elif not isinstance(parameters, ParameterList):
raise RuntimeError("parameters should be built from an ParameterList")
if phase_transitions is None:
phase_transitions = PhaseTransitionList()
elif not isinstance(phase_transitions, PhaseTransitionList):
raise RuntimeError("phase_transitions should be built from an PhaseTransitionList")
if ode_solver is None:
ode_solver = OdeSolver.RK4()
elif not isinstance(ode_solver, OdeSolverBase):
raise RuntimeError("ode_solver should be built an instance of OdeSolver")
if not isinstance(use_sx, bool):
raise RuntimeError("use_sx should be a bool")
# Type of CasADi graph
if use_sx:
self.cx = SX
else:
self.cx = MX
# Declare optimization variables
self.J = []
self.J_internal = []
self.g = []
self.g_internal = []
self.v = OptimizationVector(self)
# nlp is the core of a phase
self.nlp = [NLP() for _ in range(self.n_phases)]
NLP.add(self, "model", biorbd_model, False)
NLP.add(self, "phase_idx", [i for i in range(self.n_phases)], False)
# Define some aliases
NLP.add(self, "ns", n_shooting, False)
for nlp in self.nlp:
if nlp.ns < 1:
raise RuntimeError("Number of shooting points must be at least 1")
self.n_threads = n_threads
NLP.add(self, "n_threads", n_threads, True)
self.solver_type = Solver.NONE
self.solver = None
# External forces
if external_forces is not None:
external_forces = BiorbdInterface.convert_array_to_external_forces(external_forces)
NLP.add(self, "external_forces", external_forces, False)
plot_mappings = plot_mappings if plot_mappings is not None else {}
reshaped_plot_mappings = []
for i in range(self.n_phases):
reshaped_plot_mappings.append({})
for key in plot_mappings:
reshaped_plot_mappings[i][key] = plot_mappings[key][i]
NLP.add(self, "plot_mapping", reshaped_plot_mappings, False, name="plot_mapping")
# Prepare the parameters to optimize
self.phase_transitions = []
if len(parameters) > 0:
self.update_parameters(parameters)
# Declare the time to optimize
self._define_time(phase_time, objective_functions, constraints)
# Prepare path constraints and dynamics of the program
NLP.add(self, "dynamics_type", dynamics, False)
NLP.add(self, "ode_solver", ode_solver, True)
NLP.add(self, "control_type", control_type, True)
# Prepare the variable mappings
if variable_mappings is None:
variable_mappings = BiMappingList()
NLP.add(self, "variable_mappings", variable_mappings, True)
# Prepare the dynamics
for i in range(self.n_phases):
self.nlp[i].initialize(self.cx)
ConfigureProblem.initialize(self, self.nlp[i])
if (
self.nlp[0].states.shape != self.nlp[i].states.shape
or self.nlp[0].controls.shape != self.nlp[i].controls.shape
):
raise RuntimeError("Dynamics with different nx or nu is not supported yet")
self.nlp[i].ode_solver.prepare_dynamic_integrator(self, self.nlp[i])
# Define the actual NLP problem
self.v.define_ocp_shooting_points()
# Define continuity constraints
# Prepare phase transitions (Reminder, it is important that parameters are declared before,
# otherwise they will erase the phase_transitions)
self.phase_transitions = phase_transitions.prepare_phase_transitions(self)
# Skipping creates a valid but unsolvable OCP class
if not skip_continuity:
# Inner- and inter-phase continuity
ContinuityFunctions.continuity(self)
self.isdef_x_init = False
self.isdef_u_init = False
self.isdef_x_bounds = False
self.isdef_u_bounds = False
self.update_bounds(x_bounds, u_bounds)
self.update_initial_guess(x_init, u_init)
# Prepare constraints
self.update_constraints(constraints)
# Prepare objectives
self.update_objectives(objective_functions)
def update_objectives(self, new_objective_function: Union[Objective, ObjectiveList]):
"""
The main user interface to add or modify objective functions in the ocp
Parameters
----------
new_objective_function: Union[Objective, ObjectiveList]
The objective to add to the ocp
"""
if isinstance(new_objective_function, Objective):
self.__modify_penalty(new_objective_function)
elif isinstance(new_objective_function, ObjectiveList):
for objective_in_phase in new_objective_function:
for objective in objective_in_phase:
self.__modify_penalty(objective)
else:
raise RuntimeError("new_objective_function must be a Objective or an ObjectiveList")
def update_objectives_target(self, target, phase=None, list_index=None):
"""
Fast accessor to update the target of a specific objective function. To update target of global objective
(usually defined by parameters), one can pass 'phase=-1'
Parameters
----------
target: np.ndarray
The new target of the objective function. The last dimension must be the number of frames
phase: int
The phase the objective is in. None is interpreted as zero if the program has one phase. The value -1
changes the values of ocp.J
list_index: int
The objective index
"""
if phase is None and len(self.nlp) == 1:
phase = 0
if list_index is None:
raise ValueError("'phase' must be defined")
ObjectiveFunction.update_target(self.nlp[phase] if phase >= 0 else self, list_index, target)
def update_constraints(self, new_constraint: Union[Constraint, ConstraintList]):
"""
The main user interface to add or modify constraint in the ocp
Parameters
----------
new_constraint: Union[Constraint, ConstraintList]
The constraint to add to the ocp
"""
if isinstance(new_constraint, Constraint):
self.__modify_penalty(new_constraint)
elif isinstance(new_constraint, ConstraintList):
for constraints_in_phase in new_constraint:
for constraint in constraints_in_phase:
self.__modify_penalty(constraint)
else:
raise RuntimeError("new_constraint must be a Constraint or a ConstraintList")
def update_parameters(self, new_parameters: Union[Parameter, ParameterList]):
"""
The main user interface to add or modify parameters in the ocp
Parameters
----------
new_parameters: Union[Parameter, ParameterList]
The parameters to add to the ocp
"""
if isinstance(new_parameters, Parameter):
self.__modify_penalty(new_parameters)
elif isinstance(new_parameters, ParameterList):
for parameter in new_parameters:
self.__modify_penalty(parameter)
else:
raise RuntimeError("new_parameter must be a Parameter or a ParameterList")
def update_bounds(
self, x_bounds: Union[Bounds, BoundsList] = BoundsList(), u_bounds: Union[Bounds, BoundsList] = BoundsList()
):
"""
The main user interface to add bounds in the ocp
Parameters
----------
x_bounds: Union[Bounds, BoundsList]
The state bounds to add
u_bounds: Union[Bounds, BoundsList]
The control bounds to add
"""
if x_bounds:
NLP.add_path_condition(self, x_bounds, "x_bounds", Bounds, BoundsList)
if u_bounds:
NLP.add_path_condition(self, u_bounds, "u_bounds", Bounds, BoundsList)
if self.isdef_x_bounds and self.isdef_u_bounds:
self.v.define_ocp_bounds()
for nlp in self.nlp:
for key in nlp.states.keys():
nlp.plot[f"{key}_states"].bounds = nlp.x_bounds[nlp.states[key].index]
for key in nlp.controls.keys():
nlp.plot[f"{key}_controls"].bounds = nlp.u_bounds[nlp.controls[key].index]
def update_initial_guess(
self,
x_init: Union[InitialGuess, InitialGuessList] = InitialGuessList(),
u_init: Union[InitialGuess, InitialGuessList] = InitialGuessList(),
param_init: Union[InitialGuess, InitialGuessList] = InitialGuessList(),
):
"""
The main user interface to add initial guesses in the ocp
Parameters
----------
x_init: Union[Bounds, BoundsList]
The state initial guess to add
u_init: Union[Bounds, BoundsList]
The control initial guess to add
param_init: Union[Bounds, BoundsList]
The parameters initial guess to add
"""
if x_init:
NLP.add_path_condition(self, x_init, "x_init", InitialGuess, InitialGuessList)
if u_init:
NLP.add_path_condition(self, u_init, "u_init", InitialGuess, InitialGuessList)
if isinstance(param_init, InitialGuess):
param_init_list = InitialGuessList()
param_init_list.add(param_init)
else:
param_init_list = param_init
for param in param_init_list:
if not param.name:
raise ValueError("update_initial_guess must specify a name for the parameters")
try:
idx = self.v.parameters_in_list.index(param.name)
self.v.parameters_in_list[idx].initial_guess.init = param.init
except ValueError:
raise ValueError("update_initial_guess cannot declare new parameters")
if self.isdef_x_init and self.isdef_u_init:
self.v.define_ocp_initial_guess()
def add_plot(self, fig_name: str, update_function: Callable, phase: int = -1, **parameters: Any):
"""
The main user interface to add a new plot to the ocp
Parameters
----------
fig_name: str
The name of the figure, it the name already exists, it is merged
update_function: Callable
The update function callable using f(states, controls, parameters, **parameters)
phase: int
The phase to add the plot to. -1 is the last
parameters: dict
Any parameters to pass to the update_function
"""
if "combine_to" in parameters:
raise RuntimeError(
"'combine_to' cannot be specified in add_plot, please use same 'fig_name' to combine plots"
)
# --- Solve the program --- #
if len(self.nlp) == 1:
phase = 0
else:
if phase < 0:
raise RuntimeError("phase_idx must be specified for multiphase OCP")
nlp = self.nlp[phase]
custom_plot = CustomPlot(update_function, **parameters)
plot_name = "no_name"
if fig_name in nlp.plot:
# Make sure we add a unique name in the dict
custom_plot.combine_to = fig_name
if fig_name:
cmp = 0
while True:
plot_name = f"{fig_name}_phase{phase}_{cmp}"
if plot_name not in nlp.plot:
break
cmp += 1
else:
plot_name = fig_name
nlp.plot[plot_name] = custom_plot
def prepare_plots(
self,
automatically_organize: bool = True,
show_bounds: bool = False,
shooting_type: Shooting = Shooting.MULTIPLE,
use_scipy_integrator: bool = False,
) -> PlotOcp:
"""
Create all the plots associated with the OCP
Parameters
----------
automatically_organize: bool
If the graphs should be parsed on the screen
show_bounds: bool
If the ylim should fit the bounds
shooting_type: Shooting
What type of integration
use_scipy_integrator: bool
Use the scipy solve_ivp integrator for RungeKutta 45 instead of currently defined integrator
Returns
-------
The PlotOcp class
"""
return PlotOcp(
self,
automatically_organize=automatically_organize,
show_bounds=show_bounds,
shooting_type=shooting_type,
use_scipy_integrator=use_scipy_integrator,
)
def solve(
self,
solver: Solver = Solver.IPOPT,
warm_start: Solution = None,
show_online_optim: bool = False,
show_options: dict = None,
solver_options: dict = None,
) -> Solution:
"""
Call the solver to actually solve the ocp
Parameters
----------
solver: Solver
The solver which will be used to solve the ocp
warm_start: Solution
The solution to pass to the warm start method
show_online_optim: bool
If the plot should be shown while optimizing. It will slow down the optimization a bit and is only
available with Solver.IPOPT
show_options: dict
The graphs option to pass to PlotOcp
solver_options: dict
Any options to change the behavior of the solver. To know which options are available, you can refer to the
manual of the corresponding solver
Returns
-------
The optimized solution structure
"""
if solver == Solver.IPOPT and self.solver_type != Solver.IPOPT:
from ..interfaces.ipopt_interface import IpoptInterface
self.solver = IpoptInterface(self)
elif solver == Solver.ACADOS and self.solver_type != Solver.ACADOS:
from ..interfaces.acados_interface import AcadosInterface
if solver_options is None:
solver_options = {}
self.solver = AcadosInterface(self, **solver_options)
elif self.solver_type == Solver.NONE:
raise RuntimeError("Solver not specified")
self.solver_type = solver
if show_online_optim:
self.solver.online_optim(self, show_options)
self.solver.configure(solver_options)
if warm_start is not None:
OptimalControlProgram.set_warm_start(sol=warm_start)
self.solver.solve()
return Solution(self, self.solver.get_optimized_value())
def set_warm_start(self, sol: Solution):
"""
Modify x and u initial guess based on a solution.
Parameters
----------
sol: Solution
The solution to initiate the OCP from
"""
state, ctrl, param = sol.states, sol.controls, sol.parameters
u_init_guess = InitialGuessList()
x_init_guess = InitialGuessList()
param_init_guess = InitialGuessList()
for i in range(self.n_phases):
if self.n_phases == 1:
if self.nlp[i].control_type == ControlType.LINEAR_CONTINUOUS:
u_init_guess.add(ctrl["all"], interpolation=InterpolationType.EACH_FRAME)
else:
u_init_guess.add(ctrl["all"][:, :-1], interpolation=InterpolationType.EACH_FRAME)
x_init_guess.add(state["all"], interpolation=InterpolationType.EACH_FRAME)
else:
if self.nlp[i].control_type == ControlType.LINEAR_CONTINUOUS:
u_init_guess.add(ctrl[i]["all"], interpolation=InterpolationType.EACH_FRAME)
else:
u_init_guess.add(ctrl[i]["all"][:, :-1], interpolation=InterpolationType.EACH_FRAME)
x_init_guess.add(state[i]["all"], interpolation=InterpolationType.EACH_FRAME)
for key in param:
if key != "all":
param_init_guess.add(param[key], name=key)
self.update_initial_guess(x_init=x_init_guess, u_init=u_init_guess, param_init=param_init_guess)
self.solver.set_lagrange_multiplier(sol)
    def save(self, sol: Solution, file_path: str, stand_alone: bool = False):
        """
        Save the ocp and solution structure to the hard drive. It automatically create the required
        folder if it does not exists. Please note that biorbd is required to load back this structure.

        Parameters
        ----------
        sol: Solution
            The solution structure to save
        file_path: str
            The path to solve the structure. It creates a .bo (BiOptim file)
        stand_alone: bool
            If set to True, the variable dictionaries (states, controls and parameters) are saved instead of the full
            Solution class itself. This allows to load the saved file into a setting where bioptim is not installed
            using the pickle package, but prevents from using the class methods Solution offers after loading the file

        Raises
        ------
        RuntimeError
            If file_path has an extension other than '' or '.bo'
        """
        _, ext = os.path.splitext(file_path)
        if ext == "":
            # Default to the BiOptim extension when none is given.
            file_path = file_path + ".bo"
        elif ext != ".bo":
            raise RuntimeError(f"Incorrect extension({ext}), it should be (.bo) or (.bob) if you use save_get_data.")

        if stand_alone:
            # TODO check if this file is loaded when load is used, and raise an error
            data_to_save = sol.states, sol.controls, sol.parameters
        else:
            sol_copy = sol.copy()
            sol_copy.ocp = None  # Ocp is not pickable
            data_to_save = {"ocp_initializer": self.original_values, "sol": sol_copy, "versions": self.version}

        # Create folder if necessary
        directory, _ = os.path.split(file_path)
        if directory != "" and not os.path.isdir(directory):
            os.makedirs(directory)

        with open(file_path, "wb") as file:
            pickle.dump(data_to_save, file)
    @staticmethod
    def load(file_path: str) -> list:
        """
        Reload a previous optimization (*.bo) saved using save

        Parameters
        ----------
        file_path: str
            The path to the *.bo file

        Returns
        -------
        The ocp and sol structure. If it was saved, the iterations are also loaded

        Raises
        ------
        RuntimeError
            If any package version recorded in the file differs from the installed one
        """
        # SECURITY NOTE: pickle.load executes arbitrary code on load; only open
        # .bo files from trusted sources.
        with open(file_path, "rb") as file:
            data = pickle.load(file)
            # Rebuild the OCP from the constructor arguments recorded at save time.
            ocp = OptimalControlProgram(**data["ocp_initializer"])
            for key in data["versions"].keys():
                # Refuse to load across mismatched package versions.
                if data["versions"][key] != ocp.version[key]:
                    raise RuntimeError(
                        f"Version of {key} from file ({data['versions'][key]}) is not the same as the "
                        f"installed version ({ocp.version[key]})"
                    )
            sol = data["sol"]
            # The saved solution had its ocp stripped (not picklable); reattach
            # a simplified view of the freshly rebuilt one.
            sol.ocp = Solution.SimplifiedOCP(ocp)
            out = [ocp, sol]
        return out
    def print(
        self,
        to_console: bool = True,
        to_graph: bool = True,
    ):
        """
        Display a summary of the OCP.

        Parameters
        ----------
        to_console: bool
            If True, print the OCP structure to the terminal
        to_graph: bool
            If True, render the OCP structure as a graph
        """
        if to_console:
            display_console = OcpToConsole(self)
            display_console.print()

        if to_graph:
            display_graph = OcpToGraph(self)
            display_graph.print()
    def _define_time(
        self,
        phase_time: Union[int, float, list, tuple],
        objective_functions: ObjectiveList,
        constraints: ConstraintList,
    ):
        """
        Declare the phase_time vector in v. If objective_functions or constraints defined a time optimization,
        a sanity check is perform and the values of initial guess and bounds for these particular phases

        Parameters
        ----------
        phase_time: Union[int, float, list, tuple]
            The time of all the phases
        objective_functions: ObjectiveList
            All the objective functions. It is used to scan if any time optimization was defined
        constraints: ConstraintList
            All the constraint functions. It is used to scan if any free time was defined
        """

        def define_parameters_phase_time(
            ocp: OptimalControlProgram,
            penalty_functions: Union[ObjectiveList, ConstraintList],
            _initial_time_guess: list,
            _phase_time: list,
            _time_min: list,
            _time_max: list,
            _has_penalty: list = None,
        ) -> list:
            """
            Sanity check to ensure that only one time optimization is defined per phase. It also creates the time vector
            for initial guesses and bounds

            Parameters
            ----------
            ocp: OptimalControlProgram
                A reference to the ocp
            penalty_functions: Union[ObjectiveList, ConstraintList]
                The list to parse to ensure no double free times are declared
            _initial_time_guess: list
                The list of all initial guesses for the free time optimization
            _phase_time: list
                Replaces the values where free time is found for MX or SX
            _time_min: list
                Minimal bounds for the time parameter
            _time_max: list
                Maximal bounds for the time parameter
            _has_penalty: list[bool]
                If a penalty was previously found. This should be None on the first call to ensure proper initialization

            Returns
            -------
            The state of has_penalty
            """
            if _has_penalty is None:
                _has_penalty = [False] * ocp.n_phases

            for i, penalty_functions_phase in enumerate(penalty_functions):
                for pen_fun in penalty_functions_phase:
                    if not pen_fun:
                        continue
                    if (
                        pen_fun.type == ObjectiveFcn.Mayer.MINIMIZE_TIME
                        or pen_fun.type == ObjectiveFcn.Lagrange.MINIMIZE_TIME
                        or pen_fun.type == ConstraintFcn.TIME_CONSTRAINT
                    ):
                        # At most one time penalty per phase (accumulated across
                        # both objective and constraint scans via _has_penalty).
                        if _has_penalty[i]:
                            raise RuntimeError("Time constraint/objective cannot declare more than once")
                        _has_penalty[i] = True

                        # Replace the fixed value by a CasADi symbol; the old
                        # value becomes the initial guess of the free time.
                        _initial_time_guess.append(_phase_time[i])
                        _phase_time[i] = ocp.cx.sym(f"time_phase_{i}", 1, 1)
                        # Constraints carry bounds as attributes; objectives as params.
                        if pen_fun.type.get_type() == ConstraintFunction:
                            _time_min.append(pen_fun.min_bound if pen_fun.min_bound else 0)
                            _time_max.append(pen_fun.max_bound if pen_fun.max_bound else inf)
                        else:
                            _time_min.append(pen_fun.params["min_bound"] if "min_bound" in pen_fun.params else 0)
                            _time_max.append(pen_fun.params["max_bound"] if "max_bound" in pen_fun.params else inf)
            return _has_penalty

        NLP.add(self, "t_initial_guess", phase_time, False)
        self.original_phase_time = phase_time
        if isinstance(phase_time, (int, float)):
            phase_time = [phase_time]
        phase_time = list(phase_time)
        initial_time_guess, time_min, time_max = [], [], []
        has_penalty = define_parameters_phase_time(
            self, objective_functions, initial_time_guess, phase_time, time_min, time_max
        )
        define_parameters_phase_time(
            self, constraints, initial_time_guess, phase_time, time_min, time_max, _has_penalty=has_penalty
        )

        # Add to the nlp
        NLP.add(self, "tf", phase_time, False)
        # t0 of each phase is the (possibly symbolic) tf of the previous one.
        NLP.add(self, "t0", [0] + [nlp.tf for i, nlp in enumerate(self.nlp) if i != len(self.nlp) - 1], False)
        # max(ns, 1) guards against a zero-shooting-node phase.
        NLP.add(self, "dt", [self.nlp[i].tf / max(self.nlp[i].ns, 1) for i in range(self.n_phases)], False)

        # Add to the v vector
        i = 0
        for nlp in self.nlp:
            # Only phases whose tf was replaced by a symbol get a time parameter;
            # i indexes into the compacted time_min/time_max/initial_time_guess lists.
            if isinstance(nlp.tf, self.cx):
                time_bounds = Bounds(time_min[i], time_max[i], interpolation=InterpolationType.CONSTANT)
                time_init = InitialGuess(initial_time_guess[i])
                time_param = Parameter(
                    cx=nlp.tf, function=None, size=1, bounds=time_bounds, initial_guess=time_init, name="time"
                )
                self.v.add_parameter(time_param)
                i += 1
    def __modify_penalty(self, new_penalty: Union[PenaltyOption, Parameter]):
        """
        The internal function to modify a penalty. It is also stored in the original_values, meaning that if one
        overrides an objective only the latter is preserved when saved

        Parameters
        ----------
        new_penalty: PenaltyOption
            Any valid option to add to the program
        """
        if not new_penalty:
            return
        phase_idx = new_penalty.phase

        # Copy to self.original_values so it can be save/load
        pen = new_penalty.type.get_type()
        self.original_values[pen.penalty_nature()].add(deepcopy(new_penalty))
        # Register (or replace) the penalty in the target phase's pool.
        new_penalty.add_or_replace_to_penalty_pool(self, self.nlp[phase_idx])
| 41.580208 | 120 | 0.631961 |
acf5d5f2773f051ec3b60071d684fd7fc78a90fa | 2,212 | py | Python | chainer_chemistry/dataset/preprocessors/gin_gwm_preprocessor.py | diam045/chainer-chemistry | aedd64049e7b2480a59c44b186171296ea69e55e | [
"MIT"
] | null | null | null | chainer_chemistry/dataset/preprocessors/gin_gwm_preprocessor.py | diam045/chainer-chemistry | aedd64049e7b2480a59c44b186171296ea69e55e | [
"MIT"
] | null | null | null | chainer_chemistry/dataset/preprocessors/gin_gwm_preprocessor.py | diam045/chainer-chemistry | aedd64049e7b2480a59c44b186171296ea69e55e | [
"MIT"
] | null | null | null | from chainer_chemistry.dataset.preprocessors.common \
import construct_atomic_number_array, construct_adj_matrix
from chainer_chemistry.dataset.preprocessors.common import construct_supernode_feature
from chainer_chemistry.dataset.preprocessors.common import type_check_num_atoms
from chainer_chemistry.dataset.preprocessors.mol_preprocessor \
import MolPreprocessor
class GINGWMPreprocessor(MolPreprocessor):
    """GIN-GWM Preprocessor.

    Converts an RDKit molecule into the inputs expected by a GIN network with
    a Graph Warp Module: an atomic-number array, an adjacency matrix, and a
    supernode feature vector.
    """

    def __init__(self, max_atoms=-1, out_size=-1, out_size_super=-1, add_Hs=False):
        """
        Initialize the GIN-GWM Preprocessor.

        :param max_atoms: integer, Max number of atoms for each molecule,
            if the number of atoms is more than this value,
            this data is simply ignored.
            Setting negative value indicates no limit for max atoms.
        :param out_size: integer, It specifies the size of array returned by
            `get_input_features`.
            If the number of atoms in the molecule is less than this value,
            the returned arrays is padded to have fixed size.
            Setting negative value indicates do not pad returned array.
        :param out_size_super (int): indicate the length of the super node feature.
        :param add_Hs: boolean. if true, add Hydrogens explicitly.
        """
        super(GINGWMPreprocessor, self).__init__(add_Hs=add_Hs)
        # Padding target must be able to hold the largest allowed molecule.
        if max_atoms >= 0 and out_size >= 0 and max_atoms > out_size:
            raise ValueError('max_atoms {} must be less or equal to '
                             'out_size {}'.format(max_atoms, out_size))
        self.max_atoms = max_atoms
        self.out_size = out_size
        self.out_size_super = out_size_super

    def get_input_features(self, mol):
        """Compute the model inputs for one molecule.

        Args:
            mol (Mol): RDKit molecule.

        Returns:
            tuple: (atom_array, adj_array, super_node_x) — atomic numbers,
            adjacency matrix, and the supernode feature vector.
        """
        # Raises if the molecule exceeds max_atoms (data is then skipped upstream).
        type_check_num_atoms(mol, self.max_atoms)
        atom_array = construct_atomic_number_array(mol, out_size=self.out_size)
        adj_array = construct_adj_matrix(mol, out_size=self.out_size)
        super_node_x = construct_supernode_feature(mol, atom_array, adj_array, out_size=self.out_size_super)
        return atom_array, adj_array, super_node_x
acf5d7a151db07e1f584b6b06d770ce3d8463c33 | 5,907 | py | Python | pystac/utils.py | gwnoseworthy/pystac | c87f073bacc82ae5dfb125f74cb29774678dad11 | [
"Apache-2.0"
] | null | null | null | pystac/utils.py | gwnoseworthy/pystac | c87f073bacc82ae5dfb125f74cb29774678dad11 | [
"Apache-2.0"
] | null | null | null | pystac/utils.py | gwnoseworthy/pystac | c87f073bacc82ae5dfb125f74cb29774678dad11 | [
"Apache-2.0"
] | null | null | null | import os
import posixpath
from urllib.parse import (urlparse, ParseResult as URLParseResult)
from datetime import timezone
import dateutil.parser
# Allow for modifying the path library for testability
# (i.e. testing Windows path manipulation on non-Windows systems)
_pathlib = os.path
def _urlparse(href):
"""Version of URL parse that takes into account windows paths.
A windows absolute path will be parsed with a scheme from urllib.parse.urlparse.
This method will take this into account.
"""
parsed = urlparse(href)
if parsed.scheme != '' and href.lower().startswith('{}:\\'.format(parsed.scheme)):
return URLParseResult(scheme='',
netloc='',
path='{}:{}'.format(parsed.scheme, parsed.path),
params=parsed.params,
query=parsed.query,
fragment=parsed.fragment)
else:
return parsed
def _join(is_path, *args):
"""Version of os.path.join that takes into account whether or not we are working
with a URL.
A windows system shouldn't use os.path.join if we're working with a URL.
"""
if is_path:
return _pathlib.join(*args)
else:
return posixpath.join(*args)
def make_relative_href(source_href, start_href, start_is_dir=False):
    """Makes a given HREF relative to the given starting HREF.

    Args:
        source_href (str): The HREF to make relative.
        start_href (str): The HREF that the resulting HREF will be relative
            with respect to.
        start_is_dir (str): If True, the start_href is treated as a directory.
            Otherwise, the start_href is considered to be a file HREF.
            Defaults to False.

    Returns:
        str: The relative HREF (prefixed with ``./`` when needed). If
        source_href and start_href do not share a scheme and network location,
        source_href is returned unchanged.
    """
    parsed_source = _urlparse(source_href)
    parsed_start = _urlparse(start_href)

    same_root = (parsed_source.scheme == parsed_start.scheme
                 and parsed_source.netloc == parsed_start.netloc)
    if not same_root:
        return source_href

    is_path = parsed_start.scheme == ''
    anchor_dir = parsed_start.path if start_is_dir else _pathlib.dirname(parsed_start.path)

    rel = _pathlib.relpath(parsed_source.path, anchor_dir)
    if not is_path:
        # URLs always use forward slashes, even when computed on Windows.
        rel = rel.replace('\\', '/')
    if not rel.startswith('.'):
        rel = _join(is_path, '.', rel)
    return rel
def make_absolute_href(source_href, start_href=None, start_is_dir=False):
    """Makes a given HREF absolute based on the given starting HREF.

    Args:
        source_href (str): The HREF to make absolute.
        start_href (str): The HREF used as the basis for resolving relative
            paths. Defaults to the current working directory.
        start_is_dir (str): If True, the start_href is treated as a directory.
            Otherwise, the start_href is considered to be a file HREF.
            Defaults to False.

    Returns:
        str: The absolute HREF. An already-absolute source_href is returned
        unchanged; a None source_href returns None.
    """
    if source_href is None:
        return None

    if start_href is None:
        start_href = os.getcwd()
        start_is_dir = True

    parsed_source = _urlparse(source_href)
    if parsed_source.scheme != '':
        # Already a full URL — nothing to resolve.
        return source_href
    if _pathlib.isabs(parsed_source.path):
        # Already an absolute local path.
        return source_href

    parsed_start = _urlparse(start_href)
    is_path = parsed_start.scheme == ''
    anchor_dir = parsed_start.path if start_is_dir else _pathlib.dirname(parsed_start.path)
    abs_path = _pathlib.abspath(_join(is_path, anchor_dir, parsed_source.path))

    if parsed_start.scheme == '':
        return abs_path
    # Reassembling a URL: normalize separators and prepend scheme://netloc.
    abs_path = abs_path.replace('\\', '/')
    return '{}://{}{}'.format(parsed_start.scheme, parsed_start.netloc, abs_path)
def is_absolute_href(href):
    """Determines if an HREF is absolute or not.

    Args:
        href (str): The HREF to consider.

    Returns:
        bool: True if the given HREF is absolute (has a URL scheme or an
        absolute filesystem path), False if it is relative.
    """
    parsed = _urlparse(href)
    has_scheme = parsed.scheme != ''
    return has_scheme or _pathlib.isabs(parsed.path)
def datetime_to_str(dt):
    """Convert a python datetime to an ISO8601 string.

    Naive datetimes are assumed to be UTC, and a UTC offset is rendered as
    the ``Z`` suffix rather than ``+00:00``.

    Args:
        dt (datetime): The datetime to convert.

    Returns:
        str: The ISO8601 formatted string representing the datetime.
    """
    if dt.tzinfo is None:
        dt = dt.replace(tzinfo=timezone.utc)

    stamp = dt.isoformat()
    utc_suffix = '+00:00'
    if stamp.endswith(utc_suffix):
        stamp = stamp[:-len(utc_suffix)] + 'Z'
    return stamp
def str_to_datetime(s):
    """Parse a timestamp string into a datetime via dateutil's flexible parser."""
    return dateutil.parser.parse(s)
def geometry_to_bbox(geometry):
    """Extract the bounding box from a geojson geometry.

    Args:
        geometry (dict): GeoJSON geometry dict with a ``coordinates`` member.

    Returns:
        list: Bounding box of the geometry —
        ``[min_first, min_second, max_first, max_second]`` over the two
        coordinate components, formatted according to
        https://tools.ietf.org/html/rfc7946#section-5
    """
    firsts = []
    seconds = []

    def walk(node):
        # Recurse until reaching [x, y] coordinate pairs.
        for element in node:
            if isinstance(element[0], list):
                walk(element)
            else:
                a, b = element
                firsts.append(a)
                seconds.append(b)

    walk(geometry['coordinates'])
    return [min(firsts), min(seconds), max(firsts), max(seconds)]
| 30.606218 | 93 | 0.62519 |
acf5d7dbb1dda05818c7c3fa6ad50a7a371508ad | 2,804 | py | Python | pygs/graphserver/compiler/dedupe.py | jeriksson/graphserver | b853f3cecc8af00e02a04fd4e489c27527688284 | [
"BSD-3-Clause-Clear"
] | 1 | 2018-05-14T02:43:55.000Z | 2018-05-14T02:43:55.000Z | pygs/graphserver/compiler/dedupe.py | jeriksson/graphserver | b853f3cecc8af00e02a04fd4e489c27527688284 | [
"BSD-3-Clause-Clear"
] | null | null | null | pygs/graphserver/compiler/dedupe.py | jeriksson/graphserver | b853f3cecc8af00e02a04fd4e489c27527688284 | [
"BSD-3-Clause-Clear"
] | null | null | null | # eliminate duplicate service periods from a GTFS database
from graphserver.ext.gtfs.gtfsdb import GTFSDatabase
import sys
from optparse import OptionParser
def main():
    """Merge duplicate GTFS service periods under one canonical service_id.

    Service periods sharing the same day-of-week mask, date range, and set of
    calendar_dates exceptions are grouped; each group gets a synthetic id of
    the form ``<dowmask>-<start>-<end>-<n>`` and all referencing rows in
    calendar, calendar_dates and trips are rewritten to it.

    NOTE: this module uses Python 2 print statements.
    """
    usage = """usage: python dedupe.py <graphdb_filename>"""
    parser = OptionParser(usage=usage)
    (options, args) = parser.parse_args()

    if len(args) != 1:
        parser.print_help()
        exit(-1)

    graphdb_filename = args[0]

    gtfsdb = GTFSDatabase( graphdb_filename )

    # One row per distinct (dow mask, date range) combination, with the count
    # of service periods that share it.
    query = """
    SELECT count(*), monday, tuesday, wednesday, thursday, friday, saturday, sunday, start_date, end_date
    FROM calendar
    GROUP BY monday, tuesday, wednesday, thursday, friday, saturday, sunday, start_date, end_date"""
    duped_periods = gtfsdb.execute( query )

    equivilants = []

    for count, m,t,w,th,f,s,su,start_date,end_date in duped_periods:
        # no need to check for dupes if there's only one
        if count==1:
            continue

        #print count, m, t, w, th, f, s, su, start_date, end_date

        # get service_ids for this dow/start_date/end_date combination
        service_ids = [x[0] for x in list( gtfsdb.execute( "SELECT service_id FROM calendar where monday=? and tuesday=? and wednesday=? and thursday=? and friday=? and saturday=? and sunday=? and start_date=? and end_date=?", (m,t,w,th,f,s,su,start_date,end_date) ) ) ]

        # group by service periods with the same set of exceptions
        exception_set_grouper = {}
        for service_id in service_ids:
            # Sorted tuple of (date, exception_type) rows acts as a hashable
            # fingerprint of the period's exceptions.
            exception_set = list(gtfsdb.execute( "SELECT date, exception_type FROM calendar_dates WHERE service_id=?", (service_id,) ) )
            exception_set.sort()
            exception_set = tuple(exception_set)

            exception_set_grouper[exception_set] = exception_set_grouper.get(exception_set,[])
            exception_set_grouper[exception_set].append( service_id )

        # extend list of equivilants
        for i, exception_set_group in enumerate( exception_set_grouper.values() ):
            equivilants.append( ("%d%d%d%d%d%d%d-%s-%s-%d"%(m,t,w,th,f,s,su,start_date,end_date,i), exception_set_group) )

    for new_name, old_names in equivilants:
        for old_name in old_names:
            print old_name, new_name

            # Rewrite every table that references the old service_id.
            c = gtfsdb.conn.cursor()
            c.execute( "UPDATE calendar SET service_id=? WHERE service_id=?", (new_name, old_name) )
            c.execute( "UPDATE calendar_dates SET service_id=? WHERE service_id=?", (new_name, old_name) )
            c.execute( "UPDATE trips SET service_id=? WHERE service_id=?", (new_name, old_name) )
            gtfsdb.conn.commit()
            c.close()
if __name__=='__main__':
main() | 40.057143 | 271 | 0.631598 |
acf5d8aa411c3e795320b30584cb738a7e5d0609 | 1,926 | py | Python | rgd/geodata/models/mixins.py | Erotemic/ResonantGeoData | ff9aec9daf73353bcc95a9d30e98fcc5cdffc6e0 | [
"Apache-2.0"
] | null | null | null | rgd/geodata/models/mixins.py | Erotemic/ResonantGeoData | ff9aec9daf73353bcc95a9d30e98fcc5cdffc6e0 | [
"Apache-2.0"
] | null | null | null | rgd/geodata/models/mixins.py | Erotemic/ResonantGeoData | ff9aec9daf73353bcc95a9d30e98fcc5cdffc6e0 | [
"Apache-2.0"
] | null | null | null | """Mixin helper classes."""
from typing import Iterable
from celery import Task
from django.contrib.gis.db import models
from django.utils.translation import gettext_lazy as _
class Status(models.TextChoices):
    """Lifecycle states for celery-task processing of a model instance."""

    CREATED = 'created', _('Created but not queued')
    QUEUED = 'queued', _('Queued for processing')
    RUNNING = 'running', _('Processing')
    FAILED = 'failed', _('Failed')
    SUCCEEDED = 'success', _('Succeeded')
class TaskEventMixin(models.Model):
    """A mixin for models that must call a set of celery tasks.

    This mixin adds three class attributes:

    * ``task_funcs``, which should be the list of celery task functions that should be run
      on this model instance. Subclasses should set this attribute.
    * ``status``, a model field representing task execution status.
    * ``failure_reason``, a model field that can be set on this instance from within
      tasks for human-readable error logging.

    NOTE: you still need to register the pre/post save event. on_commit events
    should be registered as post_save events, not pre_save.
    """

    class Meta:
        abstract = True

    # Human-readable error detail, populated from within failing tasks.
    failure_reason = models.TextField(null=True)
    # Current task-processing state (see Status choices).
    status = models.CharField(max_length=20, default=Status.CREATED, choices=Status.choices)

    # Subclasses override with the celery tasks to dispatch for this model.
    task_funcs: Iterable[Task] = []

    def _run_tasks(self) -> None:
        """Mark the instance queued and dispatch each task with this record's id."""
        if not self.task_funcs:
            return
        self.status = Status.QUEUED
        # Restrict the save to 'status' so the save-signal handlers below
        # (which skip update_fields saves) do not re-trigger task dispatch.
        self.save(
            update_fields=[
                'status',
            ]
        )
        for func in self.task_funcs:
            func.delay(self.id)

    def _post_save_event_task(self, created: bool, *args, **kwargs) -> None:
        # Only dispatch on creation or on full (non-update_fields) saves.
        if not created and kwargs.get('update_fields'):
            return
        self._run_tasks()

    def _on_commit_event_task(self, *args, **kwargs) -> None:
        # Partial saves (update_fields) never re-dispatch tasks.
        if kwargs.get('update_fields'):
            return
        self._run_tasks()
| 31.064516 | 92 | 0.654725 |
acf5da171e52fcfaafc2343203c6d0b2bd34a0f3 | 1,517 | py | Python | check-in/daily/Merge-K-Sorted-Lists-(medium).py | huandrew99/LeetCode | aa36b48d06100ce5f0bc64c789a906ec29409440 | [
"MIT"
] | 36 | 2021-12-23T15:44:41.000Z | 2022-03-31T04:26:26.000Z | check-in/daily/Merge-K-Sorted-Lists-(medium).py | wzy0766/LeetCode-1 | 3070e672c519e8af74966811b8058a9baef8c0bc | [
"MIT"
] | null | null | null | check-in/daily/Merge-K-Sorted-Lists-(medium).py | wzy0766/LeetCode-1 | 3070e672c519e8af74966811b8058a9baef8c0bc | [
"MIT"
] | 11 | 2022-02-26T22:41:26.000Z | 2022-03-02T07:18:41.000Z | """
LC 23
You are given an array of k linked-lists lists, each linked-list is sorted in ascending order.
Merge all the linked-lists into one sorted linked-list and return it.
Example 1:
Input: lists = [[1,4,5],[1,3,4],[2,6]]
Output: [1,1,2,3,4,4,5,6]
Explanation: The linked-lists are:
[
1->4->5,
1->3->4,
2->6
]
merging them into one sorted list:
1->1->2->3->4->4->5->6
Example 2:
Input: lists = []
Output: []
Example 3:
Input: lists = [[]]
Output: []
"""
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
class Solution:
    def mergeKLists(self, lists: List[Optional[ListNode]]) -> Optional[ListNode]:
        """Merge k sorted linked lists into one sorted list.

        Performs bottom-up pairwise merging: on each pass, lists that are
        ``stride`` apart are merged in place, doubling the stride until only
        ``lists[0]`` remains. Time O(N log k), extra space O(1).
        """
        if not lists:
            return None
        stride = 1
        while stride < len(lists):
            left = 0
            while left + stride < len(lists):
                lists[left] = self.merge2(lists[left], lists[left + stride])
                left += 2 * stride
            stride *= 2
        return lists[0]

    def merge2(self, n1, n2):
        """Merge two sorted linked lists and return the head of the result."""
        sentinel = ListNode()
        tail = sentinel
        while n1 and n2:
            if n1.val < n2.val:
                tail.next, n1 = n1, n1.next
            else:
                tail.next, n2 = n2, n2.next
            tail = tail.next
        # At most one list still has nodes; append it wholesale.
        tail.next = n1 if n1 else n2
        return sentinel.next
"""
Time O(Nlogk)
Space O(1)
"""
| 22.308824 | 95 | 0.486486 |
acf5dacb25cbe19f45d6e80a4412ea4505ac3033 | 4,206 | py | Python | aiida/common/ipython/ipython_magics.py | joepvd/aiida_core | 6e9711046753332933f982971db1d7ac7e7ade58 | [
"BSD-2-Clause"
] | null | null | null | aiida/common/ipython/ipython_magics.py | joepvd/aiida_core | 6e9711046753332933f982971db1d7ac7e7ade58 | [
"BSD-2-Clause"
] | null | null | null | aiida/common/ipython/ipython_magics.py | joepvd/aiida_core | 6e9711046753332933f982971db1d7ac7e7ade58 | [
"BSD-2-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida_core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
"""
An IPython extension that provides a magic command to load
basic aiida commands.
This makes it much easier to start.
Produces output in:
* Plaintext (IPython [qt]console)
* HTML (IPython notebook, ``nbconvert --to html``, ``--to slides``)
* JSON (IPython notebook ``.ipynb`` files)
* LaTeX (e.g. ``ipython nbconvert example.ipynb --to LaTeX --post PDF``)
Notes on how to load it at start:
https://ipython.org/ipython-doc/3/config/intro.html
Usage
======
.. sourcecode:: ipython
In [1]: %load_ext aiida_magic
In [2]: %aiida
"""
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import IPython
import six
from IPython.core.magic import magics_class, line_magic, Magics, needs_local_scope
import aiida.utils.json as json
def add_to_ns(local_ns, name, obj):
    """
    Add a new variable with name ``name`` and value ``obj`` to the
    namespace ``local_ns``, emitting a ``UserWarning`` when an existing
    variable is being hidden.

    Example::

        # assuming that local_ns is a dictionary, e.g. from locals()
        import sys
        add_to_ns(local_ns, 'sys', sys)
    """
    import warnings

    if name in local_ns:
        # Previously a TODO: warn instead of silently clobbering the
        # user's existing variable.
        warnings.warn(
            "add_to_ns: overwriting existing variable '{}'".format(name),
            UserWarning,
            stacklevel=2,
        )
    local_ns[name] = obj
@magics_class
class AiiDALoaderMagics(Magics):
    """IPython magics providing ``%aiida`` to load the AiiDA environment.

    The magic stores its outcome in ``self.current_state`` / ``self.is_warning``
    and returns ``self`` so IPython picks the richest available representation
    (JSON, HTML, LaTeX or plain text) via the ``_repr_*_`` methods below.
    """

    @needs_local_scope
    @line_magic
    def aiida(self, line='', local_ns=None):
        """Load AiiDA in ipython (checking if it was already loaded), and
        inserts in the namespace the main AiiDA classes (the same that are
        loaded in ``verdi shell``.

        Usage::

            %aiida [optional parameters]

        .. todo:: implement parameters, e.g. for the profile to load.
        """
        # Imported lazily so the extension can be registered before a
        # database environment exists.
        from aiida import is_dbenv_loaded, load_dbenv
        from aiida.cmdline.utils.shell import get_start_namespace

        self.is_warning = False
        if is_dbenv_loaded():
            self.current_state = "Note! AiiDA DB environment already loaded! I do not reload it again."
            self.is_warning = True
        else:
            load_dbenv()
            self.current_state = "Loaded AiiDA DB environment."

        # Populate the caller's namespace with the verdi-shell names.
        user_ns = get_start_namespace()
        for k, v in six.iteritems(user_ns):
            add_to_ns(local_ns, k, v)

        return self

    def _repr_json_(self):
        """
        Output in JSON format.
        """
        obj = {'current_state': self.current_state}
        # IPython >= 3 accepts a dict directly; older versions want a string.
        if IPython.version_info[0] >= 3:
            return obj
        else:
            return json.dumps(obj)

    def _repr_html_(self):
        """
        Output in HTML format (warnings rendered in bold).
        """
        html = "<p>"
        if self.is_warning:
            html += "<strong>"
        html += self.current_state
        if self.is_warning:
            html += "</strong>"
        html += "</p>"
        return html

    def _repr_latex_(self):
        """
        Output in LaTeX format (warnings rendered emphasized).
        """
        if self.is_warning:
            latex = "\\emph{%s}\n" % self.current_state
        else:
            latex = "%s\n" % self.current_state
        return latex

    def _repr_pretty_(self, pp, cycle):
        """
        Output in text format (warnings prefixed with '** ').
        """
        if self.is_warning:
            warning_str = "** "
        else:
            warning_str = ""
        text = "%s%s\n" % (warning_str, self.current_state)

        pp.text(text)
def load_ipython_extension(ipython):
    """
    Triggers the load of all the AiiDA magic commands.

    IPython calls this hook when the extension is loaded via
    ``%load_ext``; it registers the AiiDALoaderMagics class.
    """
    ipython.register_magics(AiiDALoaderMagics)
| 27.671053 | 103 | 0.568236 |
acf5db1d9ef8b97598c1bafa2baf7ec5f95a5676 | 5,128 | py | Python | validations_libs/cli/history.py | openstack/validations-libs | 7d416acbe89a9ba23cabfd4e97c80affe57e06cb | [
"Apache-2.0"
] | 1 | 2020-03-11T09:13:28.000Z | 2020-03-11T09:13:28.000Z | validations_libs/cli/history.py | openstack/validations-libs | 7d416acbe89a9ba23cabfd4e97c80affe57e06cb | [
"Apache-2.0"
] | null | null | null | validations_libs/cli/history.py | openstack/validations-libs | 7d416acbe89a9ba23cabfd4e97c80affe57e06cb | [
"Apache-2.0"
] | 1 | 2021-03-23T08:31:43.000Z | 2021-03-23T08:31:43.000Z | #!/usr/bin/env python
# Copyright 2021 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
from validations_libs import constants
from validations_libs.validation_actions import ValidationActions
from validations_libs.validation_logs import ValidationLogs
from validations_libs.cli.base import BaseCommand, BaseLister
class ListHistory(BaseLister):
    """Display Validations execution history"""

    def get_parser(self, parser):
        """Declare CLI arguments: --validation, --limit, --validation-log-dir."""
        parser = super(ListHistory, self).get_parser(parser)

        parser.add_argument('--validation',
                            metavar="<validation_id>",
                            type=str,
                            help='Display execution history for a validation')

        parser.add_argument('--limit',
                            dest='history_limit',
                            type=int,
                            default=15,
                            help=(
                                'Display <n> most recent '
                                'runs of the selected <validation>. '
                                '<n> must be > 0\n'
                                'The default display limit is set to 15.\n'))

        parser.add_argument('--validation-log-dir', dest='validation_log_dir',
                            default=constants.VALIDATIONS_LOG_BASEDIR,
                            help=("Path where the validation log files "
                                  "is located."))
        # Merge config and CLI args:
        return self.base.set_argument_parser(parser)

    def take_action(self, parsed_args):
        """Validate --limit, then delegate to ValidationActions.show_history.

        Raises:
            ValueError: if the requested history limit is < 1.
        """
        # NOTE(review): this local is unused; show_history reads
        # parsed_args.validation_log_dir directly below.
        validation_log_dir = parsed_args.validation_log_dir
        history_limit = parsed_args.history_limit

        if history_limit < 1:
            msg = ("Number <n> of the most recent runs must be > 0. "
                   "You have provided {}").format(history_limit)
            raise ValueError(msg)
        self.app.LOG.info(
            ("Limiting output to the maximum of "
             "{} last validations.").format(history_limit))

        actions = ValidationActions()
        return actions.show_history(
            validation_ids=parsed_args.validation,
            log_path=parsed_args.validation_log_dir,
            history_limit=history_limit)
class GetHistory(BaseCommand):
    """Display details about a specific Validation execution"""

    def get_parser(self, parser):
        """Declare CLI arguments: positional uuid, --full, --validation-log-dir."""
        parser = super(GetHistory, self).get_parser(parser)
        parser.add_argument('uuid',
                            metavar="<uuid>",
                            type=str,
                            help='Validation UUID Run')

        parser.add_argument('--full',
                            action='store_true',
                            help='Show Full Details for the run')

        parser.add_argument('--validation-log-dir', dest='validation_log_dir',
                            default=constants.VALIDATIONS_LOG_BASEDIR,
                            help=("Path where the validation log files "
                                  "is located."))
        # Merge config and CLI args:
        return self.base.set_argument_parser(parser)

    def take_action(self, parsed_args):
        """Print the log content for the run identified by the given UUID.

        With --full, the whole log file is dumped; otherwise only the per-task
        validation output is printed.

        Raises:
            RuntimeError: if the log file cannot be read or no log file
                matches the given UUID.
        """
        self.app.LOG.debug(
            (
                "Obtaining information about the validation run {}\n"
                "From directory {}"
            ).format(
                parsed_args.uuid,
                parsed_args.validation_log_dir))

        vlogs = ValidationLogs(logs_path=parsed_args.validation_log_dir)

        try:
            log_files = vlogs.get_logfile_content_by_uuid(parsed_args.uuid)
        except IOError as io_error:
            raise RuntimeError(
                (
                    "Encountered a following IO error while attempting read a log "
                    "file linked to UUID: {} .\n"
                    "{}"
                ).format(
                    parsed_args.uuid,
                    io_error))

        if log_files:
            if parsed_args.full:
                for log_file in log_files:
                    print(json.dumps(log_file, indent=4, sort_keys=True))
            else:
                for log_file in log_files:
                    for validation_result in log_file.get('validation_output', []):
                        print(json.dumps(validation_result['task'],
                                         indent=4,
                                         sort_keys=True))
        else:
            raise RuntimeError(
                "Could not find the log file linked to this UUID: {}".format(
                    parsed_args.uuid))
acf5dbbc31534dc389e8de0d51ca9093fed29915 | 507 | py | Python | ocr_master.py | AakashKhatu/ParallelOCR | ff7c22078bcc2fde1232bfc066d2ef2e44617b5b | [
"MIT"
] | 1 | 2019-03-21T04:37:39.000Z | 2019-03-21T04:37:39.000Z | ocr_master.py | AakashKhatu/ParallelOCR | ff7c22078bcc2fde1232bfc066d2ef2e44617b5b | [
"MIT"
] | null | null | null | ocr_master.py | AakashKhatu/ParallelOCR | ff7c22078bcc2fde1232bfc066d2ef2e44617b5b | [
"MIT"
] | null | null | null | import os
from ocr_worker import Worker
from time import time
x = 8
files = os.listdir("sentences")[1:]
start_indexes = [int(len(files)*i/x) for i in range(x)]
indexes = zip(start_indexes, start_indexes[1:])
if __name__ == "__main__":
start_time = time()
print("Master started execution")
for i, (start, end) in enumerate(indexes):
w = Worker(files[start:end])
w.start()
print("Master finished Execution , Completed in : {0} \
seconds".format(time()-start_time))
| 25.35 | 59 | 0.66075 |
acf5dbd1951b079f48c88ceeb87413cb6432187d | 7,315 | py | Python | tests/test_io.py | jeffkinnison/florin | 94e76812e9fe27c86b2ce39313d07beb21c8b478 | [
"MIT"
] | 6 | 2019-06-03T19:11:05.000Z | 2021-01-13T06:35:43.000Z | tests/test_io.py | jeffkinnison/florin | 94e76812e9fe27c86b2ce39313d07beb21c8b478 | [
"MIT"
] | 4 | 2019-06-10T14:48:15.000Z | 2019-10-01T16:48:58.000Z | tests/test_io.py | jeffkinnison/florin | 94e76812e9fe27c86b2ce39313d07beb21c8b478 | [
"MIT"
] | 1 | 2019-09-25T17:57:23.000Z | 2019-09-25T17:57:23.000Z | import glob
import os
import h5py
import pytest
import numpy as np
from skimage.io import imread, imsave
from florin.io import load, load_image, load_images, load_npy, load_hdf5, \
load_tiff, save, save_image, save_images, save_npy, \
save_hdf5, save_tiff
@pytest.fixture(scope='module')
def load_setup(tmpdir_factory):
    """Set up a small test case for loading image data.

    Writes one random uint8 volume (100x300x300) to a temp directory as:
    a per-slice PNG stack under 'png/', single files 'data.tif' and
    'data.tiff', an HDF5 file with datasets 'stack' and 'foo', and
    'data.npy'. Returns (data, tmpdir_path).
    """
    data = np.random.randint(0, high=256, size=(100, 300, 300), dtype=np.uint8)
    tmpdir = tmpdir_factory.mktemp('data')
    # PNG stack: one zero-padded file per slice.
    os.makedirs(os.path.join(str(tmpdir), 'png'), exist_ok=True)
    for i in range(data.shape[0]):
        fname = str(i).zfill(3) + '.png'
        imsave(os.path.join(str(tmpdir), 'png', fname), data[i])
    imsave(os.path.join(str(tmpdir), 'data.tif'), data, plugin='tifffile')
    imsave(os.path.join(str(tmpdir), 'data.tiff'), data, plugin='tifffile')
    # Two datasets so key-selection in load() can be exercised.
    with h5py.File(os.path.join(str(tmpdir), 'data.h5'), 'w') as f:
        f.create_dataset('stack', data=data)
        f.create_dataset('foo', data=data)
    np.save(os.path.join(str(tmpdir), 'data.npy'), data)
    return data, str(tmpdir)
@pytest.fixture(scope='module')
def save_setup():
    """Provide a random uint8 (100, 300, 300) volume for the save tests."""
    random_volume = np.random.randint(0, high=256, size=(100, 300, 300),
                                      dtype=np.uint8)
    return random_volume
def test_load(load_setup):
    """Test that the load function works over all test filetypes."""
    data, tmpdir = load_setup
    # NumPy binary file.
    loaded = load()(os.path.join(tmpdir, 'data.npy'))
    assert np.all(loaded == data)
    # HDF5 with the default dataset key...
    loaded = load()(os.path.join(tmpdir, 'data.h5'))
    assert isinstance(loaded, h5py.Dataset)
    assert np.all(loaded[:] == data)
    # ...and with an explicit key.
    loaded = load()(os.path.join(tmpdir, 'data.h5'), key='foo')
    assert isinstance(loaded, h5py.Dataset)
    assert np.all(loaded[:] == data)
    # TIFF stacks under both common extensions.
    loaded = load()(os.path.join(tmpdir, 'data.tif'))
    assert np.all(loaded == data)
    loaded = load()(os.path.join(tmpdir, 'data.tiff'))
    assert np.all(loaded == data)
    # A directory of PNG slices loads as the whole volume.
    loaded = load()(os.path.join(tmpdir, 'png'))
    assert np.all(loaded == data)
    # Each individual PNG slice loads as a single plane.
    for i in range(data.shape[0]):
        # Fixed: was the redundant double assignment "fname = fname = ...".
        fname = str(i).zfill(3) + '.png'
        loaded = load()(os.path.join(tmpdir, 'png', fname))
        assert np.all(loaded == data[i])
    # Nonexistent paths raise FileNotFoundError.
    with pytest.raises(FileNotFoundError):
        loaded = load()('/foo/bar.lksd')
def test_load_hdf5(load_setup):
    """load_hdf5 should return an h5py.Dataset matching the saved volume."""
    data, tmpdir = load_setup
    h5_path = os.path.join(tmpdir, 'data.h5')
    default_ds = load_hdf5(h5_path)
    assert isinstance(default_ds, h5py.Dataset)
    assert np.all(default_ds[:] == data)
    keyed_ds = load_hdf5(h5_path, key='foo')
    assert isinstance(keyed_ds, h5py.Dataset)
    assert np.all(keyed_ds[:] == data[:])
def test_load_image(load_setup):
    """Each individual PNG slice should load back equal to its source slice."""
    data, tmpdir = load_setup
    for i in range(data.shape[0]):
        # Fixed: was the redundant double assignment "fname = fname = ...".
        fname = str(i).zfill(3) + '.png'
        loaded = load_image(os.path.join(tmpdir, 'png', fname))
        assert np.all(loaded == data[i])
def test_load_images(load_setup):
    """Loading the PNG directory should reassemble the full volume."""
    data, tmpdir = load_setup
    png_dir = os.path.join(tmpdir, 'png')
    assert np.all(load_images(png_dir) == data)
def test_load_npy(load_setup):
    """load_npy should round-trip the .npy volume exactly."""
    data, tmpdir = load_setup
    npy_path = os.path.join(tmpdir, 'data.npy')
    assert np.all(load_npy(npy_path) == data)
def test_load_tiff(load_setup):
    """load_tiff should handle both the .tif and .tiff extensions."""
    data, tmpdir = load_setup
    for name in ('data.tif', 'data.tiff'):
        loaded = load_tiff(os.path.join(tmpdir, name))
        assert np.all(loaded == data)
def test_save(save_setup, tmpdir):
    """The save dispatcher should write every supported output format."""
    data = save_setup
    tmpdir = str(tmpdir)

    # HDF5: default key first, then an explicit key alongside it.
    fpath = os.path.join(tmpdir, 'data.h5')
    save()(data, fpath)
    assert os.path.isfile(fpath)
    with h5py.File(fpath, 'r') as saved:
        assert 'stack' in saved
        assert np.all(saved['stack'][:] == data)
    save()(data, fpath, key='foo')
    assert os.path.isfile(fpath)
    with h5py.File(fpath, 'r') as saved:
        assert 'stack' in saved
        assert 'foo' in saved
        assert np.all(saved['stack'][:] == data)

    # Single-file array formats, each verified with its own reader.
    for name, reader in (('data.npy', np.load),
                         ('data.tif', imread),
                         ('data.tiff', imread)):
        fpath = os.path.join(tmpdir, name)
        save()(data, fpath)
        assert os.path.isfile(fpath)
        assert np.all(reader(fpath) == data)

    # Saving to a directory path produces one ordered PNG per slice.
    fpath = os.path.join(tmpdir, 'png')
    save()(data, fpath)
    assert os.path.isdir(fpath)
    imgs = sorted(glob.glob(os.path.join(fpath, '*.png')))
    for i, img in enumerate(imgs):
        fname = '{}.png'.format(str(i).zfill(3))
        assert os.path.isfile(os.path.join(fpath, fname))
        assert os.path.join(fpath, fname) == img
        assert np.all(imread(img) == data[i])

    # Saving a single 2D slice to a .png filename writes just that image.
    for i in range(data.shape[0]):
        fname = '{}.png'.format(str(i).zfill(3))
        fpath = os.path.join(tmpdir, fname)
        save()(data[i], fpath)
        assert os.path.isfile(fpath)
        assert np.all(imread(fpath) == data[i])
def test_save_hdf5(save_setup, tmpdir):
    """save_hdf5 should create the default dataset and honor a custom key."""
    data = save_setup
    h5_path = os.path.join(str(tmpdir), 'data.h5')

    save_hdf5(data, h5_path)
    assert os.path.isfile(h5_path)
    with h5py.File(h5_path, 'r') as saved:
        assert 'stack' in saved
        assert np.all(saved['stack'][:] == data)

    save_hdf5(data, h5_path, key='foo')
    assert os.path.isfile(h5_path)
    with h5py.File(h5_path, 'r') as saved:
        assert 'stack' in saved
        assert 'foo' in saved
        assert np.all(saved['stack'][:] == data)
def test_save_image(save_setup, tmpdir):
    """save_image should write each slice so it reads back unchanged."""
    data = save_setup
    out_dir = str(tmpdir)
    for idx, plane in enumerate(data):
        out_path = os.path.join(out_dir, '{}.png'.format(str(idx).zfill(3)))
        save_image(plane, out_path)
        assert os.path.isfile(out_path)
        assert np.all(imread(out_path) == plane)
def test_save_images(save_setup, tmpdir):
    """save_images should dump the volume as an ordered PNG directory."""
    data = save_setup
    png_dir = os.path.join(str(tmpdir), 'png')
    save_images(data, png_dir)
    assert os.path.isdir(png_dir)
    found = sorted(glob.glob(os.path.join(png_dir, '*.png')))
    for idx, img in enumerate(found):
        expected = os.path.join(png_dir, '{}.png'.format(str(idx).zfill(3)))
        assert os.path.isfile(expected)
        assert expected == img
        assert np.all(imread(img) == data[idx])
def test_save_npy(save_setup, tmpdir):
    """save_npy output should round-trip through np.load."""
    data = save_setup
    npy_path = os.path.join(str(tmpdir), 'data.npy')
    save_npy(data, npy_path)
    assert os.path.isfile(npy_path)
    assert np.all(np.load(npy_path) == data)
def test_save_tiff(save_setup, tmpdir):
    """save_tiff should accept both .tif and .tiff destinations."""
    data = save_setup
    for name in ('data.tif', 'data.tiff'):
        tiff_path = os.path.join(str(tmpdir), name)
        save_tiff(data, tiff_path)
        assert os.path.isfile(tiff_path)
        assert np.all(imread(tiff_path) == data)
| 29.027778 | 79 | 0.62406 |
acf5dd507145764d2aa29f0dbd77cf1c124c6abe | 5,379 | py | Python | test/db/test_channel_manager.py | thenetcircle/dino | 1047c3458e91a1b4189e9f48f1393b3a68a935b3 | [
"Apache-2.0"
] | 150 | 2016-10-05T11:09:36.000Z | 2022-03-06T16:24:41.000Z | test/db/test_channel_manager.py | thenetcircle/dino | 1047c3458e91a1b4189e9f48f1393b3a68a935b3 | [
"Apache-2.0"
] | 27 | 2017-03-02T03:37:02.000Z | 2022-02-10T04:59:54.000Z | test/db/test_channel_manager.py | thenetcircle/dino | 1047c3458e91a1b4189e9f48f1393b3a68a935b3 | [
"Apache-2.0"
] | 21 | 2016-11-11T07:51:48.000Z | 2020-04-26T21:38:33.000Z | #!/usr/bin/env python
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import TestCase
from uuid import uuid4 as uuid
from dino import environ
from dino.db.manager.channels import ChannelManager
from dino.exceptions import NoSuchChannelException
from dino.exceptions import EmptyChannelNameException
__author__ = 'Oscar Eriksson <oscar.eriks@gmail.com>'
class FakeDb(object):
    """In-memory stand-in for the channel database used by these tests.

    Only knows about the single test channel defined on ChannelManagerTest;
    any other channel id raises NoSuchChannelException.
    """
    _channel_names = dict()

    @staticmethod
    def _require_known_channel(channel_id):
        # Every lookup in this fake only recognizes the one test channel.
        if channel_id != ChannelManagerTest.CHANNEL_ID:
            raise NoSuchChannelException(channel_id)

    @staticmethod
    def _require_non_empty(name, identifier):
        # Mirrors the real DB's rejection of blank channel names.
        if name is None or not name.strip():
            raise EmptyChannelNameException(identifier)

    def get_channels(self):
        channel_info = (ChannelManagerTest.CHANNEL_NAME, 1, 'normal')
        return {ChannelManagerTest.CHANNEL_ID: channel_info}

    def create_channel(self, name, uuid, user_id):
        self._require_non_empty(name, uuid)

    def create_admin_room_for(self, channel_id):
        pass

    def get_channel_name(self, channel_id):
        self._require_known_channel(channel_id)
        return ChannelManagerTest.CHANNEL_NAME

    def rename_channel(self, channel_id: str, channel_name: str):
        self._require_non_empty(channel_name, channel_id)
        FakeDb._channel_names[channel_id] = channel_name

    def get_owners_channel(self, channel_id):
        self._require_known_channel(channel_id)
        return {ChannelManagerTest.USER_ID: ChannelManagerTest.USER_NAME}

    def get_admins_channel(self, channel_id):
        self._require_known_channel(channel_id)
        return {ChannelManagerTest.USER_ID: ChannelManagerTest.USER_NAME}
class ChannelManagerTest(TestCase):
    """Unit tests for ChannelManager backed by the FakeDb stand-in.

    The manager is expected to return None on success for mutating calls
    and an error string (rather than raising) when the underlying db
    raises -- several tests below assert ``type(value) == str`` for that.
    """
    # Fixture constants shared with FakeDb.
    CHANNEL_ID = '1234'
    CHANNEL_NAME = 'Shanghai'
    OTHER_CHANNEL_ID = '4321'
    OTHER_CHANNEL_NAME = 'Beijing'
    USER_ID = '5555'
    USER_NAME = 'Batman'
    def setUp(self):
        # Swap the real db for the fake and seed the known channel name.
        environ.env.db = FakeDb()
        self.manager = ChannelManager(environ.env)
        FakeDb._channel_names[ChannelManagerTest.CHANNEL_ID] = ChannelManagerTest.CHANNEL_NAME
    def test_get_channels_correct_length(self):
        channels = self.manager.get_channels()
        self.assertEqual(1, len(channels))
    def test_get_channels_correct_uuid(self):
        channels = self.manager.get_channels()
        self.assertEqual(ChannelManagerTest.CHANNEL_ID, channels[0]['uuid'])
    def test_get_channels_correct_name(self):
        channels = self.manager.get_channels()
        self.assertEqual(ChannelManagerTest.CHANNEL_NAME, channels[0]['name'])
    def test_create_channel(self):
        # Successful creation returns None.
        self.assertEqual(None, self.manager.create_channel(
            ChannelManagerTest.OTHER_CHANNEL_NAME, ChannelManagerTest.OTHER_CHANNEL_ID, ChannelManagerTest.USER_ID))
    def test_create_channel_empty_name(self):
        # Empty names are rejected: the manager returns an error string.
        value = self.manager.create_channel('', ChannelManagerTest.OTHER_CHANNEL_ID, ChannelManagerTest.USER_ID)
        self.assertEqual(type(value), str)
    def test_name_for_uuid(self):
        self.assertEqual(ChannelManagerTest.CHANNEL_NAME, self.manager.name_for_uuid(ChannelManagerTest.CHANNEL_ID))
    def test_name_for_uuid_no_such_channel(self):
        # Unknown channel ids resolve to None rather than raising.
        value = self.manager.name_for_uuid(str(uuid()))
        self.assertEqual(None, value)
    def test_rename(self):
        self.assertEqual(ChannelManagerTest.CHANNEL_NAME, FakeDb._channel_names[ChannelManagerTest.CHANNEL_ID])
        value = self.manager.rename(ChannelManagerTest.CHANNEL_ID, 'foobar')
        self.assertEqual(value, None)
        self.assertEqual('foobar', FakeDb._channel_names[ChannelManagerTest.CHANNEL_ID])
    def test_rename_empty_name(self):
        # Renaming to an empty string fails and leaves the old name in place.
        self.assertEqual(ChannelManagerTest.CHANNEL_NAME, FakeDb._channel_names[ChannelManagerTest.CHANNEL_ID])
        value = self.manager.rename(ChannelManagerTest.CHANNEL_ID, '')
        self.assertEqual(type(value), str)
        self.assertEqual(ChannelManagerTest.CHANNEL_NAME, FakeDb._channel_names[ChannelManagerTest.CHANNEL_ID])
    def test_get_owners(self):
        owners = self.manager.get_owners(ChannelManagerTest.CHANNEL_ID)
        self.assertEqual(1, len(owners))
        self.assertEqual(ChannelManagerTest.USER_ID, owners[0]['uuid'])
        self.assertEqual(ChannelManagerTest.USER_NAME, owners[0]['name'])
    def test_get_owners_no_such_channel(self):
        owners = self.manager.get_owners(str(uuid()))
        self.assertEqual(type(owners), str)
    def test_get_admins(self):
        admins = self.manager.get_admins(ChannelManagerTest.CHANNEL_ID)
        self.assertEqual(1, len(admins))
        self.assertEqual(ChannelManagerTest.USER_ID, admins[0]['uuid'])
        self.assertEqual(ChannelManagerTest.USER_NAME, admins[0]['name'])
    def test_get_admins_no_such_channel(self):
        admins = self.manager.get_admins(str(uuid()))
        self.assertEqual(type(admins), str)
| 39.844444 | 120 | 0.733036 |
acf5dd81abc221e7d4b566ad9777065b0904d780 | 632 | py | Python | tools/clean_invalid_image.py | RingLcy/jekyll-theme-chirpy | 150aa10f5f241a6170cd5f83c515e44fb052f071 | [
"MIT"
] | null | null | null | tools/clean_invalid_image.py | RingLcy/jekyll-theme-chirpy | 150aa10f5f241a6170cd5f83c515e44fb052f071 | [
"MIT"
] | null | null | null | tools/clean_invalid_image.py | RingLcy/jekyll-theme-chirpy | 150aa10f5f241a6170cd5f83c515e44fb052f071 | [
"MIT"
] | null | null | null | import os
# Paths are relative to this script's location inside the repo's tools/ dir.
post_dir = "../_posts"
img_dir = "../assets/img"

# Collect every plain file in the image directory (sub-directories skipped).
img_list = [
    each_file
    for each_file in os.listdir(img_dir)
    if not os.path.isdir(os.path.join(img_dir, each_file))
]

# An image is "valid" if its filename appears anywhere in any post's text.
# A set makes the repeated membership tests O(1) instead of scanning a list.
valid_img_set = set()
for each_file in os.listdir(post_dir):
    with open(os.path.join(post_dir, each_file), encoding="utf-8") as fh:
        data = fh.read()
    for img in img_list:
        if img not in valid_img_set and img in data:
            valid_img_set.add(img)

# Delete every image that no post references.
for img in img_list:
    if img not in valid_img_set:
        os.remove(os.path.join(img_dir, img))
        # print(img)
| 25.28 | 73 | 0.637658 |
acf5de02043703fffbd75e24964cb21da6b9af75 | 4,926 | py | Python | scripts/hex2bin.py | mhubig/intelhex | cfcfe38c2238f85ae4dfc1e1d22c763ad0a2ce66 | [
"BSD-3-Clause"
] | null | null | null | scripts/hex2bin.py | mhubig/intelhex | cfcfe38c2238f85ae4dfc1e1d22c763ad0a2ce66 | [
"BSD-3-Clause"
] | null | null | null | scripts/hex2bin.py | mhubig/intelhex | cfcfe38c2238f85ae4dfc1e1d22c763ad0a2ce66 | [
"BSD-3-Clause"
] | 2 | 2015-12-09T13:03:06.000Z | 2021-10-05T05:20:07.000Z | #!/usr/bin/python
# Copyright (c) 2005,2006,2007,2008,2010,2011,2012,2013 Alexander Belchenko
# All rights reserved.
#
# Redistribution and use in source and binary forms,
# with or without modification, are permitted provided
# that the following conditions are met:
#
# * Redistributions of source code must retain
# the above copyright notice, this list of conditions
# and the following disclaimer.
# * Redistributions in binary form must reproduce
# the above copyright notice, this list of conditions
# and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the author nor the names
# of its contributors may be used to endorse
# or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,
# BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
# OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
# AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''Intel HEX file format hex2bin convertor utility.'''
VERSION = '1.5.1'

if __name__ == '__main__':
    import getopt
    import os
    import sys

    from intelhex import hex2bin

    usage = '''Hex2Bin convertor utility.
Usage:
    python hex2bin.py [options] INFILE [OUTFILE]

Arguments:
    INFILE      name of hex file for processing.
    OUTFILE     name of output file. If omitted then output
                will be writing to stdout.

Options:
    -h, --help              this help message.
    -v, --version           version info.
    -p, --pad=FF            pad byte for empty spaces (ascii hex value).
    -r, --range=START:END   specify address range for writing output
                            (ascii hex value).
                            Range can be in form 'START:' or ':END'.
    -l, --length=NNNN,
    -s, --size=NNNN         size of output (decimal value).
'''

    pad = None
    start = None
    end = None
    size = None

    try:
        opts, args = getopt.getopt(sys.argv[1:], "hvp:r:l:s:",
                                   ["help", "version", "pad=", "range=",
                                    "length=", "size="])

        for o, a in opts:
            if o in ("-h", "--help"):
                print(usage)
                sys.exit(0)
            elif o in ("-v", "--version"):
                print(VERSION)
                sys.exit(0)
            elif o in ("-p", "--pad"):
                try:
                    pad = int(a, 16) & 0x0FF
                except ValueError:
                    raise getopt.GetoptError('Bad pad value')
            elif o in ("-r", "--range"):
                try:
                    l = a.split(":")
                    if l[0] != '':
                        start = int(l[0], 16)
                    if l[1] != '':
                        end = int(l[1], 16)
                except (ValueError, IndexError):
                    raise getopt.GetoptError('Bad range value(s)')
            elif o in ("-l", "--length", "-s", "--size"):
                # BUG FIX: this previously matched "--lenght" (typo), so the
                # registered --length long option was silently ignored.
                try:
                    size = int(a, 10)
                except ValueError:
                    raise getopt.GetoptError('Bad size value')

        if start is not None and end is not None and size is not None:
            raise getopt.GetoptError('Cannot specify START:END and SIZE simultaneously')

        if not args:
            raise getopt.GetoptError('Hex file is not specified')

        if len(args) > 2:
            raise getopt.GetoptError('Too many arguments')
    # Python 3 compatible exception syntax (was the py2-only "except E, msg").
    except getopt.GetoptError as msg:
        txt = 'ERROR: '+str(msg)    # that's required to get not-so-dumb result from 2to3 tool
        print(txt)
        print(usage)
        sys.exit(2)

    fin = args[0]
    if not os.path.isfile(fin):
        txt = "ERROR: File not found: %s" % fin     # that's required to get not-so-dumb result from 2to3 tool
        print(txt)
        sys.exit(1)

    if len(args) == 2:
        fout = args[1]
    else:
        # write to stdout
        fout = sys.stdout
        # force binary mode for stdout on Windows
        if os.name == 'nt':
            f_fileno = getattr(sys.stdout, 'fileno', None)
            if f_fileno:
                fileno = f_fileno()
                if fileno >= 0:
                    import msvcrt
                    msvcrt.setmode(fileno, os.O_BINARY)

    sys.exit(hex2bin(fin, fout, start, end, size, pad))
| 35.185714 | 107 | 0.572473 |
acf5de2adf7e1b7caac6dcd0977ab63fc5795a40 | 13,165 | py | Python | mlrun/config/default.py | rfan-debug/mlrun | aaaab86a0a58d37313ab1967ddcc54426b1987f3 | [
"Apache-2.0"
] | null | null | null | mlrun/config/default.py | rfan-debug/mlrun | aaaab86a0a58d37313ab1967ddcc54426b1987f3 | [
"Apache-2.0"
] | null | null | null | mlrun/config/default.py | rfan-debug/mlrun | aaaab86a0a58d37313ab1967ddcc54426b1987f3 | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 Iguazio
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from os.path import expanduser
# Default configuration tree for mlrun.  At runtime these defaults are
# overridden by environment variables / loaded config; empty strings mean
# "unset, resolved elsewhere or at deploy time".
default_config = {
    "namespace": "",  # default kubernetes namespace
    "dbpath": "",  # db/api url
    # url to nuclio dashboard api (can be with user & token, e.g. https://username:password@dashboard-url.com)
    "nuclio_dashboard_url": "",
    "nuclio_version": "",
    "default_nuclio_runtime": "python:3.7",
    "nest_asyncio_enabled": "",  # enable import of nest_asyncio for corner cases with old jupyter, set "1"
    "ui_url": "",  # remote/external mlrun UI url (for hyperlinks) (This is deprecated in favor of the ui block)
    "remote_host": "",
    "version": "",  # will be set to current version
    "images_tag": "",  # tag to use with mlrun images e.g. mlrun/mlrun (defaults to version)
    "images_registry": "",  # registry to use with mlrun images e.g. quay.io/ (defaults to empty, for dockerhub)
    # comma separated list of images that are in the specified images_registry, and therefore will be enriched with this
    # registry when used. default to mlrun/* which means any image which is of the mlrun repository (mlrun/mlrun,
    # mlrun/ml-base, etc...)
    "images_to_enrich_registry": "^mlrun/*",
    "kfp_ttl": "14400",  # KFP ttl in sec, after that completed PODs will be deleted
    "kfp_image": "",  # image to use for KFP runner (defaults to mlrun/mlrun)
    "dask_kfp_image": "",  # image to use for dask KFP runner (defaults to mlrun/ml-base)
    "igz_version": "",  # the version of the iguazio system the API is running on
    "iguazio_api_url": "",  # the url to iguazio api
    "spark_app_image": "",  # image to use for spark operator app runtime
    "spark_app_image_tag": "",  # image tag to use for spark opeartor app runtime
    "spark_history_server_path": "",  # spark logs directory for spark history server
    "spark_operator_version": "spark-2",  # the version of the spark operator in use
    "builder_alpine_image": "alpine:3.13.1",  # builder alpine image (as kaniko's initContainer)
    "package_path": "mlrun",  # mlrun pip package
    "default_base_image": "mlrun/mlrun",  # default base image when doing .deploy()
    "default_project": "default",  # default project name
    "default_archive": "",  # default remote archive URL (for build tar.gz)
    "mpijob_crd_version": "",  # mpijob crd version (e.g: "v1alpha1". must be in: mlrun.runtime.MPIJobCRDVersions)
    "hub_url": "https://raw.githubusercontent.com/mlrun/functions/{tag}/{name}/function.yaml",
    "ipython_widget": True,
    "log_level": "INFO",
    # log formatter (options: human | json)
    "log_formatter": "human",
    "submit_timeout": "180",  # timeout when submitting a new k8s resource
    # runtimes cleanup interval in seconds
    "runtimes_cleanup_interval": "300",
    # runs monitoring interval in seconds
    "runs_monitoring_interval": "30",
    # the grace period (in seconds) that will be given to runtime resources (after they're in terminal state)
    # before deleting them
    "runtime_resources_deletion_grace_period": "14400",
    "scrape_metrics": True,
    # sets the background color that is used in printed tables in jupyter
    "background_color": "#4EC64B",
    "artifact_path": "",  # default artifacts path/url
    # FIXME: Adding these defaults here so we won't need to patch the "installing component" (provazio-controller) to
    # configure this values on field systems, for newer system this will be configured correctly
    "v3io_api": "http://v3io-webapi:8081",
    "v3io_framesd": "http://framesd:8080",
    "datastore": {"async_source_mode": "disabled"},
    # default node selector to be applied to all functions - json string base64 encoded format
    # NOTE: "e30=" is base64 for "{}", i.e. an empty selector by default.
    "default_function_node_selector": "e30=",
    # default priority class to be applied to functions running on k8s cluster
    "default_function_priority_class_name": "",
    # valid options for priority classes - separated by a comma
    "valid_function_priority_class_names": "",
    "function_defaults": {
        "image_by_kind": {
            "job": "mlrun/mlrun",
            "serving": "mlrun/mlrun",
            "nuclio": "mlrun/mlrun",
            "remote": "mlrun/mlrun",
            "dask": "mlrun/ml-base",
            "mpijob": "mlrun/ml-models",
        }
    },
    # Settings for the mlrun HTTP API server and its backing database.
    "httpdb": {
        "port": 8080,
        "dirpath": expanduser("~/.mlrun/db"),
        "dsn": "sqlite:////mlrun/db/mlrun.db?check_same_thread=false",
        "debug": False,
        "user": "",
        "password": "",
        "token": "",
        "logs_path": "/mlrun/db/logs",
        "data_volume": "",
        "real_path": "",
        "db_type": "sqldb",
        "max_workers": "",
        "db": {"commit_retry_timeout": 30, "commit_retry_interval": 3},
        "jobs": {
            # whether to allow to run local runtimes in the API - configurable to allow the scheduler testing to work
            "allow_local_run": False,
        },
        "authentication": {
            "mode": "none",  # one of none, basic, bearer, iguazio
            "basic": {"username": "", "password": ""},
            "bearer": {"token": ""},
            "iguazio": {
                "session_verification_endpoint": "data_sessions/verifications/app_service",
            },
        },
        "nuclio": {
            # One of ClusterIP | NodePort
            "default_service_type": "NodePort",
            # The following modes apply when user did not configure an ingress
            #
            # name        | description
            # ---------------------------------------------------------------------
            # never       | never enrich with an ingress
            # always      | always enrich with an ingress, regardless the service type
            # onClusterIP | enrich with an ingress only when `mlrun.config.httpdb.nuclio.default_service_type`
            #               is set to ClusterIP
            # ---------------------------------------------------------------------
            # Note: adding a mode requires special handling on
            # - mlrun.runtimes.constants.NuclioIngressAddTemplatedIngressModes
            # - mlrun.runtimes.function.enrich_function_with_ingress
            "add_templated_ingress_host_mode": "never",
        },
        "authorization": {
            "mode": "none",  # one of none, opa
            "opa": {
                "address": "",
                "request_timeout": 10,
                "permission_query_path": "",
                "permission_filter_path": "",
                "log_level": 0,
            },
        },
        "scheduling": {
            # the minimum interval that will be allowed between two scheduled jobs - e.g. a job wouldn't be
            # allowed to be scheduled to run more then 2 times in X. Can't be less then 1 minute, "0" to disable
            "min_allowed_interval": "10 minutes",
            "default_concurrency_limit": 1,
            # Firing our jobs include things like creating pods which might not be instant, therefore in the case of
            # multiple schedules scheduled to the same time, there might be delays, the default of the scheduler for
            # misfire_grace_time is 1 second, we do not want jobs not being scheduled because of the delays so setting
            # it to None. the default for coalesce it True just adding it here to be explicit
            "scheduler_config": '{"job_defaults": {"misfire_grace_time": null, "coalesce": true}}',
            # one of enabled, disabled, auto (in which it will be determined by whether the authorization mode is opa)
            "schedule_credentials_secrets_store_mode": "auto",
        },
        "projects": {
            "leader": "mlrun",
            "followers": "",
            # This is used as the interval for the sync loop both when mlrun is leader and follower
            "periodic_sync_interval": "1 minute",
            "counters_cache_ttl": "10 seconds",
            # access key to be used when the leader is iguazio and polling is done from it
            "iguazio_access_key": "",
            # the initial implementation was cache and was working great, now it's not needed because we get (read/list)
            # from leader because of some auth restriction, we will probably go back to it at some point since it's
            # better performance wise, so made it a mode
            # one of: cache, none
            "follower_projects_store_mode": "cache",
            "project_owners_cache_ttl": "30 seconds",
        },
        # The API needs to know what is its k8s svc url so it could enrich it in the jobs it creates
        "api_url": "",
        "builder": {
            # setting the docker registry to be used for built images, can include the repository as well, e.g.
            # index.docker.io/<username>, if not included repository will default to mlrun
            "docker_registry": "",
            "docker_registry_secret": "",
            # the requirement specifier used by the builder when installing mlrun in images when it runs
            # pip install <requirement_specifier>, e.g. mlrun==0.5.4, mlrun~=0.5,
            # git+https://github.com/mlrun/mlrun@development. by default uses the version
            "mlrun_version_specifier": "",
            "kaniko_image": "gcr.io/kaniko-project/executor:v0.24.0",  # kaniko builder image
            "kaniko_init_container_image": "alpine:3.13.1",
            # additional docker build args in json encoded base64 format
            "build_args": "",
        },
        "v3io_api": "",
        "v3io_framesd": "",
    },
    "model_endpoint_monitoring": {
        "serving_stream_args": {"shard_count": 1, "retention_period_hours": 24},
        "drift_thresholds": {"default": {"possible_drift": 0.5, "drift_detected": 0.7}},
        "store_prefixes": {
            "default": "v3io:///users/pipelines/{project}/model-endpoints/{kind}",
            "user_space": "v3io:///projects/{project}/model-endpoints/{kind}",
        },
        "batch_processing_function_branch": "master",
    },
    "secret_stores": {
        "vault": {
            # URLs to access Vault. For example, in a local env (Minikube on Mac) these would be:
            # http://docker.for.mac.localhost:8200
            "url": "",
            "remote_url": "",
            "role": "",
            "token_path": "~/.mlrun/vault",
            "project_service_account_name": "mlrun-vault-{project}",
            "token_ttl": 180000,
            # This config is for debug/testing purposes only!
            "user_token": "",
        },
        "azure_vault": {
            "url": "https://{name}.vault.azure.net",
            "default_secret_name": None,
            "secret_path": "~/.mlrun/azure_vault",
        },
        "kubernetes": {
            "project_secret_name": "mlrun-project-secrets-{project}",
            "env_variable_prefix": "MLRUN_K8S_SECRET__",
        },
    },
    "feature_store": {
        "data_prefixes": {
            "default": "v3io:///projects/{project}/FeatureStore/{name}/{kind}",
            "nosql": "v3io:///projects/{project}/FeatureStore/{name}/{kind}",
        },
        "default_targets": "parquet,nosql",
        "default_job_image": "mlrun/mlrun",
        "flush_interval": None,
    },
    "ui": {
        "projects_prefix": "projects",  # The UI link prefix for projects
        "url": "",  # remote/external mlrun UI url (for hyperlinks)
    },
    "marketplace": {
        "k8s_secrets_project_name": "-marketplace-secrets",
        "catalog_filename": "catalog.json",
        "default_source": {
            # Set to false to avoid creating a global source (for example in a dark site)
            "create": True,
            "name": "mlrun_global_hub",
            "description": "MLRun global function hub",
            "url": "https://raw.githubusercontent.com/mlrun/marketplace",
            "channel": "master",
        },
    },
    "storage": {
        # What type of auto-mount to use for functions. Can be one of: none, auto, v3io_credentials, v3io_fuse, pvc.
        # Default is auto - which is v3io_credentials when running on Iguazio. If not Iguazio: pvc if the
        # MLRUN_PVC_MOUNT env is configured or auto_mount_params contain "pvc_name". Otherwise will do nothing (none).
        "auto_mount_type": "auto",
        # Extra parameters to pass to the mount call (will be passed as kwargs). Parameters can be either:
        # 1. A string of comma-separated parameters, using this format: "param1=value1,param2=value2"
        # 2. A base-64 encoded json dictionary containing the list of parameters
        "auto_mount_params": "",
    },
}
| 51.627451 | 120 | 0.610786 |
acf5de4c098a8978af06f01379e2b1c092fd9bf0 | 624 | py | Python | gendata.py | rsnemmen/partial-correlation | cf842595f34d718f7a25613710c7a9b0c8d3be18 | [
"MIT"
] | null | null | null | gendata.py | rsnemmen/partial-correlation | cf842595f34d718f7a25613710c7a9b0c8d3be18 | [
"MIT"
] | null | null | null | gendata.py | rsnemmen/partial-correlation | cf842595f34d718f7a25613710c7a9b0c8d3be18 | [
"MIT"
] | null | null | null | """
Generates mock data sets for partial correlation analysis with cens_tau.f.
"""
import numpy
"""
First generates a test dataset x,y,z in which
x=az+b
y=cz+d
such that the correlation between x and y is actually driven by their mutual
correlation with z. I add white noise to the simulated data.
"""
# x,y,z: z is the confounder; x and y each depend linearly on z plus
# unit-variance Gaussian noise, so any x-y correlation is driven by z.
z = numpy.linspace(0, 10, 50)
noisex = numpy.random.normal(size=z.size)
noisey = numpy.random.normal(size=z.size)
x = z + 10. + noisex
y = 5.*z + 3. + noisey
# Censoring flags (all 1 = detections).  The ``numpy.int`` alias was removed
# in NumPy 1.24; the builtin ``int`` gives the same platform-default dtype.
cens = numpy.ones(x.size, dtype=int)

# Exports to a data file
numpy.savetxt('test01.dat', numpy.transpose((x, cens, y, cens, z, cens)),
              fmt='%10.4f %i %10.4f %i %10.4f %i')
acf5debc3132024e5b4dc27db80ab8d5bf08941c | 4,446 | py | Python | examples/echo-server-poll.py | farisachugthai/pyuv | 39342fc2fd688f2fb2120d3092dd9cf52f537de2 | [
"MIT"
] | 826 | 2015-01-02T15:03:20.000Z | 2022-03-28T01:32:43.000Z | examples/echo-server-poll.py | farisachugthai/pyuv | 39342fc2fd688f2fb2120d3092dd9cf52f537de2 | [
"MIT"
] | 70 | 2015-01-09T13:55:03.000Z | 2022-03-31T11:00:16.000Z | examples/echo-server-poll.py | farisachugthai/pyuv | 39342fc2fd688f2fb2120d3092dd9cf52f537de2 | [
"MIT"
] | 98 | 2015-01-27T08:30:21.000Z | 2021-12-13T08:12:51.000Z |
import sys
import socket
import signal
import weakref
import errno
import logging
import pyuv
# Verbose logging for this example server.
logging.basicConfig(level=logging.DEBUG)
# Signals that trigger a clean shutdown of the server.
STOPSIGNALS = (signal.SIGINT, signal.SIGTERM)
# errno values that mean "no data ready yet" on a non-blocking socket
# (on Windows the winsock variant must be accepted too).
NONBLOCKING = (errno.EAGAIN, errno.EWOULDBLOCK)
if sys.platform == "win32":
    NONBLOCKING = NONBLOCKING + (errno.WSAEWOULDBLOCK,)
class Connection(object):
    """One echo client connection driven by a pyuv.Poll watcher."""

    def __init__(self, sock, address, loop):
        self.sock = sock
        self.address = address
        self.sock.setblocking(0)
        # Pending bytes to echo back.  A bytes literal is used so that
        # concatenating socket.recv() output works on Python 3 as well
        # (on Python 2, b"" is the same type as "").
        self.buf = b""
        self.watcher = pyuv.Poll(loop, self.sock.fileno())
        self.watcher.start(pyuv.UV_READABLE, self.io_cb)
        logging.debug("{0}: ready".format(self))

    def reset(self, events):
        """Re-arm the poll watcher with the given event mask."""
        self.watcher.start(events, self.io_cb)

    def handle_error(self, msg, level=logging.ERROR, exc_info=True):
        """Log a problem on this connection and close it."""
        logging.log(level, "{0}: {1} --> closing".format(self, msg), exc_info=exc_info)
        self.close()

    def handle_read(self):
        """Read available data; queue it for echoing, or close on EOF."""
        try:
            buf = self.sock.recv(1024)
        except socket.error as err:
            if err.args[0] not in NONBLOCKING:
                self.handle_error("error reading from {0}".format(self.sock))
            # Nothing was read (spurious wakeup, or a fatal error already
            # handled above).  Previously execution fell through to the
            # ``if buf`` test below with ``buf`` unbound -> NameError.
            return
        if buf:
            self.buf += buf
            self.reset(pyuv.UV_READABLE | pyuv.UV_WRITABLE)
        else:
            # recv() returning empty means the peer closed the connection.
            self.handle_error("connection closed by peer", logging.DEBUG, False)

    def handle_write(self):
        """Send as much of the pending buffer as the socket accepts."""
        try:
            sent = self.sock.send(self.buf)
        except socket.error as err:
            if err.args[0] not in NONBLOCKING:
                self.handle_error("error writing to {0}".format(self.sock))
        else:
            self.buf = self.buf[sent:]
            if not self.buf:
                # Everything echoed: stop asking for writability.
                self.reset(pyuv.UV_READABLE)

    def io_cb(self, watcher, revents, error):
        """Dispatch poll events to the read/write handlers."""
        if error is not None:
            logging.error("Error in connection: %d: %s" % (error, pyuv.errno.strerror(error)))
            return
        if revents & pyuv.UV_READABLE:
            self.handle_read()
        elif revents & pyuv.UV_WRITABLE:
            self.handle_write()

    def close(self):
        """Stop the watcher and close the underlying socket."""
        self.watcher.stop()
        self.watcher = None
        self.sock.close()
        logging.debug("{0}: closed".format(self))
class Server(object):
    """Non-blocking echo server: accepts clients and wraps each in a Connection."""

    def __init__(self, address):
        self.sock = socket.socket()
        self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.sock.bind(address)
        self.sock.setblocking(0)
        self.address = self.sock.getsockname()
        self.loop = pyuv.Loop.default_loop()
        self.poll_watcher = pyuv.Poll(self.loop, self.sock.fileno())
        # Renamed from ``self.async``: "async" is a reserved keyword from
        # Python 3.7 onward, which made this module a SyntaxError.
        self.async_handle = pyuv.Async(self.loop, self.async_cb)
        # Weak values so closed connections can be garbage collected.
        self.conns = weakref.WeakValueDictionary()
        self.signal_watchers = set()

    def handle_error(self, msg, level=logging.ERROR, exc_info=True):
        """Log a server-level problem and shut down."""
        logging.log(level, "{0}: {1} --> stopping".format(self, msg), exc_info=exc_info)
        self.stop()

    def signal_cb(self, handle, signum):
        # Wake the loop from the signal context; actual shutdown happens
        # in async_cb on the loop thread.
        self.async_handle.send()

    def async_cb(self, handle):
        handle.close()
        self.stop()

    def io_cb(self, watcher, revents, error):
        """Accept all pending client connections until the queue is drained."""
        try:
            while True:
                try:
                    sock, address = self.sock.accept()
                except socket.error as err:
                    if err.args[0] in NONBLOCKING:
                        # No more pending connections.
                        break
                    else:
                        raise
                else:
                    self.conns[address] = Connection(sock, address, self.loop)
        except Exception:
            self.handle_error("error accepting a connection")

    def start(self):
        """Listen, install signal handlers, and run the loop until stopped."""
        self.sock.listen(socket.SOMAXCONN)
        self.poll_watcher.start(pyuv.UV_READABLE, self.io_cb)
        for sig in STOPSIGNALS:
            handle = pyuv.Signal(self.loop)
            handle.start(self.signal_cb, sig)
            self.signal_watchers.add(handle)
        logging.debug("{0}: started on {0.address}".format(self))
        self.loop.run()
        logging.debug("{0}: stopped".format(self))

    def stop(self):
        """Tear down watchers, the listening socket, and all live connections."""
        self.poll_watcher.stop()
        for watcher in self.signal_watchers:
            watcher.stop()
        self.signal_watchers.clear()
        self.sock.close()
        for conn in self.conns.values():
            conn.close()
        logging.debug("{0}: stopping".format(self))
if __name__ == "__main__":
    # Start serving on localhost:9876; runs until a stop signal arrives.
    server = Server(("127.0.0.1", 9876))
    server.start()
| 31.531915 | 94 | 0.587494 |
acf5e051b7cfc53df97f2c42d0e287651dd2f024 | 30,528 | py | Python | lingvo/core/bn_layers.py | Harshs27/lingvo | bd396e651488b2e2c4a7416be077b4a0226c87c8 | [
"Apache-2.0"
] | null | null | null | lingvo/core/bn_layers.py | Harshs27/lingvo | bd396e651488b2e2c4a7416be077b4a0226c87c8 | [
"Apache-2.0"
] | null | null | null | lingvo/core/bn_layers.py | Harshs27/lingvo | bd396e651488b2e2c4a7416be077b4a0226c87c8 | [
"Apache-2.0"
] | null | null | null | # Lint as: python3
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Batch normalization layers."""
import lingvo.compat as tf
from lingvo.core import base_layer
from lingvo.core import py_utils
from lingvo.core import summary_utils
from tensorflow.python.ops import nn # pylint:disable=g-direct-tensorflow-import
from tensorflow.python.tpu import tpu_function # pylint:disable=g-direct-tensorflow-import
_BN_FLOPS_PER_ELEMENT = 10
# TODO(rpang): move AddingAccumulator to a separate library.
class AddingAccumulator(base_layer.Accumulator):
  """Accumulator for the sufficient statistics.

  Accumulates values by simple addition (see Update), starting from a zero
  tensor of the configured shape/dtype.
  """

  def __init__(self, shape, dtype):
    super().__init__()
    self.dtype = dtype  # dtype of the accumulated tensor.
    self.shape = shape  # static shape of the accumulated tensor.

  def DefaultValue(self):
    """Returns the default value of the accumulator."""
    return tf.zeros(self.shape, dtype=self.dtype)

  def Update(self, value):
    """Adds value to the accumulator."""
    self.SetValue(self.GetValue() + tf.cast(value, self.dtype))
def ComputeMomentsWithPadding(inputs,
                              padding,
                              reduce_over_dims,
                              cumulative_axis=None,
                              enable_cross_replica_sum_on_tpu=False,
                              keepdims=False):
  """Computes mean and variance over the valid data points in inputs.

  Args:
    inputs: The input tensor.
    padding: Padding tensor with 1.0 marking padded positions; must have the
      same rank as `inputs` and be broadcastable against it.
    reduce_over_dims: Dimensions over which the moments are computed.
    cumulative_axis: If not None, returns running (cumulative) moments along
      this axis via cumsum of the sufficient statistics.
    enable_cross_replica_sum_on_tpu: If True (and running on TPU), aggregates
      sufficient statistics across replicas with tf.tpu.cross_replica_sum.
    keepdims: If True, retains reduced dimensions with length 1.

  Returns:
    A (mean, variance) tuple computed over the non-padded elements only.
  """
  mask = 1.0 - padding
  inputs = py_utils.with_dependencies([
      py_utils.assert_equal(tf.rank(inputs), tf.rank(mask)),
      py_utils.assert_greater_equal(mask, tf.zeros_like(mask)),
  ], inputs)
  sum_v = tf.reduce_sum(
      inputs * tf.cast(mask, inputs.dtype), reduce_over_dims, keepdims=keepdims)
  count_v = tf.reduce_sum(mask, reduce_over_dims, keepdims=keepdims)

  if cumulative_axis is not None:
    sum_v = tf.math.cumsum(sum_v, axis=cumulative_axis)
    count_v = tf.math.cumsum(count_v, axis=cumulative_axis)
  # Input shape is guaranteed to be a multiple of mask shape because the
  # inputs * mask op above was successfully broadcasted.
  input_size_on_reduced_dims = tf.reduce_prod(
      tf.gather(tf.shape(inputs), reduce_over_dims))
  mask_size_on_reduced_dims = tf.reduce_prod(
      tf.gather(tf.shape(mask), reduce_over_dims))
  # Scale the count so it reflects the number of *input* elements covered by
  # the (broadcast) mask, not just the mask elements themselves.
  mask_multiplier = tf.math.truediv(input_size_on_reduced_dims,
                                    mask_size_on_reduced_dims)
  count_v *= tf.cast(mask_multiplier, count_v.dtype)
  if py_utils.use_tpu() and enable_cross_replica_sum_on_tpu:
    sum_v = tf.tpu.cross_replica_sum(sum_v)
    count_v = tf.tpu.cross_replica_sum(count_v)

  # Guard against division by zero when everything is padded.
  count_v = tf.maximum(count_v, 1.0)
  mean = sum_v / count_v
  sum_vv = tf.reduce_sum(
      (inputs - mean) * (inputs - mean) * mask,
      reduce_over_dims,
      keepdims=keepdims)

  if cumulative_axis is not None:
    sum_vv = tf.math.cumsum(sum_vv, axis=cumulative_axis)

  if py_utils.use_tpu() and enable_cross_replica_sum_on_tpu:
    sum_vv = tf.tpu.cross_replica_sum(sum_vv)

  variance = py_utils.with_dependencies([
      py_utils.assert_greater_equal(sum_vv, tf.zeros_like(sum_vv)),
  ], sum_vv / count_v)
  return mean, variance
class BatchNormLayer(base_layer.BaseLayer):
  """Batch normalization layer."""

  @classmethod
  def Params(cls):
    """Layer hyperparams (dim, decay, and various train/eval mode switches)."""
    p = super().Params()
    p.Define('dim', 0, 'Depth of the input/output.')
    p.Define(
        'decay', 0.999,
        'Decay in updating the mean and variance moving average used in'
        ' batch normalization.')
    p.Define(
        'enable_cross_replica_sum_on_tpu', True,
        'If true, calls cross_replica_sum to the aggregate moving averages'
        ' across all replicas.')
    p.Define(
        'use_moving_avg_in_training', False,
        'If True, use global moving avg (mean, variance) during training'
        ' to avoid mismatch between train and eval, which then'
        ' essentially acts as an adaptive normalization step.')
    p.Define(
        'freeze_bn_stats', False,
        'If True, uses moving avg (mean, variance) during both training and '
        'inference. It behaves like force_eval but the gamma/beta are still '
        'trained when do_eval is False. The moving mean/var can be set by '
        'loading pretrained checkpoints. A use case is training detectors '
        'based on an pretrained checkpoint while BN stats are frozen.')
    p.Define(
        'gamma_zero_init', False,
        'If True, initialize gamma to zeros according to the technique '
        'introduced in the tech report: https://arxiv.org/abs/1706.02677')
    # TODO(rpang): remove this hparam, as it is replaced
    # by p.train.ema_decay_moving_vars.
    p.Define(
        'add_stats_to_moving_average_variables', None,
        'If True, adds (mean, variance) to the MOVING_AVERAGE_VARIABLES '
        'collection to be compatible with ema_decay. '
        'Recommendation: set to True for new models, and to False to maintain '
        'checkpoint compatibility.')
    p.Define('set_padded_output_to_zero', True,
             'If True, sets the padded outputs to zero.')
    p.Define(
        'use_fused_batch_norm_for_eval', False,
        'If True, uses tf.compat.v1.nn.fused_batch_norm instead of '
        'tf.nn.batch_normalization during eval. The fused version may be more '
        'efficient but it has more restrictions on the expected input shapes.'
        'The input tensor has to be rank 4, where the first dimension '
        'corresponds to the batch, and the last dimension corresponds to the '
        'features to normalize over. This usually corresponds to NHWC with '
        'image inputs. Note that fused_batch_norm wants to track its own '
        'mean and variance during training, so we are unable to use it '
        'for training since we want to have a custom mean and variance to '
        'support padding.')
    return p

  def __init__(self, params):
    super().__init__(params)
    p = self.params
    self._epsilon = 0.001  # Numerical-stability constant added to variance.
    self._decay = p.decay

  def _GetWeightShape(self):
    # Shape of beta/gamma. Subclasses (e.g. CategoricalBN) override this.
    return [self.params.dim]

  def _CreateLayerVariables(self):
    """Creates beta/gamma (unless using moving avg) and the moving stats."""
    p = self.params

    pc = py_utils.WeightParams(
        shape=self._GetWeightShape(),
        init=py_utils.WeightInit.Constant(0.0),
        dtype=p.dtype,
        collections=[self.__class__.__name__ + '_vars'])

    if not p.use_moving_avg_in_training:
      self.CreateVariable('beta', pc)
      if p.gamma_zero_init:
        # zero initialization to BN gamma
        self.CreateVariable('gamma', pc)
      else:
        # Note, The real gamma to use is 1 + gamma.
        self.CreateVariable('gamma', pc, lambda x: 1.0 + x)

    # Two statistics.
    moving_collections = ['moving_vars', self.__class__.__name__ + '_vars']
    if p.add_stats_to_moving_average_variables:
      moving_collections += [tf.GraphKeys.MOVING_AVERAGE_VARIABLES]
    elif p.add_stats_to_moving_average_variables is None:
      # TODO(rpang): force all models to set this param explicitly.
      tf.logging.warning(
          'BatchNormLayer.add_stats_to_moving_average_variables should be '
          'set to True for new models, and to False explicitly for '
          'checkpoint compatibility.')
    # Add to the MOVING_AVERAGE_VARIABLES collection so that they are returned
    # by tf.moving_average_variables() and included in EMA variables if
    # ema_decay is enabled.
    mva = py_utils.WeightParams(
        shape=[p.dim],
        init=py_utils.WeightInit.Constant(0.0),
        dtype=p.dtype,
        collections=moving_collections)
    self.CreateVariable(
        'moving_mean',
        mva,
        trainable=False,
        aggregation=tf.VariableAggregation.MEAN)

    mvv = py_utils.WeightParams(
        shape=[p.dim],
        init=py_utils.WeightInit.Constant(1.0),
        dtype=p.dtype,
        collections=moving_collections)
    self.CreateVariable(
        'moving_variance',
        mvv,
        trainable=False,
        aggregation=tf.VariableAggregation.MEAN)

  @property
  def epsilon(self):
    # Exposed read-only for callers that need the stabilizer value.
    return self._epsilon

  def _GetDefaultPaddings(self, inputs):
    """Gets the default paddings for an input."""
    # All-zeros padding of shape inputs.shape[:-1] + [1], i.e. nothing padded.
    return tf.zeros(
        tf.concat([tf.shape(inputs)[:-1], [1]], 0), dtype=inputs.dtype)

  def _GetBetaGamma(self, theta, inputs, **kwargs):
    # Base implementation ignores inputs/kwargs; CategoricalBN uses them.
    del inputs
    del kwargs
    p = self.params
    if p.use_moving_avg_in_training:
      # No learned scale/shift in this mode.
      beta = 0.0
      gamma = 1.0
    else:
      beta = theta.beta
      gamma = theta.gamma
    return beta, gamma

  def GetCurrentMoments(self, theta):
    """Gets the current computed moments, which should be applied at eval.

    Args:
      theta: A `.NestedMap` object containing weights' values of this layer and
        its children layers.

    Returns:
      Tuple of (mean, variance, beta, gamma).
    """
    p = self.params
    if p.use_moving_avg_in_training:
      return self.vars.moving_mean, self.vars.moving_variance, 0.0, 1.0
    else:
      return (self.vars.moving_mean, self.vars.moving_variance, theta.beta,
              theta.gamma)

  def ComputeAndUpdateMoments(self, theta, inputs, paddings=None, **kwargs):
    """Computes moments and updates state.

    Args:
      theta: A `.NestedMap` object containing weights' values of this layer and
        its children layers.
      inputs: The inputs tensor. Shaped [..., dim].
      paddings: The paddings tensor. Shaped [..., 1], with the same rank as the
        input tensor.
      **kwargs: Additional inputs.

    Returns:
      Tuple of (mean, variance, beta, gamma).
    """
    p = self.params
    if paddings is None:
      paddings = self._GetDefaultPaddings(inputs)
    inputs = py_utils.with_dependencies([
        py_utils.assert_shape_match([tf.shape(paddings)[-1]], [1]),
    ], inputs)
    with tf.name_scope(p.name):
      if self.do_eval or p.freeze_bn_stats:
        # The mean and variance used for normalization.
        norm_mean, norm_variance = (self.vars.moving_mean,
                                    self.vars.moving_variance)
      else:
        # Training: compute batch moments over all leading dims (everything
        # but the feature dim), respecting padding.
        rank = tf.rank(paddings)
        reduce_over_dims = tf.range(0, rank - 1)
        mean, variance = ComputeMomentsWithPadding(
            inputs, paddings, reduce_over_dims, None,
            p.enable_cross_replica_sum_on_tpu)

        py_utils.UpdateBatchNormVars(self.vars.moving_mean, mean, self._decay)
        py_utils.UpdateBatchNormVars(self.vars.moving_variance, variance,
                                     self._decay)
        # Add some summaries for visualization.
        summary_utils.histogram('%s_mean' % p.name, tf.cast(mean, tf.float32))
        summary_utils.histogram('%s_variance' % p.name,
                                tf.cast(variance, tf.float32))
        summary_utils.histogram('%s_moving_mean' % p.name,
                                tf.cast(self.vars.moving_mean, tf.float32))
        summary_utils.histogram('%s_moving_variance' % p.name,
                                tf.cast(self.vars.moving_variance, tf.float32))
        summary_utils.histogram(
            '%s_mean_diff' % p.name,
            tf.cast(
                tf.cast(mean, self.vars.moving_mean.dtype.base_dtype) -
                self.vars.moving_mean, tf.float32))
        summary_utils.histogram(
            '%s_variance_diff' % p.name,
            tf.cast(
                tf.cast(variance, self.vars.moving_variance.dtype.base_dtype) -
                self.vars.moving_variance, tf.float32))
        if p.use_moving_avg_in_training:
          # Use the global statistics for normalization.
          # Control dependencies on mean and variance make sure
          # moving_mean and variance will be updated for every training step.
          norm_mean = py_utils.with_dependencies([mean], self.vars.moving_mean)
          norm_variance = py_utils.with_dependencies([variance],
                                                     self.vars.moving_variance)
        else:
          # Use the batch statistics for normalization.
          norm_mean = mean
          norm_variance = variance

      norm_mean = py_utils.CheckNumerics(
          norm_mean, 'mean of %s failed numeric check' % p.name)
      norm_variance = py_utils.CheckNumerics(
          norm_variance, 'variance of %s failed numeric check' % p.name)

      beta, gamma = self._GetBetaGamma(theta, inputs, **kwargs)
      return norm_mean, norm_variance, beta, gamma

  def _ComputeBN(self, inputs, paddings, gamma, beta, norm_mean, norm_variance):
    """Normalizes inputs with the given moments and scale/shift parameters."""
    p = self.params
    with tf.control_dependencies([
        py_utils.assert_greater_equal(norm_variance,
                                      tf.zeros_like(norm_variance)),
        py_utils.assert_shape_match([tf.shape(inputs)[-1]],
                                    tf.shape(norm_mean)),
        py_utils.assert_shape_match([tf.shape(inputs)[-1]],
                                    tf.shape(norm_variance)),
    ]):
      if p.use_fused_batch_norm_for_eval and (self.do_eval or
                                              p.freeze_bn_stats):
        # Fused kernel is only valid in inference mode (fixed moments).
        bn_output, _, _ = nn.fused_batch_norm(
            inputs,
            gamma,
            beta,
            norm_mean,
            norm_variance,
            self._epsilon,
            is_training=False)
      else:
        bn_output = tf.nn.batch_normalization(inputs, norm_mean, norm_variance,
                                              beta, gamma, self._epsilon)
      if p.set_padded_output_to_zero:
        bn_output *= 1.0 - paddings

    return bn_output

  def FProp(self, theta, inputs, paddings=None):
    """Apply batch normalization.

    Args:
      theta: A `.NestedMap` object containing weights' values of this layer and
        its children layers.
      inputs: The inputs tensor. Shaped [..., dim].
      paddings: The paddings tensor. Shaped [..., 1], with the same rank as the
        input tensor.

    Returns:
      Output after applying batch normalization, with the same shape as
      'inputs'.
    """
    p = self.params
    if paddings is None:
      paddings = self._GetDefaultPaddings(inputs)
    with tf.name_scope(p.name):
      norm_mean, norm_variance, beta, gamma = self.ComputeAndUpdateMoments(
          theta, inputs, paddings)
      return self._ComputeBN(inputs, paddings, gamma, beta, norm_mean,
                             norm_variance)

  @classmethod
  def FPropMeta(cls, p, inputs, padding=None):
    """Returns a flop/shape cost estimate for FProp."""
    py_utils.CheckShapes((inputs,))
    return py_utils.NestedMap(
        flops=inputs.num_elements() * _BN_FLOPS_PER_ELEMENT,
        out_shapes=(inputs,))
class CategoricalBN(BatchNormLayer):
  """Implements a categorical BN which is akin to ...

  https://arxiv.org/pdf/1809.11096.pdf

  Specifically, the moving stats are category-agnostic, while {beta, gamma} are
  category-aware.
  """

  @classmethod
  def Params(cls):
    p = super().Params()
    p.Define('class_emb_dim', None, 'Dim of input class embedding.')

    # These modes are incompatible with per-class beta/gamma (asserted below).
    p.use_moving_avg_in_training = False
    p.use_fused_batch_norm_for_eval = False
    p.add_stats_to_moving_average_variables = True
    return p

  def __init__(self, params):
    assert params.name
    assert not params.use_moving_avg_in_training
    assert not params.use_fused_batch_norm_for_eval
    assert params.add_stats_to_moving_average_variables
    super().__init__(params)

  def _GetWeightShape(self):
    # One (beta, gamma) row per class.
    return [self.params.class_emb_dim, self.params.dim]

  def _GetBetaGamma(self, theta, inputs, **kwargs):
    """Selects per-example beta/gamma rows from the one-hot class embedding."""
    assert 'class_emb' in kwargs
    class_emb = kwargs['class_emb']

    # class_emb is a one-hot vector of shape [batch, class_emb_dim=num_classes].
    class_ids = tf.math.argmax(class_emb, axis=-1, output_type=tf.int32)
    # [batch, dim]
    # Not using matmul/einsum to avoid potential precision problem on TPU with
    # sparse inputs.
    beta = tf.gather(theta.beta, class_ids)
    gamma = tf.gather(theta.gamma, class_ids)

    # Extend to [batch, 1, ... 1, dim]
    batch = py_utils.GetShape(inputs)[0]
    to_shape = tf.concat(
        [[batch],
         tf.ones([py_utils.GetRank(inputs) - 2], tf.int32), [self.params.dim]],
        axis=0)
    beta = tf.reshape(beta, to_shape)
    gamma = tf.reshape(gamma, to_shape)

    return beta, gamma

  def FProp(self, theta, inputs, paddings, class_emb):
    """Apply batch normalization.

    Args:
      theta: A `.NestedMap` object containing weights' values of this layer and
        its children layers.
      inputs: The inputs tensor. Shaped [batch, ..., dim].
      paddings: The paddings tensor. Shaped [batch, ..., 1], with the same rank
        as the input tensor.
      class_emb: The conditioning inputs, Shaped [batch, emb_dim].

    Returns:
      Output after applying batch normalization, with the same shape as
      'inputs'.
    """
    p = self.params
    batch = py_utils.GetShape(inputs)[0]
    class_emb = py_utils.HasShape(class_emb, [batch, p.class_emb_dim])
    if not py_utils.use_tpu():
      # Verify class_emb really is one-hot (cheap asserts skipped on TPU).
      class_emb = py_utils.with_dependencies([
          py_utils.assert_less_equal(
              tf.cast(class_emb, tf.int32), 1, name='one_hot_assert1'),
          py_utils.assert_greater_equal(
              tf.cast(class_emb, tf.int32), 0, name='one_hot_assert2'),
          py_utils.assert_equal(
              tf.ones([batch], tf.int32),
              tf.cast(tf.reduce_sum(class_emb, -1), tf.int32),
              name='one_hot_assert3'),
      ], class_emb)

    with tf.name_scope(p.name):
      norm_mean, norm_variance, beta, gamma = self.ComputeAndUpdateMoments(
          theta, inputs, paddings=paddings, class_emb=class_emb)
      return self._ComputeBN(inputs, paddings, gamma, beta, norm_mean,
                             norm_variance)
class BatchNormLayerNoPadding(base_layer.BaseLayer):
  """Batchnorm layer without padding.

  Accumulates sufficient statistics across micro-batches via accumulators and
  updates moving_mean/moving_variance in PostTrainingStepUpdate.
  """

  @classmethod
  def Params(cls):
    """Parameters for BatchNormLayerNoPadding."""
    p = super().Params()
    p.Define('dim', 0, 'Depth of the input/output.')
    p.Define(
        'decay', 0.997,
        'Decay in updating the mean and variance moving average used in'
        ' batch normalization.')
    p.Define('epsilon', 0.001,
             'Small float added to variance to avoid dividing by zero.')
    p.Define(
        'bn_group_size', 1,
        'The number of shards participating in normalization when distributed'
        ' batchnorm is used. Only used for TPU.')
    return p

  def __init__(self, params):
    super().__init__(params)
    p = self.params
    assert p.name, 'Name of BatchNormLayerNoPadding is not set.'
    p.fprop_dtype = None

  def _CreateLayerVariables(self):
    """Creates beta/gamma, moving stats, and micro-batch accumulators."""
    super()._CreateLayerVariables()
    p = self.params

    # Skip L-P regularization for these variables.
    collections = [
        self.__class__.__name__ + '_vars', py_utils.SKIP_LP_REGULARIZATION
    ]
    pc = py_utils.WeightParams(
        shape=[p.dim],
        init=py_utils.WeightInit.Constant(0.0),
        dtype=p.dtype,
        collections=collections)

    self.CreateVariable('beta', pc)
    # Note, The real gamma to use is 1 + gamma.
    self.CreateVariable('gamma', pc, lambda x: 1.0 + x)

    moving_collections = [
        'moving_vars', tf.GraphKeys.MOVING_AVERAGE_VARIABLES,
        self.__class__.__name__ + '_vars'
    ]
    mva = py_utils.WeightParams(
        shape=[p.dim],
        init=py_utils.WeightInit.Constant(0.0),
        dtype=p.dtype,
        collections=moving_collections)
    # Two statistics computed from sufficient stats.
    self.CreateVariable('moving_mean', mva, trainable=False)
    mvv = py_utils.WeightParams(
        shape=[p.dim],
        init=py_utils.WeightInit.Constant(1.0),
        dtype=p.dtype,
        collections=moving_collections)
    self.CreateVariable('moving_variance', mvv, trainable=False)

    # Accumulate bn sufficient stats over micro-batches.
    dim = self.vars.beta.shape[0]
    self.RegisterAccumulator('counts', AddingAccumulator([], p.dtype))
    self.RegisterAccumulator('mean_ss', AddingAccumulator([dim], p.dtype))
    self.RegisterAccumulator('variance_ss', AddingAccumulator([dim], p.dtype))

  def PostTrainingStepUpdate(self, global_step):
    """Updates moving_mean, moving_variance after each training step."""
    p = self.params
    # Get sufficient stats that accumulates over microbatches.
    counts = self.accumulators.counts.GetValue()
    mean_ss = self.accumulators.mean_ss.GetValue()
    variance_ss = self.accumulators.variance_ss.GetValue()
    # Compute batch mean and batch variance from sufficient stats.
    mean, variance = tf.nn.normalize_moments(counts, mean_ss, variance_ss, None)
    decay = tf.convert_to_tensor(1.0 - p.decay, p.dtype)
    # Update moving_mean, moving_variance from batch mean and batch variance.
    # The tf.where guards skip the update when no data was seen (counts == 0).
    with tf.name_scope(p.name) as scope:
      with tf.ops.colocate_with(self.vars.moving_mean):
        mean_update = tf.assign_sub(
            self.vars.moving_mean,
            tf.where(
                tf.greater(counts, 0.5),
                (self.vars.moving_mean - tf.cast(mean, p.dtype)) * decay,
                tf.zeros_like(self.vars.moving_mean)),
            name='moving_mean_update')
      with tf.ops.colocate_with(self.vars.moving_variance):
        var_update = tf.assign_sub(
            self.vars.moving_variance,
            tf.where(
                tf.greater(counts, 0.5),
                (self.vars.moving_variance - tf.cast(variance, p.dtype)) *
                decay, tf.zeros_like(self.vars.moving_variance)),
            name='moving_variance_update')
      py_utils.CheckNumerics(
          self.vars.moving_mean,
          'moving mean of {} failed numeric check'.format(scope))
      py_utils.CheckNumerics(
          self.vars.moving_variance,
          'moving variance of {} failed numeric check'.format(scope))
    # Reset accumulators so the next step starts from zero.
    self.accumulators.counts.Reset()
    self.accumulators.mean_ss.Reset()
    self.accumulators.variance_ss.Reset()
    return tf.group(mean_update, var_update)

  def _Moments(self, inputs, group_size):
    """Computes mean and variance over N,H,W dimensions in inputs."""
    counts, mean_ss, variance_ss, _, = tf.nn.sufficient_statistics(
        inputs, axes=[0, 1, 2], keepdims=False)
    self.accumulators.counts.Update(counts)
    self.accumulators.mean_ss.Update(mean_ss)
    self.accumulators.variance_ss.Update(variance_ss)
    # Distributed batch norm that computes sufficient statistics from group_size
    # replicas. This is useful when batch_size_per_replica is too small to
    # compute reliable sufficient statistics.
    if py_utils.use_tpu() and group_size > 1:
      group_assignment = None
      num_shards = tpu_function.get_tpu_context().number_of_shards
      if num_shards is not None:
        if num_shards < group_size:
          # Fixed message typo: was 'bn_gropu_size'.
          raise ValueError('TPU shards={} less than bn_group_size={}.'.format(
              num_shards, group_size))
        if num_shards % group_size:
          raise ValueError(
              'TPU shards={} not divisible by bn_group_size={}.'.format(
                  num_shards, group_size))
        num_groups = num_shards // group_size
        group_assignment = []
        for g in range(num_groups):
          replica_ids = [g * group_size + i for i in range(group_size)]
          group_assignment.append(replica_ids)
        counts *= group_size
      mean_ss = tf.tpu.cross_replica_sum(mean_ss, group_assignment)
      variance_ss = tf.tpu.cross_replica_sum(variance_ss, group_assignment)
    # At each micro-step, batch_mean and batch_variance are computed
    # to normalize inputs. But they are not used to update moving_mean and
    # moving_variance variables until the last micro batch.
    mean, variance = tf.nn.normalize_moments(counts, mean_ss, variance_ss, None)
    return mean, variance

  def FProp(self, theta, inputs):
    """Applies batch normalization.

    Using the implementation in github.com/
    tensorflow/tpu/blob/master/models/official/amoeba_net/network_utils.py#L550

    Args:
      theta: A nested map object containing weights' values of this layer and
        its children layers.
      inputs: The inputs tensor. Shaped [..., dim].

    Returns:
      Output after applying batch normalization, with the same shape as
      'inputs'.
    """
    p = self.params
    inputs_dtype = inputs.dtype
    inputs = tf.cast(inputs, p.dtype)
    inputs = py_utils.with_dependencies([
        py_utils.assert_shape_match([tf.shape(inputs)[-1]], tf.shape(
            theta.beta))
    ], inputs)
    with tf.name_scope(p.name) as scope:
      if self.do_eval:
        # Eval normalizes with the frozen moving statistics.
        outputs = tf.nn.batch_normalization(inputs, theta.moving_mean,
                                            theta.moving_variance,
                                            theta.beta, theta.gamma, p.epsilon)
      else:
        mean, variance = self._Moments(inputs, p.bn_group_size)
        mean = py_utils.CheckNumerics(
            mean, 'mean of {} failed numeric check'.format(scope))
        variance = py_utils.CheckNumerics(
            variance, 'variance of {} failed numeric check'.format(scope))
        outputs = tf.nn.batch_normalization(inputs, mean, variance, theta.beta,
                                            theta.gamma, p.epsilon)
      outputs.set_shape(inputs.get_shape())
      return tf.cast(outputs, inputs_dtype)

  @classmethod
  def FPropMeta(cls, p, inputs):
    """Returns metadata about the `FProp` computation for this layer."""
    py_utils.CheckShapes((inputs,))
    return py_utils.NestedMap(
        flops=inputs.num_elements() * _BN_FLOPS_PER_ELEMENT,
        out_shapes=(inputs,))
class GroupNormLayer(base_layer.BaseLayer):
  """Group normalization layer(https://arxiv.org/abs/1803.08494)."""

  @classmethod
  def Params(cls):
    p = super().Params()
    p.Define('dim', 0, 'Depth of the input/output.')
    p.Define('num_groups', 32, 'Number of groups for GroupNorm.')
    p.Define('min_group_size', 1, 'Minimum group size for GroupNorm')
    p.Define('cumulative', False, 'If true, only normalize by current and '
             'previous time steps.')
    return p

  def __init__(self, params):
    super().__init__(params)
    p = self.params
    assert p.name
    assert p.num_groups > 0
    assert p.min_group_size > 0
    if p.dim >= p.num_groups:
      assert p.dim % p.num_groups == 0, ('p.dim({0}) is not dividable by '
                                         'p.num_groups({1})').format(
                                             p.dim, p.num_groups)

    self._epsilon = 0.001  # Numerical-stability constant added to variance.

  def _CreateLayerVariables(self):
    super()._CreateLayerVariables()
    p = self.params
    # Skip L-P regularization for the scale/shift parameters.
    collections = [
        self.__class__.__name__ + '_vars', py_utils.SKIP_LP_REGULARIZATION
    ]
    pc = py_utils.WeightParams(
        shape=[1, 1, 1, p.dim],
        init=py_utils.WeightInit.Constant(0.0),
        dtype=p.dtype,
        collections=collections)

    self.CreateVariable('beta', pc)
    # Note, The real gamma to use is 1 + gamma.
    self.CreateVariable('gamma', pc, lambda x: 1.0 + x)

  def FProp(self, theta, inputs, paddings=None):
    """Apply group normalization.

    Args:
      theta: A NestedMap object containing weights' values of this layer and its
        children layers.
      inputs: The inputs tensor with shape [batch_size, height, width, channel].
      paddings: The paddings tensor with shape [batch_size, height]. Intended to
        be used for sequence processing where `height` is `time`.

    Returns:
      A single tensor as the output after applying group normalization, with
      the same shape as 'inputs'. Or a output, output_paddings pair if input
      paddings is not None.
    """
    p = self.params
    n, h, w, c = tf.unstack(tf.shape(inputs), axis=0, num=4)
    group_size = p.dim // p.num_groups
    num_groups = p.num_groups

    # Enforce the minimum group size by shrinking the number of groups.
    min_group_size = p.min_group_size if p.dim > p.min_group_size else p.dim
    if group_size <= min_group_size:
      group_size = min_group_size
      num_groups = p.dim // group_size

    with tf.name_scope(p.name):
      # Split channels into [num_groups, group_size] for per-group moments.
      x = tf.reshape(inputs, [n, h, w, num_groups, group_size])
      if paddings is None:
        counts, means_ss, variance_ss, _, = tf.nn.sufficient_statistics(
            x, axes=[1, 2, 4], keepdims=True)
        norm_mean, norm_variance = tf.nn.normalize_moments(
            counts, means_ss, variance_ss, None)
      else:
        expanded_paddings = tf.reshape(paddings, [n, h, 1, 1, 1])
        if p.cumulative:
          # Running moments along time (axis 1): each step only sees itself
          # and earlier steps.
          norm_mean, norm_variance = ComputeMomentsWithPadding(
              x,
              expanded_paddings,
              reduce_over_dims=[2, 4],
              cumulative_axis=1,
              keepdims=True)
        else:
          norm_mean, norm_variance = ComputeMomentsWithPadding(
              x, expanded_paddings, [1, 2, 4], keepdims=True)

      norm_mean = py_utils.CheckNumerics(
          norm_mean, 'mean of %s failed numeric check' % p.name)
      norm_variance = py_utils.CheckNumerics(
          norm_variance, 'variance of %s failed numeric check' % p.name)

      beta = theta.beta
      gamma = theta.gamma
      # Cumulative mode keeps a per-timestep mean/variance; otherwise the
      # time dimension of the moments is 1.
      t = h if p.cumulative else 1

      with tf.control_dependencies([
          py_utils.assert_greater_equal(norm_variance,
                                        tf.cast(0., norm_variance.dtype)),
          py_utils.assert_shape_match([n, t, 1, num_groups, 1],
                                      tf.shape(norm_mean)),
          py_utils.assert_shape_match([n, t, 1, num_groups, 1],
                                      tf.shape(norm_variance)),
      ]):
        x = (x - norm_mean) / tf.sqrt(norm_variance + self._epsilon)
        x = tf.reshape(x, [n, h, w, c])
        gn_output = x * gamma + beta
        # NOTE(review): x is already [n, h, w, c] and gamma/beta broadcast
        # from [1, 1, 1, dim], so this reshape appears to be a no-op kept
        # for shape clarity -- confirm before removing.
        gn_output = tf.reshape(gn_output, [n, h, w, c])
        if paddings is None:
          return gn_output
        else:
          return gn_output, paddings

  @classmethod
  def FPropMeta(cls, p, inputs):
    py_utils.CheckShapes((inputs,))
    flops_per_element = 10  # Approximately 10 flops per element.
    return py_utils.NestedMap(
        flops=inputs.num_elements() * flops_per_element, out_shapes=(inputs,))
| 39.239075 | 91 | 0.648159 |
acf5e0fe46c888126d52157df8fc53fc134867a2 | 83 | py | Python | deco1.py | rbobot/test-decorators | a51a65b741272d1d986e42936d6d6c265cf271a3 | [
"MIT"
] | null | null | null | deco1.py | rbobot/test-decorators | a51a65b741272d1d986e42936d6d6c265cf271a3 | [
"MIT"
] | null | null | null | deco1.py | rbobot/test-decorators | a51a65b741272d1d986e42936d6d6c265cf271a3 | [
"MIT"
def div(a, b):
    """Return the quotient of a divided by b (true division)."""
    return a / b


# Functions are first-class objects: bind another name to the same function.
my_var = div
print(type(my_var))
print(my_var(1, 2))
acf5e2136276cd2d43baf6c36c29bd8272a71942 | 9,540 | py | Python | integrations/common/marquez/provider/bigquery.py | hanbei/marquez | a573748c6b9696cbfdea5d1da1bfc7da14a14aa3 | [
"Apache-2.0"
] | 1 | 2021-07-16T13:13:08.000Z | 2021-07-16T13:13:08.000Z | integrations/common/marquez/provider/bigquery.py | hanbei/marquez | a573748c6b9696cbfdea5d1da1bfc7da14a14aa3 | [
"Apache-2.0"
] | null | null | null | integrations/common/marquez/provider/bigquery.py | hanbei/marquez | a573748c6b9696cbfdea5d1da1bfc7da14a14aa3 | [
"Apache-2.0"
] | null | null | null | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import traceback
import attr
from typing import Tuple, Optional, Dict, List
from google.cloud import bigquery
from marquez.dataset import Dataset, Source
from marquez.models import DbTableSchema, DbColumn, DbTableName
from marquez.schema import GITHUB_LOCATION
from openlineage.facet import BaseFacet
from marquez.utils import get_from_nullable_chain
_BIGQUERY_CONN_URL = 'bigquery'
@attr.s
class BigQueryErrorRunFacet(BaseFacet):
    """
    Represents errors that can happen during execution of BigqueryExtractor
    :param clientError: represents errors originating in bigquery client
    :param parserError: represents errors that happened during parsing SQL provided to bigquery
    """
    clientError: str = attr.ib(default=None)
    parserError: str = attr.ib(default=None)

    @staticmethod
    def _get_schema() -> str:
        # JSON-schema URL identifying this facet type in OpenLineage events.
        return GITHUB_LOCATION + "bq-error-run-facet.json"
@attr.s
class BigQueryJobRunFacet(BaseFacet):
    """
    Facet that represents relevant statistics of bigquery run.
    :param cached: bigquery caches query results. Rest of the statistics will not be provided
        for cached queries.
    :param billedBytes: how many bytes bigquery bills for.
    :param properties: full property tree of bigquery run.
    """
    cached: bool = attr.ib()
    billedBytes: int = attr.ib(default=None)
    properties: str = attr.ib(default=None)

    @staticmethod
    def _get_schema() -> str:
        # JSON-schema URL identifying this facet type in OpenLineage events.
        return GITHUB_LOCATION + "bq-statistics-run-facet.json"
@attr.s
class BigQueryStatisticsDatasetFacet(BaseFacet):
    """
    Facet that represents statistics of output dataset resulting from bigquery run.
    :param rowCount: how many rows query produced.
    :param size: size of output dataset in bytes.
    """
    rowCount: int = attr.ib()
    size: int = attr.ib()

    @staticmethod
    def _get_schema() -> str:
        # JSON-schema URL identifying this facet type in OpenLineage events.
        return GITHUB_LOCATION + "bq-statistics-dataset-facet.json"
@attr.s
class BigQueryFacets:
    """Bundle of everything extracted from a single BigQuery job run."""
    # Facets attached to the run itself (job statistics and/or errors).
    run_facets: Dict[str, BaseFacet] = attr.ib()
    # Input datasets referenced by the query.
    inputs: List[Dataset] = attr.ib()
    # Output dataset, when the job produced one.
    output: Optional[Dataset] = attr.ib(default=None)
class BigQueryDatasetsProvider:
def __init__(
    self,
    client: Optional[bigquery.Client] = None,
    logger: Optional[logging.Logger] = None
):
    """Create a provider, defaulting to a fresh BigQuery client and a
    module-level logger when none are supplied."""
    self.client = client if client is not None else bigquery.Client()
    self.logger = logger if logger is not None else logging.getLogger(__name__)
def get_facets(self, job_id: str) -> BigQueryFacets:
    """Fetch a finished BigQuery job and translate it into OpenLineage facets.

    Failures while talking to BigQuery are not propagated; they are captured
    in a "bigQuery_error" run facet instead, so the caller always gets a
    BigQueryFacets back.
    """
    inputs = []
    output = None
    run_facets = {}
    try:
        try:
            job = self.client.get_job(job_id=job_id)
            props = job._properties
            run_stat_facet, dataset_stat_facet = self._get_output_statistics(props)
            run_facets.update({
                "bigQuery_job": run_stat_facet
            })
            inputs = self._get_input_from_bq(props)
            output = self._get_output_from_bq(props)
            if output:
                output.custom_facets.update({
                    "stats": dataset_stat_facet
                })
        finally:
            # Ensure client has close() defined, otherwise ignore.
            # NOTE: close() was introduced in python-bigquery v1.23.0
            if hasattr(self.client, "close"):
                self.client.close()
    except Exception as e:
        self.logger.error(
            f"Cannot retrieve job details from BigQuery.Client. {e}",
            exc_info=True
        )
        run_facets.update({
            "bigQuery_error": BigQueryErrorRunFacet(
                clientError=f"{e}: {traceback.format_exc()}",
            )
        })
    return BigQueryFacets(run_facets, inputs, output)
def _get_output_statistics(self, properties) \
-> Tuple[BigQueryJobRunFacet, Optional[BigQueryStatisticsDatasetFacet]]:
stages = get_from_nullable_chain(properties, ['statistics', 'query', 'queryPlan'])
json_props = json.dumps(properties)
if not stages:
if get_from_nullable_chain(properties, ['statistics', 'query', 'statementType']) \
== 'CREATE_VIEW':
return BigQueryJobRunFacet(cached=False), None
# we're probably getting cached results
if get_from_nullable_chain(properties, ['statistics', 'query', 'cacheHit']):
return BigQueryJobRunFacet(cached=True), None
if get_from_nullable_chain(properties, ['status', 'state']) != "DONE":
raise ValueError("Trying to extract data from running bigquery job")
raise ValueError(
f"BigQuery properties did not have required data: queryPlan - {json_props}"
)
out_stage = stages[-1]
out_rows = out_stage.get("recordsWritten", None)
out_bytes = out_stage.get("shuffleOutputBytes", None)
billed_bytes = get_from_nullable_chain(properties, [
'statistics', 'query', 'totalBytesBilled'
])
return BigQueryJobRunFacet(
cached=False,
billedBytes=int(billed_bytes) if billed_bytes else None,
properties=json_props
), BigQueryStatisticsDatasetFacet(
rowCount=int(out_rows),
size=int(out_bytes)
) if out_bytes and out_rows else None
def _get_input_from_bq(self, properties):
bq_input_tables = get_from_nullable_chain(properties, [
'statistics', 'query', 'referencedTables'
])
if not bq_input_tables:
return []
input_table_names = [
self._bq_table_name(bq_t) for bq_t in bq_input_tables
]
sources = [
self._source() for bq_t in bq_input_tables
]
try:
return [
Dataset.from_table_schema(
source=source,
table_schema=table_schema
)
for table_schema, source in zip(self._get_table_schemas(
input_table_names
), sources)
]
except Exception as e:
self.logger.warning(f'Could not extract schema from bigquery. {e}')
return [
Dataset.from_table(source, table)
for table, source in zip(input_table_names, sources)
]
def _get_output_from_bq(self, properties) -> Optional[Dataset]:
bq_output_table = get_from_nullable_chain(properties, [
'configuration', 'query', 'destinationTable'
])
if not bq_output_table:
return None
output_table_name = self._bq_table_name(bq_output_table)
source = self._source()
table_schema = self._get_table_safely(output_table_name)
if table_schema:
return Dataset.from_table_schema(
source=source,
table_schema=table_schema,
)
else:
self.logger.warning("Could not resolve output table from bq")
return Dataset.from_table(source, output_table_name)
def _get_table_safely(self, output_table_name):
try:
return self._get_table(output_table_name)
except Exception as e:
self.logger.warning(f'Could not extract output schema from bigquery. {e}')
return None
def _get_table_schemas(self, tables: [str]) \
-> [DbTableSchema]:
# Avoid querying BigQuery by returning an empty array
# if no tables have been provided.
if not tables:
return []
return [self._get_table(table) for table in tables]
def _get_table(self, table: str) -> Optional[DbTableSchema]:
bq_table = self.client.get_table(table)
if not bq_table._properties:
return
table = bq_table._properties
fields = get_from_nullable_chain(table, ['schema', 'fields'])
if not fields:
return
columns = [DbColumn(
name=fields[i].get('name'),
type=fields[i].get('type'),
description=fields[i].get('description'),
ordinal_position=i
) for i in range(len(fields))]
return DbTableSchema(
schema_name=table.get('tableReference').get('projectId') + '.' +
table.get('tableReference').get('datasetId'),
table_name=DbTableName(table.get('tableReference').get('tableId')),
columns=columns
)
def _source(self) -> Source:
return Source(
scheme='bigquery',
connection_url='bigquery'
)
def _bq_table_name(self, bq_table):
project = bq_table.get('projectId')
dataset = bq_table.get('datasetId')
table = bq_table.get('tableId')
return f"{project}.{dataset}.{table}"
| 34.945055 | 95 | 0.618868 |
acf5e2ba658ed8e0dabd6cc41a1030362271e040 | 556 | py | Python | 0823-Binary Trees With Factors/0823-Binary Trees With Factors.py | zhuangli1987/LeetCode-1 | e81788abf9e95e575140f32a58fe983abc97fa4a | [
"MIT"
] | null | null | null | 0823-Binary Trees With Factors/0823-Binary Trees With Factors.py | zhuangli1987/LeetCode-1 | e81788abf9e95e575140f32a58fe983abc97fa4a | [
"MIT"
] | null | null | null | 0823-Binary Trees With Factors/0823-Binary Trees With Factors.py | zhuangli1987/LeetCode-1 | e81788abf9e95e575140f32a58fe983abc97fa4a | [
"MIT"
] | 1 | 2019-11-20T08:01:10.000Z | 2019-11-20T08:01:10.000Z | class Solution:
def numFactoredBinaryTrees(self, A):
"""
:type A: List[int]
:rtype: int
"""
MOD = 1000000007
n = len(A)
A.sort()
table = {x: i for i, x in enumerate(A)}
dp = [1] * n
total = 0
for i in range(n):
for j in range(i):
if A[i] % A[j] == 0:
num = A[i] / A[j]
if num in table:
dp[i] += dp[j] * dp[table[num]]
total += dp[i]
return total % MOD
| 26.47619 | 55 | 0.374101 |
acf5e335d8f0e205dab86c404de1d40aa68bf4cc | 8,244 | py | Python | poetics/stemmer.py | M-R-Epstein/poetics | 6331517c22ca567b9c68e2c668f670855e2ba618 | [
"MIT"
] | 4 | 2019-02-21T20:53:57.000Z | 2022-03-12T16:36:02.000Z | poetics/stemmer.py | M-R-Epstein/poetics | 6331517c22ca567b9c68e2c668f670855e2ba618 | [
"MIT"
] | 1 | 2019-02-19T14:37:29.000Z | 2019-02-19T14:37:29.000Z | poetics/stemmer.py | M-R-Epstein/poetics | 6331517c22ca567b9c68e2c668f670855e2ba618 | [
"MIT"
] | null | null | null | """An implementation of the Porter2 stemming algorithm.
See http://snowball.tartarus.org/algorithms/english/stemmer.html.
Adapted by Matt Chaput from pyporter2 by Michael Dirolf.
This algorithm is more correct but (at least in this implementation) several times slower than the original porter
algorithm.
"""
import re
# Precompiled patterns shared by the region / syllable helpers below.
r_exp = re.compile(r"[^aeiouy]*[aeiouy]+[^aeiouy](\w*)")  # locates R1/R2 region starts
ewss_exp1 = re.compile(r"^[aeiouy][^aeiouy]$")  # two-letter short syllable, e.g. "ow"
ewss_exp2 = re.compile(r".*[^aeiouy][aeiouy][^aeiouywxY]$")  # cons-vowel-cons word ending
ccy_exp = re.compile(r"([aeiouy])y")  # a 'y' following a vowel (consonant role)
s1a_exp = re.compile(r"[aeiouy].")  # a vowel that is not the final letter
s1b_exp = re.compile(r"[aeiouy]")  # any vowel
def get_r1(word):
    """Return the index at which region R1 begins (Porter2 definition)."""
    # Spec-mandated exceptional prefixes with fixed region starts.
    for prefix, start in (('gener', 5), ('arsen', 5), ('commun', 6)):
        if word.startswith(prefix):
            return start
    # Normal form: R1 starts after the first non-vowel that follows a vowel.
    m = re.match(r"[^aeiouy]*[aeiouy]+[^aeiouy](\w*)", word)
    return m.start(1) if m else len(word)
def get_r2(word):
    """Return the index at which region R2 begins (the R1 of R1)."""
    m = r_exp.match(word, get_r1(word))
    if m is None:
        return len(word)
    return m.start(1)
def ends_with_short_syllable(word):
    """True if *word* ends in a short syllable as defined by Porter2."""
    # A two-letter word of the form vowel+consonant is short (e.g. "ow").
    if len(word) == 2 and re.fullmatch(r"[aeiouy][^aeiouy]", word):
        return True
    # Otherwise: consonant, vowel, then a consonant other than w, x or Y.
    return re.search(r"[^aeiouy][aeiouy][^aeiouywxY]$", word) is not None
def is_short_word(word):
    """A word is short when it ends in a short syllable and R1 is empty."""
    return ends_with_short_syllable(word) and get_r1(word) == len(word)
def remove_initial_apostrophe(word):
    """Strip a single leading apostrophe, if present."""
    return word[1:] if word.startswith("'") else word
def capitalize_consonant_ys(word):
    """Mark consonant-role 'y's as 'Y': a word-initial y, and any y after a vowel."""
    if word.startswith('y'):
        word = 'Y' + word[1:]
    return re.sub(r"([aeiouy])y", r"\g<1>Y", word)
def step_0(word):
    """Porter2 step 0: remove a trailing "'s'", "'s" or "'" (longest first)."""
    for suffix in ("'s'", "'s", "'"):
        if word.endswith(suffix):
            return word[:-len(suffix)]
    return word
def step_1a(word):
    """Porter2 step 1a: normalize plural / '-ies' / '-sses' endings."""
    if word.endswith('sses'):
        return word[:-4] + 'ss'
    if word.endswith(('ied', 'ies')):
        # Longer stems take 'i'; very short words keep 'ie' (ties -> tie).
        replacement = 'i' if len(word) > 4 else 'ie'
        return word[:-3] + replacement
    if word.endswith(('us', 'ss')):
        return word
    if word.endswith('s') and re.search(r"[aeiouy].", word[:-1]):
        # Delete the 's' only when a vowel occurs before the preceding letter.
        return word[:-1]
    return word
# Doubled consonants that step 1b undoubles.
doubles = ('bb', 'dd', 'ff', 'gg', 'mm', 'nn', 'pp', 'rr', 'tt')


def ends_with_double(word):
    """True if *word* ends in one of the Porter2 doubled consonants."""
    # str.endswith accepts a tuple of candidate suffixes.
    return word.endswith(doubles)
def step_1b_helper(word):
    """Repair the stem after an -ed/-ing removal (Porter2 step 1b)."""
    if word.endswith(('at', 'bl', 'iz')):
        return word + 'e'
    if ends_with_double(word):
        # Undouble the final consonant: hopp -> hop.
        return word[:-1]
    if is_short_word(word):
        # Short words regain an 'e': hop -> hope.
        return word + 'e'
    return word
s1b_suffixes = ('ed', 'edly', 'ing', 'ingly')
def step_1b(word, r1):
    """Porter2 step 1b: handle '-eed(ly)' and '-ed/-ing' style suffixes.

    :param word: word with consonant 'y's already capitalized.
    :param r1: index at which region R1 begins (from get_r1).
    """
    # '-eedly'/'-eed' become '-ee', but only when the suffix lies in R1.
    if word.endswith('eedly'):
        if len(word) - 5 >= r1:
            return word[:-3]
        return word
    if word.endswith('eed'):
        if len(word) - 3 >= r1:
            return word[:-1]
        return word
    # '-ed'/'-ing' (and their '-ly' forms) are deleted when a vowel precedes
    # them; the stem is then repaired (undoubling / adding 'e') by the helper.
    for suffix in s1b_suffixes:
        if word.endswith(suffix):
            preceding = word[:-len(suffix)]
            if s1b_exp.search(preceding):
                return step_1b_helper(preceding)
            return word
    return word
def step_1c(word):
    """Porter2 step 1c: replace a final consonant-preceded 'y'/'Y' with 'i'.

    The original condition ``a or b and c`` parsed as ``a or (b and c)``,
    so a bare "y" entered the branch and ``word[-2]`` raised IndexError;
    the length guard now protects both suffix checks.
    """
    if (word.endswith('y') or word.endswith('Y')) and len(word) > 1:
        if word[-2] not in 'aeiouy':
            # The preceding consonant must not be the first letter ("by" stays).
            if len(word) > 2:
                return word[:-1] + 'i'
    return word
def step_2_helper(word, r1, end, repl, prev):
    """Try one step-2 rule.

    Returns the rewritten word, the word unchanged (suffix matched but the
    rule did not fire), or None when the suffix does not match at all.
    """
    if not word.endswith(end):
        return None
    stem = word[:-len(end)]
    if len(stem) >= r1:
        # Apply when no preceding-letter constraint exists, or one is met.
        if not prev or any(stem.endswith(p) for p in prev):
            return stem + repl
    return word
# Step-2 rules as (suffix, replacement, allowed preceding letters),
# ordered longest suffix first so the first match is the longest one.
s2_triples = (('ization', 'ize', []),
              ('ational', 'ate', []),
              ('fulness', 'ful', []),
              ('ousness', 'ous', []),
              ('iveness', 'ive', []),
              ('tional', 'tion', []),
              ('biliti', 'ble', []),
              ('lessli', 'less', []),
              ('entli', 'ent', []),
              ('ation', 'ate', []),
              ('alism', 'al', []),
              ('aliti', 'al', []),
              ('ousli', 'ous', []),
              ('iviti', 'ive', []),
              ('fulli', 'ful', []),
              ('enci', 'ence', []),
              ('anci', 'ance', []),
              ('abli', 'able', []),
              ('izer', 'ize', []),
              ('ator', 'ate', []),
              ('alli', 'al', []),
              ('bli', 'ble', []),
              ('ogi', 'og', ['l']),
              ('li', '', ['c', 'd', 'e', 'g', 'h', 'k', 'm', 'n', 'r', 't']))
def step_2(word, r1):
    """Porter2 step 2: map derivational suffixes to canonical forms.

    Stops at the first rule whose suffix matches, whether or not the rule
    actually fires (step_2_helper returns the word unchanged in that case).
    """
    for trip in s2_triples:
        attempt = step_2_helper(word, r1, trip[0], trip[1], trip[2])
        if attempt:
            return attempt
    return word
def step_3_helper(word, r1, r2, end, repl, r2_necessary):
    """Try one step-3 rule.

    Returns the rewritten word, the word unchanged (suffix matched but the
    region requirements failed), or None when the suffix does not match.
    """
    if not word.endswith(end):
        return None
    stem = word[:-len(end)]
    # The suffix must lie inside R1, and optionally inside R2 as well.
    if len(stem) >= r1 and (not r2_necessary or len(stem) >= r2):
        return stem + repl
    return word
# Step-3 rules as (suffix, replacement, whether the match must lie in R2).
s3_triples = (('ational', 'ate', False),
              ('tional', 'tion', False),
              ('alize', 'al', False),
              ('icate', 'ic', False),
              ('iciti', 'ic', False),
              ('ative', '', True),
              ('ical', 'ic', False),
              ('ness', '', False),
              ('ful', '', False))
def step_3(word, r1, r2):
    """Porter2 step 3: strip or replace further derivational suffixes.

    '-ative' is the only rule that additionally requires region R2.
    """
    for trip in s3_triples:
        attempt = step_3_helper(word, r1, r2, trip[0], trip[1], trip[2])
        if attempt:
            return attempt
    return word
# Residual derivational suffixes deleted outright when they lie in R2.
s4_delete_list = ('al', 'ance', 'ence', 'er', 'ic', 'able', 'ible', 'ant', 'ement',
                  'ment', 'ent', 'ism', 'ate', 'iti', 'ous', 'ive', 'ize')


def step_4(word, r2):
    """Porter2 step 4: delete residual suffixes found inside R2."""
    for suffix in s4_delete_list:
        if word.endswith(suffix):
            stem = word[:-len(suffix)]
            # Delete only when the whole suffix lies inside R2.
            return stem if len(stem) >= r2 else word
    if word.endswith(('sion', 'tion')) and len(word) - 3 >= r2:
        # '-ion' is removed only when preceded by 's' or 't'.
        return word[:-3]
    return word
def step_5(word, r1, r2):
    """Porter2 step 5: delete a final 'e' or undouble a final 'll'.

    The ``word[-2]`` lookup is replaced by a slice comparison so that a
    one-letter word can no longer raise IndexError when the r2 guard passes.
    """
    if word.endswith('l'):
        # 'll' -> 'l' when the removed letter would lie inside R2.
        if len(word) - 1 >= r2 and word[-2:] == 'll':
            return word[:-1]
        return word
    if word.endswith('e'):
        stem = word[:-1]
        if len(stem) >= r2:
            return stem
        # In R1 only: keep the 'e' if dropping it would leave a short syllable.
        if len(stem) >= r1 and not ends_with_short_syllable(stem):
            return stem
        return word
    return word
def normalize_ys(word):
    """Lower any 'Y' markers back to a plain 'y'."""
    return word.translate(str.maketrans('Y', 'y'))
# Irregular words the algorithm would get wrong; stemmed by direct lookup.
exceptional_forms = {'skis': 'ski',
                     'skies': 'sky',
                     'dying': 'die',
                     'lying': 'lie',
                     'tying': 'tie',
                     'idly': 'idl',
                     'gently': 'gentl',
                     'ugly': 'ugli',
                     'early': 'earli',
                     'only': 'onli',
                     'singly': 'singl',
                     'sky': 'sky',
                     'news': 'news',
                     'howe': 'howe',
                     'atlas': 'atlas',
                     'cosmos': 'cosmos',
                     'bias': 'bias',
                     'andes': 'andes'}
# Words that are final as soon as step 1a has run (no further stemming).
exceptional_early_exit_post_1a = frozenset(['inning', 'outing', 'canning', 'herring',
                                            'earring', 'proceed', 'exceed', 'succeed'])
def stem(word):
    """Return the Porter2 (English Snowball) stem of *word*.

    *word* is expected to be lowercase.  Words of length <= 2 are returned
    unchanged, as are the spec's exceptional forms.  The steps below must
    run in exactly this order.
    """
    if len(word) <= 2:
        return word
    word = remove_initial_apostrophe(word)
    # handle some exceptional forms
    if word in exceptional_forms:
        return exceptional_forms[word]
    word = capitalize_consonant_ys(word)
    # Region boundaries are computed once, before any suffix removal.
    r1 = get_r1(word)
    r2 = get_r2(word)
    word = step_0(word)
    word = step_1a(word)
    # handle some more exceptional forms
    if word in exceptional_early_exit_post_1a:
        return word
    word = step_1b(word, r1)
    word = step_1c(word)
    word = step_2(word, r1)
    word = step_3(word, r1, r2)
    word = step_4(word, r2)
    word = step_5(word, r1, r2)
    word = normalize_ys(word)
    return word
| 26.338658 | 114 | 0.493328 |
acf5e33b0f7cf419bcb9d2d8fa7d26e002937a8b | 3,803 | py | Python | event/event/settings.py | JuroOravec/knwldg | 33235f78ae1ea6409883f312adcf8679c5bf2401 | [
"MIT"
] | null | null | null | event/event/settings.py | JuroOravec/knwldg | 33235f78ae1ea6409883f312adcf8679c5bf2401 | [
"MIT"
] | null | null | null | event/event/settings.py | JuroOravec/knwldg | 33235f78ae1ea6409883f312adcf8679c5bf2401 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Scrapy settings for event project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://docs.scrapy.org/en/latest/topics/settings.html
# https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
# Project identity: bot name and where Scrapy discovers/creates spiders.
BOT_NAME = 'event'
SPIDER_MODULES = ['event.spiders']
NEWSPIDER_MODULE = 'event.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
# USER_AGENT = 'event (+http://www.yourdomain.com)'
# Obey robots.txt rules
# Polite default: requests disallowed by a site's robots.txt are dropped.
ROBOTSTXT_OBEY = True
# Configure maximum concurrent requests performed by Scrapy (default: 16)
# CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
# DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
# CONCURRENT_REQUESTS_PER_DOMAIN = 16
# CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
# COOKIES_ENABLED = False
# COOKIES_DEBUG = True
# Disable Telnet Console (enabled by default)
# TELNETCONSOLE_ENABLED = False
# Override the default request headers:
# CUSTOM_REQUEST_HEADERS = OrderedDict({
# 'Host': 'www.infogreffe.com',
# 'Connection': 'keep-alive',
# 'Sec-Fetch-Mode': 'cors',
# 'X-Requested-With': 'XMLHttpRequest',
# 'User-Agent': 'scrapy',
# 'Content-Type': 'application/x-www-form-urlencoded',
# 'Accept': '*/*',
# 'Sec-Fetch-Site': 'same-origin',
# 'Referer': 'https://www.infogreffe.fr/',
# 'Accept-Encoding': 'gzip, deflate, br',
# 'Accept-Language': 'en-GB,en-US;q=0.9,en;q=0.8',
# 'Cookie': ''
# })
# Enable or disable spider middlewares
# See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
# No custom spider middlewares are active; the commented entries are kept
# as a record of previously used components.
SPIDER_MIDDLEWARES = {
    # 'fr.middlewares.FrSpiderMiddleware': 543,
    # 'fr.middlewares.SpiderExceptionMiddleware': 550,
}
# Enable or disable downloader middlewares
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
DOWNLOADER_MIDDLEWARES = {
    'scrapy.downloadermiddlewares.cookies.CookiesMiddleware': 300,
    # Disable the built-in user-agent middleware.  The old
    # 'scrapy.contrib.*' import path was deprecated in Scrapy 1.0 and
    # removed in 1.6; the 'scrapy.downloadermiddlewares.*' path is current.
    'scrapy.downloadermiddlewares.useragent.UserAgentMiddleware': None,
    # 'random_useragent.RandomUserAgentMiddleware': 400,
    # 'rotating_proxies.middlewares.RotatingProxyMiddleware': 610,
    # 'rotating_proxies.middlewares.BanDetectionMiddleware': 620,
}
# Enable or disable extensions
# See https://docs.scrapy.org/en/latest/topics/extensions.html
# EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
# }
# Configure item pipelines
# See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
# ITEM_PIPELINES = {
# 'event.pipelines.FrPipeline': 300,
# }
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/autothrottle.html
AUTOTHROTTLE_ENABLED = True
# The initial download delay
# AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
# Caps per-request delay at two minutes even on very slow responses.
AUTOTHROTTLE_MAX_DELAY = 120
# The average number of requests Scrapy should be sending in parallel to
# each remote server
AUTOTHROTTLE_TARGET_CONCURRENCY = 0.5
# Enable showing throttling stats for every response received:
# AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
# HTTPCACHE_ENABLED = True
# HTTPCACHE_EXPIRATION_SECS = 0
# HTTPCACHE_DIR = 'httpcache'
# HTTPCACHE_IGNORE_HTTP_CODES = []
# HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
| 35.542056 | 103 | 0.747568 |
acf5e42350c9ad059a7fd14d940a6087915ad42e | 502 | py | Python | 4-2/Machine Learning/class_/Lab3/ex3.py | define16/Class | 8b0771a348b2bcb19ba338ebff94326828a293ea | [
"Apache-2.0"
] | null | null | null | 4-2/Machine Learning/class_/Lab3/ex3.py | define16/Class | 8b0771a348b2bcb19ba338ebff94326828a293ea | [
"Apache-2.0"
] | null | null | null | 4-2/Machine Learning/class_/Lab3/ex3.py | define16/Class | 8b0771a348b2bcb19ba338ebff94326828a293ea | [
"Apache-2.0"
] | null | null | null | import numpy as np
narr = np.array([[1,2,3,4],[5,6,7,8],[9,10,11,12]])
print(narr)
data = narr[:2,1:3] # sub-array of narr: first two rows, columns 1 and 2
print(data)
print(narr[0][1])
data[0][0] = 77 # a sliced array references the same data as the original,
print(narr) # so the original array's value was modified as well.
row1 = narr[1,:] # extract the second row of the original as a rank-1 array
row2 = narr[1:2,:] # extract the second row of the original as a rank-2 array
print(row1, row1.shape)
print(row2, row2.shape)
col1 = narr[:,1]
col2 = narr[:,1:2]
print(col1, col1.shape)
print(col2, col2.shape)
acf5e4679ffed745b23a0e7b2aeeb0b555bda615 | 11,439 | py | Python | smc-monitoring/smc_monitoring/models/filters.py | kobaan/fp-NGFW-SMC-python | 7be57bdde954e4115a887c0140054c87cc0b53a0 | [
"Apache-2.0"
] | 17 | 2019-11-19T07:25:09.000Z | 2022-02-16T16:43:51.000Z | smc-monitoring/smc_monitoring/models/filters.py | kobaan/fp-NGFW-SMC-python | 7be57bdde954e4115a887c0140054c87cc0b53a0 | [
"Apache-2.0"
] | 25 | 2020-05-20T12:27:35.000Z | 2022-02-21T05:27:10.000Z | smc-monitoring/smc_monitoring/models/filters.py | kobaan/fp-NGFW-SMC-python | 7be57bdde954e4115a887c0140054c87cc0b53a0 | [
"Apache-2.0"
] | 7 | 2020-02-04T12:16:50.000Z | 2022-02-18T14:01:04.000Z | """
Filters are used by queries to refine how results are returned.
QueryFilter is the top level 'interface' for all filter types. The ``filter``
attribute of a QueryFilter provides access to the compiled query string used
to build the filter. Each QueryFilter also has an ``update_filter`` method
that can be used to swap new filters in and out of an existing query.
Filters can be added to queries using the add_XXX methods of the query, or by
building the filters and adding to the query using query.update_filter(). Filters
can be swapped in and out of a query.
Examples:
Build a query to return all records of alert severity high or critical::
query = LogQuery(fetch_size=50)
query.add_in_filter(
FieldValue(LogField.ALERTSEVERITY), [ConstantValue(Alerts.HIGH, Alerts.CRITICAL)])
If you prefer building your filters individually, it is not required to call the
add_XX_filter methods of the query. You can also insert filters by building the filter
and calling the ``update_filter`` method on the query::
query = LogQuery(fetch_size=50)
query.update_filter(
InFilter(FieldValue(LogField.SERVICE), [ServiceValue('UDP/53', 'TCP/80')])
You can also replace existing query filters with new filters to re-use the base level
query parameters such as fetch_size, format style, time/date ranges, etc.
Replace the existing query filter with a different filter::
new_filter = InFilter(FieldValue(LogField.SERVICE), [ServiceValue('UDP/53', 'TCP/80')])
query.update_filter(new_filter)
.. note:: it is also possible to update a filter by calling query.add_XX_filter methods
multiple times. Each time will replace an existing filter if it exists.
For example, calling add_XX_filter methods multiple times to refine filter results::
query = LogQuery(fetch_size=50)
query.add_in_filter( # First filter query - look for alert severity high and critical
FieldValue(LogField.ALERTSEVERITY), [ConstantValue(Alerts.HIGH, Alerts.CRITICAL)])
query.add_and_filter([ # Change filter to AND filter for further granularity
InFilter(FieldValue(LogField.ALERTSEVERITY), [ConstantValue(Alerts.HIGH, Alerts.CRITICAL)]),
InFilter(FieldValue(LogField.SRC), [IPValue('192.168.4.84')])])
"""
class QueryFilter(object):
def __init__(self, filter_type):
self.filter = {"type": filter_type}
def update_filter(self, value):
self.filter.update(value=value)
class InFilter(QueryFilter):
"""
InFilter's are made up of two parts, a left and a right. An InFilter
is considered a match if evaluation of the left part is equivalent to
one of the elements of the right part. The left part of an InFilter
is made up of a target of type :class:`smc.monitoring.values.Value`.
The right part is made up of a list of the same type.
Search the Source field for IP addresses 192.168.4.84 or 10.0.0.252::
query = LogQuery(fetch_size=50)
query.add_in_filter(
FieldValue(LogField.SRC), [IPValue('192.168.4.84', '10.0.0.252')])
Reverse the logic and search for IP address 192.168.4.84 in source and
dest log fields::
query = LogQuery(fetch_size=50)
query.add_in_filter(
IPValue('192.168.4.84'), [FieldValue(LogField.SRC, LogField.DST)])
InFilter's are one of the most common filters and are often added to AND, OR
or NOT filters for more specific matching.
:param left: single value for leftmost portion of filter
:type left: Values: any value type in :py:mod:`smc_monitoring.models.values`
:param right: list of values for rightmost portion of filter
:type right: list(Values): any value type in :py:mod:`smc_monitoring.models.values`
"""
def __init__(self, left, right):
super(InFilter, self).__init__("in")
self.update_filter(left, right)
def update_filter(self, left_filter, right_filter):
right_side = []
for filters in right_filter:
right_side.extend(filters.value)
self.filter.update(left=left_filter.value[0], right=right_side)
class AndFilter(QueryFilter):
"""
An AND filter combines other filter types and requires that each filter
matches. An AND filter is a collection of QueryFilter's, typically IN
or NOT filters that are AND'd together.
Example of fetching 50 records for sources matching '192.168.4.84' and
a service of 'TCP/80'::
query = LogQuery(fetch_size=50)
query.add_and_filter([
InFilter(FieldValue(LogField.SRC), [IPValue('192.168.4.84')]),
InFilter(FieldValue(LogField.SERVICE), [ServiceValue('TCP/80')])])
:param QueryFilter filters: Any filter type in :py:mod:`smc.monitoring.filters`.
:type filters: list or tuple
"""
def __init__(self, *filters):
super(AndFilter, self).__init__("and")
if filters:
self.update_filter(*filters)
def update_filter(self, filters):
self.filter.update(values=[value.filter for value in filters])
class OrFilter(QueryFilter):
"""
An OR filter matches if any of the combined filters match. An OR filter
is a collection of QueryFilter's, typically IN or NOT filters that are
OR'd together.
Example of fetching 50 records for sources matching '192.168.4.84' or
a service of 'TCP/80'::
query = LogQuery(fetch_size=50)
query.add_or_filter([
InFilter(FieldValue(LogField.SRC), [IPValue('192.168.4.84')]),
InFilter(FieldValue(LogField.SERVICE), [ServiceValue('TCP/80')])])
:param QueryFilter filters: Any filter type in :py:mod:`smc.monitoring.filters`.
:type filters: list or tuple
"""
def __init__(self, *filters):
super(OrFilter, self).__init__("or")
if filters:
self.update_filter(*filters)
def update_filter(self, filters):
self.filter.update(values=[value.filter for value in filters])
class NotFilter(QueryFilter):
"""
A NOT filter provides the ability to suppress auditing based on a specific
filter. A NOT filter is typically added to an AND filter to remove unwanted
entries from the response.
Use only a NOT filter to a query and to ignore DNS traffic::
query = LogQuery(fetch_size=50)
query.add_not_filter(
[InFilter(FieldValue(LogField.SERVICE), [ServiceValue('UDP/53')])])
The above example by itself is not overly useful, however you can use NOT
filters with AND filters to achieve a logic like "Find source IP 192.168.4.68
and not service UDP/53 or TCP/80"::
query = LogQuery(fetch_size=50)
not_dns = NotFilter(
[InFilter(FieldValue(LogField.SERVICE), [ServiceValue('UDP/53', 'TCP/80')])])
by_ip = InFilter(
FieldValue(LogField.SRC), [IPValue('172.18.1.20')])
query.add_and_filter([not_dns, by_ip])
:param QueryFilter filters: Any filter type in :py:mod:`smc.monitoring.filters`.
:type filters: list or tuple
"""
def __init__(self, *filters):
super(NotFilter, self).__init__("not")
if filters:
self.update_filter(*filters)
def update_filter(self, filters):
self.filter.update(value=filters[0].filter)
class DefinedFilter(QueryFilter):
"""
A Defined Filter applied to a query will only match if the value
specified has a value in the audit record/s.
Show only records that have a defined Action (read as 'match if action
has a value')::
query = LogQuery(fetch_size=50)
query.add_defined_filter(FieldValue(LogField.ACTION))
DefinedFilter's can be used in AND, OR or NOT filter queries as well.
Fetch the most recent 50 records for source 192.168.4.84 that have
an application defined::
query = LogQuery(fetch_size=50)
query.add_and_filter([
DefinedFilter(FieldValue(LogField.IPSAPPID)),
InFilter(FieldValue(LogField.SRC), [IPValue('192.168.4.84')])])
:param Value values: single value type to require on filter
"""
def __init__(self, value=None):
super(DefinedFilter, self).__init__("defined")
if value is not None:
self.update_filter(value)
def update_filter(self, value):
self.filter.update(value=value.value[0])
class CSLikeFilter(QueryFilter):
"""
A CSLikeFilter is a case sensitive LIKE string match filter.
"""
def __init__(self):
super(CSLikeFilter, self).__init__("cs_like")
pass
class CILikeFilter(QueryFilter):
"""
A CILikeFilter is a case insensitive LIKE string match filter.
"""
def __init__(self):
super(CILikeFilter, self).__init__("cs_like")
pass
class TranslatedFilter(QueryFilter):
"""
Translated filters use the SMC internal name alias and builds expressions
to make more complex queries.
Example of using built in filter methods::
query = LogQuery(fetch_size=50)
query.format.timezone('CST')
query.format.field_format('name')
translated_filter = query.add_translated_filter()
translated_filter.within_ipv4_network('$Dst', ['192.168.4.0/24'])
translated_filter.within_ipv4_range('$Src', ['1.1.1.1-192.168.1.254'])
translated_filter.exact_ipv4_match('$Src', ['172.18.1.152', '192.168.4.84'])
"""
def __init__(self):
super(TranslatedFilter, self).__init__("translated")
def within_ipv4_network(self, field, values):
"""
This filter adds specified networks to a filter to check
for inclusion.
:param str field: name of field to filter on. Taken from 'Show Filter
Expression' within Management Client.
:param list values: network definitions, in cidr format, i.e: 1.1.1.0/24.
"""
v = ['ipv4_net("%s")' % net for net in values]
self.update_filter("{} IN union({})".format(field, ",".join(v)))
def within_ipv4_range(self, field, values):
"""
Add an IP range network filter for relevant address fields.
Range (between) filters allow only one range be provided.
:param str field: name of field to filter on. Taken from 'Show Filter
Expression' within Mangement Client.
:param list values: IP range values. Values would be a list of IP's
separated by a '-', i.e. ['1.1.1.1-1.1.1.254']
"""
v = [
'ipv4("%s")' %
part for iprange in values for part in iprange.split("-")]
self.update_filter("{} IN range({})".format(field, ",".join(v)))
def exact_ipv4_match(self, field, values):
"""
An exact IPv4 address match on relevant address fields.
:param str field: name of field to filter on. Taken from 'Show Filter
Expression' within the Management Client.
:param list values: value/s to add. If more than a single value is
provided, the query is modified to use UNION vs. ==
:param bool complex: A complex filter is one which requires AND'ing
or OR'ing values. Set to return the filter before committing.
"""
if len(values) > 1:
v = ['ipv4("%s")' % ip for ip in values]
value = "{} IN union({})".format(field, ",".join(v))
else:
value = '{} == ipv4("{}")'.format(field, values[0])
self.update_filter(value)
| 36.663462 | 100 | 0.670426 |
acf5e5539eb22089683da215c45269fd1ecf93fe | 5,023 | py | Python | excalibur/post_processors/espirito_santo_post_processor.py | baptmont/excalibur | 545a8cee33d42ce5e74c97b0934ecaba92fba04a | [
"MIT"
] | null | null | null | excalibur/post_processors/espirito_santo_post_processor.py | baptmont/excalibur | 545a8cee33d42ce5e74c97b0934ecaba92fba04a | [
"MIT"
] | null | null | null | excalibur/post_processors/espirito_santo_post_processor.py | baptmont/excalibur | 545a8cee33d42ce5e74c97b0934ecaba92fba04a | [
"MIT"
] | null | null | null | import re
from .post_processor import PostProcessor
from ..utils import data_frame_utils
class EspiritoSantoPostProcessor(PostProcessor):
def __init__(self, agency_name) -> None:
self.agency_name = agency_name
def is_aplicable_to_agency(self, agency=None):
try:
agency = agency if agency else self.agency_name
return agency == "espirito_santo"
except Exception:
return False
def is_aplicable_to_dataframe(self, df=None):
try:
count = str([df[column].str.count(r"\d+").sum() for column in df.columns])
print(f"Found a total of {count} possible passing times")
return (
sum(df[column].str.count(r"\d+").sum() for column in df.columns) >= 10
)
except Exception:
return False
def process(self, df):
self._create_route_name(df.iloc[0]) # get route row
df = df[1:] # remove route
df = data_frame_utils.clean_data(
df, split=False
) # remove empty rows and cols without splitting at "\n"s
while (not df.empty) and (
not str(df.iat[0, 0]).startswith("H")
): # remove rows until hours
df = df[1:]
services = df.iloc[:, 0] # get first column
hours = df.iloc[0] # get row with the hours
df = df.drop(df.columns[0], axis=1) # remove service column
df = df[1:] # remove hour row
df = df.where(
df == "-", hours + ":" + df.astype(str)
) # append value in hours to dataframe where condition df=='' is not met
services = services[services.astype(bool)][1:].reset_index(
drop=True
) # remove empty values
return self._slice_dataframe(df, services)
def _create_route_name(self, series):
self.route = "".join(series).strip() # format route row
# returns list of tuples with days of service and table
def _slice_dataframe(self, df, services_df):
df = df.reset_index(drop=True)
temp_df = df.where(df == "-", df.gt(df.shift(periods=1))).replace(
{"-": True}
) # checks if a row has values smaller than the previous row
temp_df.iloc[0] = True # ignore first row since there is not previous row
temp_df = temp_df.all(axis="columns") # boolean reduction
df_list = []
prev_index = 0
services = self.service_to_days(services_df)
for index, value in temp_df.items():
if value is False: # service change
df_list.append(
(next(services), df[prev_index:index])
) # add previous service with sliced dataframe
prev_index = index
if index == temp_df.size - 1: # set last service since dataframe is ending
df_list.append(
(next(services), df[prev_index:])
) # add last service with sliced dataframe
# print(str(df_list))
return df_list
def route_name(self, df=None):
return self.route.replace("\uf0e0", "-") if self.route else ""
# Days generator yields the result depending on the dataframe series with
# the services
def service_to_days(self, services_df):
services_dict = {
"D.?U.?\\n?": ["Monday", "Tuesday", "Wednesday", "Thursday", "Friday"],
"SÁB.?\\n?": ["Saturday"],
"DOM.?\\n?": ["Sunday"],
}
service_counter = 0
while True:
try:
service_string = (
services_df.iat[service_counter].replace(" ", "").strip()
)
for synonym, days in services_dict.items():
synonym_regex = re.compile(synonym, re.IGNORECASE)
if bool(synonym_regex.search(service_string)):
yield days
service_counter += 1
except GeneratorExit:
return
except Exception:
yield "None"
def format_message_records(self, df):
route_name_regex = re.compile(
r"\d+ *(?P<origin>(\w\ ?)+)", flags=re.UNICODE
) # route name regex
origin = re.search(route_name_regex, self.route_name()).group(
"origin"
) # extract origin group
# allow anything after stop time regex as group 2
stop_time_regex = fr"({data_frame_utils.stop_time_regex}).*"
# remove group 2 of stop_time_regex and append origin to flat list
records = (
df.replace({stop_time_regex: r"\1"}, regex=True)
.to_numpy()
.flatten()
.tolist()
)
records.insert(0, origin)
records = {
index: record for index, record in enumerate(records)
} # same format as df.to_dict("records")
# https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.to_dict.html
return [records]
| 38.638462 | 98 | 0.565399 |
acf5e60bf6453a515a9d55fc76ca579d27443700 | 10,442 | py | Python | est8/backend/definitions.py | MartinHowarth/est8 | fcc5700024830eada2d08b409fdefb9a0edca4a2 | [
"MIT"
] | null | null | null | est8/backend/definitions.py | MartinHowarth/est8 | fcc5700024830eada2d08b409fdefb9a0edca4a2 | [
"MIT"
] | 1 | 2020-01-11T14:50:48.000Z | 2020-01-11T14:50:48.000Z | est8/backend/definitions.py | MartinHowarth/est8 | fcc5700024830eada2d08b409fdefb9a0edca4a2 | [
"MIT"
] | null | null | null | from dataclasses import dataclass
from enum import Enum, auto
from random import choice, shuffle
from typing import Tuple, Dict, Iterable, Optional, Generator
class ActionEnum(Enum):
    """Enum of possible card actions (one member per action-card family)."""

    # NOTE: member order is significant — auto() assigns values in order.
    bis = auto()
    fence = auto()
    park = auto()
    invest = auto()
    pool = auto()
    temp = auto()


@dataclass(frozen=True)
class CardDefinition:
    """One card: a house number on the front, an action on the back."""

    number: int
    action: ActionEnum


@dataclass
class CardPair:
    """A face-up pairing of a number card with an action card."""

    number_card: Optional[CardDefinition] = None
    action_card: Optional[CardDefinition] = None


@dataclass(frozen=True)
class DeckDefinition:
    """The multiset of card numbers printed for each action family."""

    bis_numbers: Tuple[int, ...]
    fence_numbers: Tuple[int, ...]
    park_numbers: Tuple[int, ...]
    invest_numbers: Tuple[int, ...]
    pool_numbers: Tuple[int, ...]
    temp_agency_numbers: Tuple[int, ...]

    @classmethod
    def default(cls) -> "DeckDefinition":
        """Return the standard 82-card deck."""
        return cls(
            bis_numbers=(3, 4, 6, 7, 8, 9, 10, 12, 13),
            fence_numbers=(1, 2, 3, 5, 5, 6, 6, 7, 8, 8, 9, 10, 10, 11, 11, 13, 14, 15),
            park_numbers=(1, 2, 4, 5, 5, 6, 7, 7, 8, 8, 9, 9, 10, 11, 11, 12, 14, 15),
            invest_numbers=(1, 2, 4, 5, 5, 6, 7, 7, 8, 8, 9, 9, 10, 11, 11, 12, 14, 15),
            pool_numbers=(3, 4, 6, 7, 8, 9, 10, 12, 13),
            temp_agency_numbers=(3, 4, 6, 7, 8, 8, 9, 10, 12, 13),
        )

    @property
    def deck_size(self) -> int:
        """Total number of cards across all action families."""
        return sum(
            (
                len(self.bis_numbers),
                len(self.fence_numbers),
                len(self.park_numbers),
                len(self.invest_numbers),
                len(self.pool_numbers),
                len(self.temp_agency_numbers),
            )
        )

    def ordered_card_generator(self) -> Generator[CardDefinition, None, None]:
        """Yield every card exactly once, grouped by action family."""
        for number in self.bis_numbers:
            yield CardDefinition(number=number, action=ActionEnum.bis)
        for number in self.fence_numbers:
            yield CardDefinition(number=number, action=ActionEnum.fence)
        for number in self.park_numbers:
            yield CardDefinition(number=number, action=ActionEnum.park)
        for number in self.pool_numbers:
            yield CardDefinition(number=number, action=ActionEnum.pool)
        for number in self.invest_numbers:
            yield CardDefinition(number=number, action=ActionEnum.invest)
        for number in self.temp_agency_numbers:
            yield CardDefinition(number=number, action=ActionEnum.temp)

    def random_card_generator(
        self, no_reshuffle_last_n: int = 0
    ) -> Generator[CardDefinition, None, None]:
        """
        A generator that returns each defined card in a random order.

        When trying to draw more cards than there are in the deck, all of the
        cards are shuffled again and then more are picked.

        :param no_reshuffle_last_n: Number of cards that were last drawn to not
            re-shuffle into the deck. This simulates behaviour of leaving
            cards on the table while reshuffling the rest.
        """
        all_cards = list(self.ordered_card_generator())
        last_n_cards = []
        while True:
            # Deal out the current deck in a random order.
            shuffle(all_cards)
            for card in all_cards:
                yield card
            # Add back in the previously withheld cards...
            all_cards = last_n_cards + all_cards
            if no_reshuffle_last_n > 0:
                # ...then withhold the cards dealt last in this cycle.
                last_n_cards = all_cards[-no_reshuffle_last_n:]
                all_cards = all_cards[: len(all_cards) - no_reshuffle_last_n]
            else:
                # Bug fix: the old unconditional slice ``all_cards[-0:]``
                # withheld the ENTIRE deck when no_reshuffle_last_n == 0,
                # doubling the deck (and duplicating every card) each cycle.
                last_n_cards = []
@dataclass(frozen=True)
class StreetDefinition:
    """Static layout of one street: plot count, pool plots, park payouts."""

    num_houses: int
    pool_locations: Tuple[int, ...]
    park_scoring: Tuple[int, ...]

    def can_have_pool_at(self, plot_no: int) -> bool:
        """True when ``plot_no`` is one of the designated pool plots."""
        for candidate in self.pool_locations:
            if candidate == plot_no:
                return True
        return False

    def park_score(self, num_parks_built: int) -> int:
        """Score for ``num_parks_built`` parks, saturating at the last entry."""
        last_index = len(self.park_scoring) - 1
        index = num_parks_built if num_parks_built < last_index else last_index
        return self.park_scoring[index]


@dataclass(frozen=True)
class NeighbourhoodDefinition:
    """The whole player board: an ordered tuple of streets."""

    streets: Tuple[StreetDefinition, ...]

    @classmethod
    def default(cls) -> "NeighbourhoodDefinition":
        """Standard three-street board (10/11/12 houses)."""
        street_specs = (
            StreetDefinition(
                num_houses=10, pool_locations=(2, 6, 7), park_scoring=(0, 2, 4, 10),
            ),
            StreetDefinition(
                num_houses=11,
                pool_locations=(0, 3, 7),
                park_scoring=(0, 2, 4, 6, 14),
            ),
            StreetDefinition(
                num_houses=12,
                pool_locations=(1, 6, 10),
                park_scoring=(0, 2, 4, 6, 8, 18),
            ),
        )
        return cls(streets=street_specs)

    def can_have_pool_at(self, street_no: int, plot_no: int) -> bool:
        """True when the street exists and allows a pool on ``plot_no``."""
        if 0 <= street_no < len(self.streets):
            return self.streets[street_no].can_have_pool_at(plot_no)
        return False
@dataclass(frozen=True)
class InvestDefinition:
    """Estate value table: estate size -> tuple of values per invest level."""

    map: Dict[int, Tuple[int, ...]]

    @classmethod
    def default(cls) -> "InvestDefinition":
        """Standard rulebook value table for estate sizes 1-6."""
        value_table = {
            1: (1, 3),
            2: (2, 3, 4),
            3: (3, 4, 5, 6),
            4: (4, 5, 6, 7, 8),
            5: (5, 6, 7, 8, 10),
            6: (6, 7, 8, 10, 12),
        }
        return cls(map=value_table)

    def get_estate_value(self, estate_size: int, investment_level: int) -> int:
        """Value of one estate, investment level clamped to the table width."""
        levels = self.map[estate_size]
        return levels[min(investment_level, len(levels) - 1)]


@dataclass(frozen=True)
class ScoringDefinition:
    """
    Definition of global scoring mechanisms.

    NB: per-street scoring handled by the street definition.
    """

    bis: Tuple[int, ...]
    invest: InvestDefinition
    permit_refusal: Tuple[int, ...]
    pool: Tuple[int, ...]
    roundabout: Tuple[int, ...]
    temp_agency: Tuple[int, ...]

    @classmethod
    def default(cls) -> "ScoringDefinition":
        """Standard rulebook scoring tables."""
        return cls(
            bis=(0, -1, -3, -6, -9, -12, -16, -20, -24, -28),
            invest=InvestDefinition.default(),
            permit_refusal=(0, 0, -3, -5),
            pool=(0, 3, 6, 9, 13, 17, 21, 26, 31, 36),
            roundabout=(0, -3, -8),
            temp_agency=(7, 4, 1),
        )

    @staticmethod
    def _saturated(table: Tuple[int, ...], count: int) -> int:
        """Look up ``table[count]``, saturating at the final entry."""
        return table[min(count, len(table) - 1)]

    def bis_score(self, num_biss: int) -> int:
        """Penalty for the number of bis cards played."""
        return self._saturated(self.bis, num_biss)

    def permit_refusal_score(self, num_permit_refusals: int) -> int:
        """Penalty for the number of permit refusals taken."""
        return self._saturated(self.permit_refusal, num_permit_refusals)

    def pool_score(self, num_pools: int) -> int:
        """Bonus for the number of pools built."""
        return self._saturated(self.pool, num_pools)

    def roundabouts_score(self, num_roundabouts: int) -> int:
        """Penalty for the number of roundabouts built."""
        return self._saturated(self.roundabout, num_roundabouts)

    def investment_score(
        self, estates: Iterable[int], investments: Dict[int, int]
    ) -> int:
        """Total value of ``estates`` given per-size investment levels."""
        # Worth of each estate size at the player's current investment level.
        worth = {
            size: self.invest.get_estate_value(size, investments.get(size, 0))
            for size in self.invest.map
        }
        return sum(worth[size] for size in estates)

    def temp_agency_score(
        self, all_players_temps: Tuple[int, ...], player_temps: int
    ) -> int:
        """Podium bonus for temp-agency use; friendly ties share a place."""
        # Have to use at least one temp to score anything.
        if player_temps == 0:
            return 0
        # Distinct temp counts, best first: equal counts share one position.
        podium = sorted(set(all_players_temps), reverse=True)
        position = podium.index(player_temps)
        if position < len(self.temp_agency):
            return self.temp_agency[position]
        return 0
@dataclass(frozen=True)
class PlanDefinition:
    """A city-plan card: its score pair (first to complete, thereafter)."""

    points: Tuple[int, int]


@dataclass(frozen=True)
class PlanDeckDefinition:
    """The three piles of plan cards, one pile per plan number."""

    no_1: Tuple[PlanDefinition, ...]
    no_2: Tuple[PlanDefinition, ...]
    no_3: Tuple[PlanDefinition, ...]

    @classmethod
    def default(cls) -> "PlanDeckDefinition":
        """Standard plan piles (currently a single card per pile)."""
        return cls(
            no_1=(PlanDefinition((6, 2)),),
            no_2=(PlanDefinition((8, 3)),),
            no_3=(PlanDefinition((11, 5)),),
        )

    def pick_3(self) -> Tuple[PlanDefinition, PlanDefinition, PlanDefinition]:
        """Draw one random plan from each pile, in pile order."""
        piles = (self.no_1, self.no_2, self.no_3)
        return tuple(choice(pile) for pile in piles)
@dataclass(frozen=True)
class GameDefinition:
    """Top-level bundle of every rule table needed to run one game."""

    neighbourhood: NeighbourhoodDefinition
    scoring: ScoringDefinition
    deck: DeckDefinition
    plans: Tuple[PlanDefinition, PlanDefinition, PlanDefinition]
    num_cards_drawn_at_once: int = 3

    @classmethod
    def default(cls) -> "GameDefinition":
        """Assemble a game from each component's default() factory."""
        return cls(
            neighbourhood=NeighbourhoodDefinition.default(),
            scoring=ScoringDefinition.default(),
            deck=DeckDefinition.default(),
            plans=PlanDeckDefinition.default().pick_3(),
        )

    def can_have_pool_at(self, street_no: int, plot_no: int) -> bool:
        """Delegate pool-plot lookup to the neighbourhood layout."""
        return self.neighbourhood.can_have_pool_at(street_no, plot_no)

    @property
    def max_roundabouts(self) -> int:
        """Highest roundabout count the scoring table can penalise."""
        return len(self.scoring.roundabout) - 1

    def max_investments_in_estate_size(self, estate_size: int) -> int:
        """Highest meaningful investment level for the given estate size."""
        return len(self.scoring.invest.map[estate_size]) - 1

    def generate_card_pairs(self) -> Generator[Tuple[CardPair, ...], None, None]:
        """
        Generate tuples of CardPairs representing the deck being drawn from.

        The number card of each draw becomes the action card of the next.
        """
        deck_cards = self.deck.random_card_generator()

        def draw_batch():
            # One face-up row: num_cards_drawn_at_once cards off the deck.
            return tuple(
                next(deck_cards) for _ in range(self.num_cards_drawn_at_once)
            )

        previous_batch = draw_batch()
        while True:
            current_batch = draw_batch()
            yield tuple(
                CardPair(number_card=number, action_card=action)
                for number, action in zip(current_batch, previous_batch)
            )
            previous_batch = current_batch
| 32.428571 | 96 | 0.595097 |
acf5e802c37938e043ecd5df86e9425dcc40ca2b | 2,900 | py | Python | tensorflow/lite/testing/op_tests/strided_slice_np_style.py | alvinlin-pn/tensorflow | c9cd1784bf287543d89593ca1432170cdbf694de | [
"Apache-2.0"
] | 2 | 2021-10-10T23:52:17.000Z | 2022-01-22T00:24:39.000Z | tensorflow/lite/testing/op_tests/strided_slice_np_style.py | alvinlin-pn/tensorflow | c9cd1784bf287543d89593ca1432170cdbf694de | [
"Apache-2.0"
] | 3 | 2019-07-25T16:55:56.000Z | 2019-08-01T23:44:31.000Z | tensorflow/lite/testing/op_tests/strided_slice_np_style.py | alvinlin-pn/tensorflow | c9cd1784bf287543d89593ca1432170cdbf694de | [
"Apache-2.0"
] | 1 | 2020-06-07T22:42:37.000Z | 2020-06-07T22:42:37.000Z | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test configs for strided_slice_np_style."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.lite.testing.zip_test_utils import create_tensor_data
from tensorflow.lite.testing.zip_test_utils import make_zip_of_tests
from tensorflow.lite.testing.zip_test_utils import register_make_test_function
# TODO(b/137615945): Expand the test coverage of this one and remove the old
# ones.
@register_make_test_function()
def make_strided_slice_np_style_tests(options):
  """Make a set of tests to test strided_slice in np style."""
  # Each dict is one parameter combination; "spec" holds numpy-style
  # __getitem__ specs (slices, tf.newaxis, Ellipsis, bare ints) to exercise.
  test_parameters = [
      {
          "dtype": [tf.float32],
          "shape": [[12, 7], [33, 1]],
          "spec": [[slice(3, 7, 2), slice(None)],
                   [tf.newaxis,
                    slice(3, 7, 1), tf.newaxis,
                    slice(None)], [slice(1, 5, 1), slice(None)]],
      },
      # 1-D case
      {
          "dtype": [tf.float32],
          "shape": [[44]],
          "spec": [[slice(3, 7, 2)], [tf.newaxis, slice(None)]],
      },
      # Shrink mask.
      {
          "dtype": [tf.float32],
          "shape": [[21, 15, 7]],
          "spec": [[slice(3, 7, 2), slice(None), 2]],
      },
      # Ellipsis.
      {
          "dtype": [tf.float32],
          "shape": [[21, 15, 7]],
          "spec": [[slice(3, 7, 2), Ellipsis]],
      },
      # All combinations.
      {
          "dtype": [tf.float32],
          "shape": [[21, 15, 7]],
          "spec": [[tf.newaxis,
                    slice(3, 7, 2),
                    slice(None), Ellipsis]],
      },
  ]
  def build_graph(parameters):
    """Build a simple graph with np style strided_slice."""
    # TF1-style placeholder graph; the spec is applied via __getitem__.
    input_value = tf.placeholder(
        dtype=parameters["dtype"], shape=parameters["shape"])
    out = input_value.__getitem__(parameters["spec"])
    return [input_value], [out]
  def build_inputs(parameters, sess, inputs, outputs):
    # Feed random data of the declared shape/dtype and run the sliced output.
    input_value = create_tensor_data(parameters["dtype"], parameters["shape"])
    return [input_value], sess.run(
        outputs, feed_dict=dict(zip(inputs, [input_value])))
  make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
| 35.365854 | 80 | 0.607586 |
acf5e95c59502d72d910e7cc5182710f8dd92a5c | 20,292 | py | Python | tests/test_baker.py | assafnativ/baker | 37d9827ca3ccf13e7f405ad07087e95a9ca3927d | [
"Apache-2.0"
] | 1 | 2022-01-31T03:57:51.000Z | 2022-01-31T03:57:51.000Z | tests/test_baker.py | Sentinel-One/baker | c5f1e0e28352c930f0763f1c8199147fda01b4ca | [
"Apache-2.0"
] | null | null | null | tests/test_baker.py | Sentinel-One/baker | c5f1e0e28352c930f0763f1c8199147fda01b4ca | [
"Apache-2.0"
] | null | null | null | import os
import sys
import bz2
import gzip
import shutil
import tempfile
import unittest
try:
from cStringIO import StringIO
except ImportError: # pragma: no cover
from io import BytesIO as StringIO
import baker
MAIN_HELP = """Usage: script.py COMMAND <options>
Available commands:
main
open Open a URL.
Use 'script.py <command> --help' for individual command help.
"""
COMMAND_HELP = """Usage: script.py open <url> [<xml>] [<json>] [<use>]
Open a URL.
Required Arguments:
url url to open.
Options:
--xml use it if you want an xml output.
--json use it if you want a json output.
--use
(specifying a double hyphen (--) in the argument list means all
subsequent arguments are treated as bare arguments, not options)
"""
INPUT_TEST = """This is a test.
Testing.
"""
INI_SAMPLE = """[main]
#
# --port
port = 8888
# --auth
auth = False
[open]
# Open a URL.
#
#
# Required Arguments:
#
# url url to open.
#
# --xml use it if you want an xml output.
xml = False
# --json use it if you want a json output.
json = False
# --use
use = True
"""
VARARGS_HELP = """Usage: script.py test [<files>...]
Command documentation.
Variable arguments:
*files Varargs documentation.
"""
def build_baker():
    """Build the Baker instance shared by the help-output tests.

    Registers a default "main" command and an "open" command whose
    docstring and parameters drive MAIN_HELP / COMMAND_HELP above.
    The inner function names become the registered command names.
    """
    b = baker.Baker()
    @b.command(default=True)
    def main(auth=False, port=8888):
        return auth, port
    @b.command
    def open(url, xml=False, json=False, use=True):
        """
        Open a URL.
        :param url: url to open.
        :param xml: use it if you want an xml output.
        :param json: use it if you want a json output.
        """
        return url, xml, json, use
    return b
class TestFunctions(unittest.TestCase):
    """Unit tests for baker's module-level helper functions."""
    def test_totype(self):
        """Test whether totype works"""
        candidates = {("true", "yes", "on", "1"): True,
                      ("false", "no", "off", "0"): False}
        for values, expected in candidates.items():
            for value in values:
                self.assertEqual(baker.totype(value, True), expected)
                self.assertEqual(baker.totype(value, False), expected)
        self.assertEqual(baker.totype("1", 42), 1)
        self.assertEqual(baker.totype("1", 0.0), 1.0)
        self.assertEqual(baker.totype("1", baker.Baker()), "1")
        self.assertRaises(TypeError, baker.totype, "invalid", False)
    def test_docstrings(self):
        """Test docstring processing"""
        docstring = """This is an example docstring.
        :param add: Add a line.
        :param remove: Remove a line.
        :param more_complicated: A little more complicated.
            This is not just a test of indents.
            but also how Baker handles blank lines.
        :param yetanother: To make sure the regex is correct.
        """
        self.maxDiff = None
        self.assertEqual(baker.find_param_docs(docstring),
                         {"add": "Add a line.\n",
                          "remove": "Remove a line.\n",
                          "more_complicated": "A little more complicated.\n    This is not just a test of indents.\n\n    but also how Baker handles blank lines.\n",
                          "yetanother": "To make sure the regex is correct.\n"})
        self.assertEqual(baker.remove_param_docs(docstring),
                         "This is an example docstring.\n\n" + " " * 8)
        self.assertEqual(baker.process_docstring(docstring),
                         ["This is an example docstring.",
                          ":param add: Add a line. "
                          ":param remove: Remove a line. "
                          ":param more_complicated: A little more complicated. This is not just a test of indents.",
                          "but also how Baker handles blank lines. "
                          ":param yetanother: To make sure the regex is correct."])
    def test_openinput(self):
        """Test Baker.openinput()"""
        self.assertTrue(baker.openinput('-') is sys.stdin)
        tempdir = tempfile.mkdtemp()
        for ext, opener in [(".gz", gzip.GzipFile), (".bz2", bz2.BZ2File)]:
            g = os.path.join(tempdir, "test" + ext)
            # NOTE: `input` shadows the builtin; kept as-is for fidelity.
            input = TestBaker.bytes(INPUT_TEST, 'utf-8')
            fobj = opener(g, "w")
            fobj.write(input)
            fobj.close()
            self.assertEqual(baker.openinput(g).read(), input)
class TestBaker(unittest.TestCase):
    """End-to-end tests for the Baker command-line dispatcher."""
    @staticmethod
    def bytes(string, encoding):
        # Encode only on Python 3, where str and bytes are distinct types.
        if sys.version_info[:2] >= (3, 0):  # pragma: no cover
            return bytes(string, encoding)
        return string
    def assertEqual(self, a, b):
        # this is for Python 3 compatibility
        if sys.version_info[:2] >= (3, 0):  # pragma: no cover
            if isinstance(a, bytes) and not isinstance(b, bytes):
                b = self.bytes(b, 'utf-8')
        super(TestBaker, self).assertEqual(a, b)
    def test_simple(self):
        """Test a very simple Baker"""
        b = baker.Baker()
        @b.command
        def test(a, b, c):
            return (a, b, c)
        self.assertEqual(b.run(["s", "test", "1", "2", "3"], main=False),
                         ("1", "2", "3"))
    def test_method(self):
        """Test whether Baker.command works on methods too"""
        b = baker.Baker()
        class Test(object):
            def __init__(self, start):
                self.start = start
            @b.command
            def test(self, a, b, cmd=False):
                return self.start, a, b, cmd
        test = Test(42)
        self.assertEqual(b.run(["s", "test", "1", "2", "--cmd"],
                               instance=test),
                         (42, "1", "2", True))
    def test_default(self):
        """Test default commands"""
        b = baker.Baker()
        @b.command(default=True)
        def test(a="a", b="b", c="c"):
            return (a, b, c)
        self.assertEqual(b.run(["s", "1", "2", "3"], main=False),
                         ("1", "2", "3"))
        self.assertEqual(b.run(["s"], main=False), ("a", "b", "c"))
    def test_options(self):
        """Test options"""
        b = baker.Baker()
        @b.command
        def test(a="a", b="b", c="c"):
            return (a, b, c)
        self.assertEqual(b.run(["s", "test", "-a", "alfa", "-b=bravo"],
                               main=False),
                         ("alfa", "bravo", "c"))
        self.assertEqual(b.run(["s", "test", "alfa", "bravo"], main=False),
                         ("alfa", "bravo", "c"))
        self.assertEqual(b.run(["s", "test", "-b", "bravo", "alfa"],
                               main=False),
                         ("alfa", "bravo", "c"))
        self.assertEqual(b.run(["s", "test", "-a", "alfa",
                                "-b='multiple words'"], main=False),
                         ("alfa", "multiple words", "c"))
    def test_shortopts(self):
        """Test short options"""
        b = baker.Baker()
        @b.command(shortopts={"alfa": "a", "bravo": "b", "charlie": "c"})
        def test(alfa="1", bravo="2", charlie=False):
            return (alfa, bravo, charlie)
        # "-cb200" bundles the boolean -c with -b and its glued value.
        self.assertEqual(b.run(["s", "test", "-a", "100", "-cb200"],
                               main=False),
                         ("100", "200", True))
    def test_optional(self):
        """Test optional arguments"""
        b = baker.Baker()
        @b.command
        def test(a, b=False, c=None):
            return (a, b, c)
        self.assertEqual(b.run(["s", "test", "100"], main=False),
                         ("100", False, None))
        self.assertEqual(b.run(["s", "test", "100", "200"], main=False),
                         ("100", "200", None))
        self.assertEqual(b.run(["s", "test", "-b", "100", "200"], main=False),
                         ("100", True, "200"))
    def test_kwargs(self):
        """Test **kwargs"""
        b = baker.Baker()
        @b.command
        def test(**kwargs):
            return kwargs
        self.assertEqual(b.run(["s", "test", "-a", "1", "-b", "2"],
                               main=False),
                         {"a": "1", "b": "2"})
    def test_defaulted_args_and_kwargs(self):
        """Test *args and **kwargs with default arguments"""
        b = baker.Baker()
        @b.command
        def test(a=0, **kwargs):
            return (a, kwargs)
        self.assertEqual(b.run(["s", "test", "-a", "1", "-b", "2"],
                               main=False),
                         (1, {"b": "2"}))
        self.assertEqual(b.run(["s", "test", "-b", "1", "-c", "2"],
                               main=False),
                         (0, {"b": "1", "c": "2"}))
    def test_args(self):
        """Test *args"""
        b = baker.Baker()
        @b.command
        def test(*args):
            return args
        self.assertEqual(b.run(["s", "test", "1", "2"], main=False),
                         ("1", "2"))
    def test_defaulted_arg_and_args(self):
        """Test *args and arguments with default values"""
        b = baker.Baker()
        @b.command
        def test(a="0", *args):
            return (a, args)
        self.assertEqual(b.run(["s", "test", "1", "2"], main=False),
                         ("0", ("1", "2")))
        self.assertEqual(b.run(["s", "test", "-a", "1", "2"], main=False),
                         ("1", ("2",)))
        # This one should assign the named arg first
        self.assertEqual(b.run(["s", "test", "2", "-a", "1"], main=False),
                         ("1", ("2",)))
    def test_pos_defaulted_arg_and_args(self):
        """Test positional arguments, arguments with default values
        and *args
        """
        b = baker.Baker()
        @b.command
        def test(a, b="0", *args):
            return (a, b, args)
        self.assertEqual(b.run(["s", "test", "1", "-b", "2"], main=False),
                         ("1", "2", ()))
        self.assertEqual(b.run(["s", "test", "1", "-b", "2"], main=False),
                         ("1", "2", ()))
        self.assertEqual(b.run(["s", "test", "2", "1"], main=False),
                         ("2", "0", ("1",)))
        self.assertEqual(b.run(["s", "test", "1", "2", "3"], main=False),
                         ("1", "0", ("2", "3",)))
        ce = baker.CommandError
        br = b.run
        self.assertRaises(ce, br, ["s", "test", "-b", "1", "--c", "2"],
                          main=False)
        self.assertRaises(ce, br, ["s", "test", "1", "--c", "2"],
                          main=False)
    def test_pos_defaulted_arg_and_kwargs_2(self):
        """Test positional arguments, arguments with default values and
        **kwargs
        """
        b = baker.Baker()
        @b.command
        def test(a, b="0", **kwargs):
            return (a, b, kwargs)
        self.assertEqual(b.run(["s", "test", "1", "-b", "2"], main=False),
                         ("1", "2", {}))
        self.assertEqual(b.run(["s", "test", "1", "-b", "2", "-c", "3"],
                               main=False),
                         ("1", "2", {"c": "3"}))
        ce = baker.CommandError
        br = b.run
        self.assertRaises(ce, br, ["s", "test", "-b", "1", "-c", "2"],
                          main=False)
    def test_pos_defaulted_arg_args_and_kwargs(self):
        """Test positional arguments, arguments with default values, *args
        and **kwargs
        """
        b = baker.Baker()
        @b.command
        def test(a, b="0", *args, **kwargs):
            return (a, b, args, kwargs)
        self.assertEqual(b.run(["s", "test", "1", "-b", "2"], main=False),
                         ("1", "2", (), {}))
        self.assertEqual(b.run(["s", "test", "1", "-b", "2"], main=False),
                         ("1", "2", (), {}))
        self.assertEqual(b.run(["s", "test", "2", "1"], main=False),
                         ("2", "0", ("1",), {}))
        self.assertEqual(b.run(["s", "test", "1", "2", "3"], main=False),
                         ("1", "0", ("2", "3",), {}))
        self.assertEqual(b.run(["s", "test", "1", "--c", "2"], main=False),
                         ("1", "0", (), {"c": "2"}))
        ce = baker.CommandError
        br = b.run
        self.assertRaises(ce, br, ["s", "test", "-b", "1", "--c", "2"],
                          main=False)
    def test_boolean_arg_and_args(self):
        """Test boolean arguments and *args"""
        b = baker.Baker()
        @b.command
        def test(a=False, *args):
            return (a, args)
        self.assertEqual(b.run(["s", "test", "1", "2"], main=False),
                         (False, ("1", "2")))
        self.assertEqual(b.run(["s", "test", "-a", "1", "2"], main=False),
                         (True, ("1", "2")))
    def test_noargs(self):
        """Test with a function accepting no arguments"""
        b = baker.Baker()
        @b.command
        def noargs():
            return 123
        self.assertEqual(b.run(["script.py", "noargs"], main=False), 123)
    def test_alias(self):
        """Test command alias"""
        b = baker.Baker()
        @b.command(name="track-all")
        def trackall(workaround=None):
            return 123
        self.assertEqual(b.run(["script.py", "track-all"], main=False), 123)
        ce = baker.CommandError
        br = b.run
        # The original function name must not be registered as a command.
        self.assertRaises(ce, br, ["s", "trackall"], main=False)
    def test_single_dash(self):
        """Test plain positional/option parsing.

        NOTE(review): despite the name, no single-dash ('-'/stdin) argument
        is actually exercised here — confirm the intended coverage.
        """
        b = baker.Baker()
        @b.command
        def test(a, b=0):
            return a, b
        self.assertEqual(b.run(["s", "test", "first"], main=False),
                         ("first", 0))
        self.assertEqual(b.run(["s", "test", "-b", "4", "first"],
                               main=False),
                         ("first", 4))
    def test_double_dash(self):
        """Test double dash (--)"""
        b = baker.Baker()
        @b.command
        def test(a, b=0, c=4):
            return a, b, c
        self.assertEqual(b.run(["s", "test", "-b", "7", "--", "6", "8"],
                               main=False),
                         ("6", 7, "8"))
        self.assertRaises(baker.CommandError, b.run,
                          ["s", "test", "9", "--", "10", "--", "9"],
                          main=False)
    def test_global_command(self):
        """Test whether global command works as expected"""
        b = baker.Baker()
        self.assertEqual(b.global_options, {})
        @b.command(global_command=True)
        def global_matcher(n=5, val=True, index="http://pypi.python.org/pypi"):
            n = int(n)
            if n > 40:
                n = -1
            return {"num": n, "val": val, "index": index}
        @b.command
        def test(req, bolly=False):
            return req, bolly
        @b.command
        def second(a, b=0):
            return a, b
        default_global_options = {"n": 5, "val": True,
                                  "index": "http://pypi.python.org/pypi"}
        self.assertEqual(b.global_options, default_global_options)
        self.assertEqual(b.run(["s", "test", "rio", "--bolly"], main=False),
                         ("rio", True))
        self.assertEqual(b.global_options, default_global_options)
        self.assertEqual(b.run(["s", "second", "9"], main=False), ("9", 0))
        self.assertEqual(b.global_options, default_global_options)
        self.assertEqual(b.run(["s", "-n", "2", "--val", "--index", "short",
                                "test", "pos"], main=False),
                         ("pos", False))
        self.assertEqual(b.global_options, {"num": 2, "val": False, "index":
                                            "short"})
        # Make sure that the real command is found even when the previous one
        # starts with dashes (-- or -). This happens when the previous option
        # is a boolean one.
        self.assertEqual(b.run(["s", "-n", "45", "--val", "test", "pos"],
                               main=False),
                         ("pos", False))
        self.assertEqual(b.global_options, {"num": -1, "val": False, "index":
                                            "http://pypi.python.org/pypi"})
    def test_global_options_get(self):
        # Baker.get falls back to the supplied default when unset.
        b = baker.Baker()
        self.assertEqual(b.get('a', 5), 5)
        self.assertEqual(b.get('a'), None)
        b.global_options = {'a': 2, 'b': 3}
        self.assertEqual(b.get('a'), 2)
        self.assertEqual(b.get('b', False), 3)
        self.assertEqual(b.get('c'), None)
    def test_global_command_error(self):
        """Test whether global command raises errors as expected"""
        def create_bad_global_command():
            b = baker.Baker()
            @b.command(global_command=True)
            def test(a, b, key='val'):
                pass
        def create_global(b):
            @b.command(global_command=True)
            def test(a=1, b=2):
                pass
        def create_default(b):
            @b.command(default=True)
            def second(a, b, c=24):
                pass
        def create_both1():
            b = baker.Baker()
            create_global(b)
            create_default(b)
        def create_both2():
            b = baker.Baker()
            create_default(b)
            create_global(b)
        ce = baker.CommandError
        # A global command may not take required positional arguments, and
        # global + default commands are mutually exclusive in either order.
        self.assertRaises(ce, create_bad_global_command)
        self.assertRaises(ce, create_both1)
        self.assertRaises(ce, create_both2)
    def test_nooptional(self):
        """Test with a function accepting only positional arguments"""
        b = baker.Baker()
        @b.command
        def test(a, b, c):
            return a, b, c
        self.assertEqual(b.run(["script.py", "test", "1", "2", "3"],
                               main=False), ('1', '2', '3'))
    def test_test(self):
        """Test 'test' mode"""
        b = baker.Baker()
        @b.command
        def test(a, b):
            return a, b
        self.assertEqual(b.test(["s", "test", "1", "2"]), "test('1', '2')")
    def test_usage(self):
        """Test usage output"""
        b = baker.Baker()
        @b.command
        def test():
            "Test command"
            pass
        f = StringIO()
        b.usage("test", scriptname="script.py", fobj=f)
        self.assertEqual(f.getvalue(),
                         'Usage: script.py test\n\nTest command\n')
    def test_varargs_usage(self):
        """Test usage output when *args is used"""
        b = baker.Baker()
        @b.command
        def test(*files):
            """Command documentation.
            :param files: Varargs documentation.
            """
            return files
        out = StringIO()
        b.run(["script.py", "test", "--help"], helpfile=out)
        self.assertEqual(out.getvalue(), VARARGS_HELP)
    def test_help(self):
        """Test program help"""
        b = build_baker()
        out = StringIO()
        b.run(["script.py", "--help"], helpfile=out)
        self.assertEqual(out.getvalue(), MAIN_HELP)
        out = StringIO()
        b.run(["script.py", "open", "--help"], helpfile=out)
        self.assertEqual(out.getvalue(), COMMAND_HELP)
    def test_writeconfig(self):
        """Test Baker.writeconfig()"""
        b = build_baker()
        tempdir = tempfile.mkdtemp()
        ini = os.path.join(tempdir, "conf.ini")
        b.writeconfig(ini)
        with open(ini) as fobj:
            self.assertEqual(fobj.read(), INI_SAMPLE)
        shutil.rmtree(tempdir)
    def test_errors(self):
        """Test various errors"""
        b = baker.Baker()
        @b.command
        def test(times=10):
            return True
        @b.command
        def foo(reqd):
            return True
        ce = baker.CommandError
        br = b.run
        self.assertRaises(baker.TopHelp, b.run, ["s"], main=False)
        self.assertRaises(ce, br, ["s", "blah"], main=False)
        self.assertRaises(ce, br, ["s", "test", "--blah"], main=False)
        self.assertRaises(ce, br, ["s", "test", "--times", "bar"], main=False)
        self.assertRaises(ce, br, ["s", "test", "1", "2", "3"], main=False)
        self.assertRaises(ce, br, ["s", "foo"], main=False)
if __name__ == "__main__":
    # Allow running this test module directly with the unittest runner.
    unittest.main()
| 31.656786 | 181 | 0.482308 |
acf5ea0666ebb6d7707990be1e238fdf44231351 | 2,231 | py | Python | DFS.py | ssinad/shopify-backend-challenge-summer-2018 | aa7a3ab28a6c2e2586aa8755f9871abc5bf1eaf6 | [
"MIT"
] | null | null | null | DFS.py | ssinad/shopify-backend-challenge-summer-2018 | aa7a3ab28a6c2e2586aa8755f9871abc5bf1eaf6 | [
"MIT"
] | null | null | null | DFS.py | ssinad/shopify-backend-challenge-summer-2018 | aa7a3ab28a6c2e2586aa8755f9871abc5bf1eaf6 | [
"MIT"
] | null | null | null | # class Visitor:
# def visit(self, node):
# pass
import json
class DFS:
    """Depth-first traversal that partitions a menu graph by its roots.

    ``graph`` maps node id -> node object exposing ``is_root`` (bool) and
    ``child_ids`` (iterable of node ids).  Each root's reachable subtree is
    classified as a valid menu (acyclic) or an invalid menu (a cycle is
    reachable from it).
    """

    # Node marks used during traversal (absent = unvisited).
    PERMANENT = 'p'  # node and its whole subtree fully explored
    TEMPORARY = 't'  # node is on the current DFS stack; seeing it again = cycle

    def __init__(self, graph):
        """Store ``graph`` and initialise all traversal state."""
        self.__sorted_nodes = []
        self.__valid_menus = []
        self.__invalid_menus = []
        self.__graph = graph
        self.__marks = {}
        self.__output = {}

    @property
    def output(self):
        """Dict with "valid_menus" and "invalid_menus" lists of
        ``{"root_id": ..., "children": [...]}`` entries.

        Bug fix: traversal state is now reset on every access, so reading
        the property repeatedly returns the same (correct) result instead
        of accumulating duplicate/empty menus.
        """
        self.__topological_sort()
        self.__output = {"valid_menus": [], "invalid_menus": []}
        for menu in self.__valid_menus:
            self.__output["valid_menus"].append(
                {"root_id": menu[0], "children": menu[1:]})
        for menu in self.__invalid_menus:
            self.__output["invalid_menus"].append(
                {"root_id": menu[0], "children": menu[1:]})
        return dict(self.__output)

    def json_format(self):
        """Return ``output`` serialised as a JSON string."""
        return json.dumps(dict(self.output))

    def __topological_sort(self):
        """Classify every root's subtree into valid/invalid menus."""
        # Reset traversal state so repeated calls are idempotent (bug fix).
        self.__valid_menus = []
        self.__invalid_menus = []
        self.__marks = {}
        for node_id in self.__graph:
            # Run DFS and topological sort on each root node
            if self.__graph[node_id].is_root:
                self.__sorted_nodes = []
                valid = self.__visit(node_id)
                # Child nodes are sorted (ids ascending, root first).
                if valid:
                    self.__valid_menus.append(sorted(self.__sorted_nodes[::-1]))
                else:
                    self.__invalid_menus.append(sorted(self.__sorted_nodes[::-1]))

    def __visit(self, node_id):
        """Visit ``node_id``; return False when a cycle is reachable."""
        if node_id in self.__marks:
            if self.__marks[node_id] == self.PERMANENT:
                return True
            elif self.__marks[node_id] == self.TEMPORARY:
                # Back edge: node is on the current stack -> cycle detected.
                self.__sorted_nodes.append(node_id)
                return False
        else:
            self.__marks[node_id] = self.TEMPORARY
            subtree_ok = True
            for child_id in self.__graph[node_id].child_ids:
                # Always visit the child, even after a cycle was found,
                # so every reachable node is recorded.
                subtree_ok = self.__visit(child_id) and subtree_ok
            self.__marks[node_id] = self.PERMANENT
            self.__sorted_nodes.append(node_id)
            return subtree_ok
| 34.323077 | 93 | 0.545944 |
acf5eb8e822d21ecf7f7cfc20caf2cb30f677e09 | 2,121 | py | Python | bdn/transaction/tests.py | OpenSourceUniversity/bdn | 8e8d5b4d63ff4cb9bdf7c5f23d07aa3ad3dd0121 | [
"MIT"
] | 1 | 2019-01-18T19:57:25.000Z | 2019-01-18T19:57:25.000Z | bdn/transaction/tests.py | OpenSourceUniversity/bdn | 8e8d5b4d63ff4cb9bdf7c5f23d07aa3ad3dd0121 | [
"MIT"
] | 3 | 2019-06-23T17:26:24.000Z | 2022-02-11T03:40:54.000Z | bdn/transaction/tests.py | OpenSourceUniversity/bdn | 8e8d5b4d63ff4cb9bdf7c5f23d07aa3ad3dd0121 | [
"MIT"
] | null | null | null | # flake8: noqa
import uuid
from django.test import RequestFactory, TestCase
from bdn.auth.models import User
from .views import TransactionViewSet
class TransactionTests(TestCase):
    """Integration tests for the TransactionViewSet REST endpoints.

    Requests are signed with a fixed Ethereum address/signature pair that
    the auth middleware verifies (presumably — confirm against bdn.auth).
    """
    def setUp(self):
        self.factory = RequestFactory()
    def test_create_list_transaction(self):
        # Create new transaction
        eth_address = '0xD2BE64317Eb1832309DF8c8C18B09871809f3735'.lower()
        user, _ = User.objects.get_or_create(username=eth_address)
        request = self.factory.post(
            '/api/v1/transactions/',
            data={
                'value': 1,
                'receiver': eth_address,
            },
            HTTP_AUTH_SIGNATURE='0xe646de646dde9cee6875e3845428ce6fc13d41086e8a7f6531d1d526598cc4104122e01c38255d1e1d595710986d193f52e3dbc47cb01cb554d8e4572d6920361c',
            HTTP_AUTH_ETH_ADDRESS='D2BE64317Eb1832309DF8c8C18B09871809f3735'
        )
        response = TransactionViewSet.as_view({'post': 'create'})(request)
        self.assertEqual(response.status_code, 200)
        # Create new transaction serializer error
        # (missing 'value' should be rejected with 400)
        request = self.factory.post(
            '/api/v1/transactions/',
            data={
                'receiver': eth_address,
            },
            HTTP_AUTH_SIGNATURE='0xe646de646dde9cee6875e3845428ce6fc13d41086e8a7f6531d1d526598cc4104122e01c38255d1e1d595710986d193f52e3dbc47cb01cb554d8e4572d6920361c',
            HTTP_AUTH_ETH_ADDRESS='D2BE64317Eb1832309DF8c8C18B09871809f3735'
        )
        response = TransactionViewSet.as_view({'post': 'create'})(request)
        self.assertEqual(response.status_code, 400)
        # List transactions
        request = self.factory.get(
            '/api/v1/transactions/',
            data={
            },
            HTTP_AUTH_SIGNATURE='0xe646de646dde9cee6875e3845428ce6fc13d41086e8a7f6531d1d526598cc4104122e01c38255d1e1d595710986d193f52e3dbc47cb01cb554d8e4572d6920361c',
            HTTP_AUTH_ETH_ADDRESS='D2BE64317Eb1832309DF8c8C18B09871809f3735'
        )
        response = TransactionViewSet.as_view({'get': 'list'})(request)
        self.assertEqual(response.status_code, 200)
| 41.588235 | 167 | 0.691183 |
acf5ebdd9389307e046605815835e08adc226792 | 2,201 | py | Python | code/processing/growth_rates/2021-08-31_r1_SingleKO_acetate/analysis.py | cremerlab/useless_expression | a6020674f0ae73b4cc6173de60a0ea93016ee562 | [
"MIT"
] | null | null | null | code/processing/growth_rates/2021-08-31_r1_SingleKO_acetate/analysis.py | cremerlab/useless_expression | a6020674f0ae73b4cc6173de60a0ea93016ee562 | [
"MIT"
] | null | null | null | code/processing/growth_rates/2021-08-31_r1_SingleKO_acetate/analysis.py | cremerlab/useless_expression | a6020674f0ae73b4cc6173de60a0ea93016ee562 | [
"MIT"
] | null | null | null | #%%
import numpy as np
import pandas as pd
import futileprot.viz
import altair as alt
import altair_saver
import scipy.stats
colors, palette = futileprot.viz.altair_style()
# Experiment metadata used to locate the input CSV and label the outputs.
DATE = '2021-08-31'
RUN_NO = 1
STRAINS = 'SingleKO'
MEDIUM = 'acetate'
# Load the exponential-phase growth measurements produced upstream.
data = pd.read_csv(f'./output/{DATE}_r{RUN_NO}_{STRAINS}_{MEDIUM}_exponential_phase.csv')
# Keep only the columns needed for the growth-rate inference.
data = data[['strain', 'elapsed_time_hr', 'od_600nm']]
# For each strain, infer the growth rate via log-linear regression and plot the fit.
layout = None  # accumulates the per-strain charts; None until the first strain is plotted
for g, d in data.groupby(['strain']):
    time_range = np.linspace(0, 1.25 * d['elapsed_time_hr'].max(), 10)
    # Exponential growth: log(OD) is linear in time, so regressing
    # log(od_600nm) on time yields the growth rate as the slope.
    popt = scipy.stats.linregress(d['elapsed_time_hr'], np.log(d['od_600nm']))
    slope, intercept, err = popt[0], popt[1], popt[-1]
    print(f'{g}, {MEDIUM}: µ = {slope:0.3f} ± {err:0.3f} per hr.')
    # Evaluate the fitted exponential over the plotting time range.
    fit = np.exp(intercept + slope * time_range)
    fit_df = pd.DataFrame([])
    fit_df['elapsed_time_hr'] = time_range
    fit_df['od_600nm'] = fit
    # Scatter of the raw OD measurements on a log axis.
    points = alt.Chart(
        data=d,
        width=300,
        height=150
    ).mark_point(
        color=colors['primary_blue']
    ).encode(
        x=alt.X('elapsed_time_hr:Q', title='elapsed time [hr]'),
        y=alt.Y('od_600nm:Q', title='optical density [a.u]',
                scale=alt.Scale(type='log'))
    )
    # Line chart of the exponential fit. Named `fit_chart` so it no longer
    # shadows the `fit` array computed above (previous code reused `fit`).
    fit_chart = alt.Chart(data=fit_df,
                          title=f'{g}, {MEDIUM}: µ = {slope:0.3f} ± {err:0.3f} per hr.'
                          ).mark_line(
        color=colors['primary_blue']
    ).encode(
        x='elapsed_time_hr:Q',
        y='od_600nm:Q'
    )
    merge = points + fit_chart
    if layout is None:
        layout = merge
    else:
        layout &= merge
altair_saver.save(layout, f'output/{DATE}_r{RUN_NO}_{STRAINS}_{MEDIUM}_fits.png',
                  scale_factor=2)
# %%
| 32.367647 | 89 | 0.547024 |
acf5ec99cc01d93e27094277b268ffb92b45a078 | 3,027 | py | Python | trappy/stats/Indexer.py | mike2390/trappy | e189dd94528c5affe110a7e6d137463e7c1c74ec | [
"Apache-2.0"
] | null | null | null | trappy/stats/Indexer.py | mike2390/trappy | e189dd94528c5affe110a7e6d137463e7c1c74ec | [
"Apache-2.0"
] | null | null | null | trappy/stats/Indexer.py | mike2390/trappy | e189dd94528c5affe110a7e6d137463e7c1c74ec | [
"Apache-2.0"
] | null | null | null | # Copyright 2015-2016 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Indexers are responsible for providing indexes for
aggregations and provide specific functions like
unification and resampling.
"""
import pandas as pd
import numpy as np
from trappy.utils import listify
from trappy.stats import StatConf
class Indexer(object):
    """Indexer base class is an encapsulation
    around the pandas Index object with some
    special functionality.

    :param index: Pandas index object. This can be
        non-uniform and non-unique
    :type index: :mod:`pandas.Index`
    """

    def __init__(self, index):
        self.index = index

    def series(self):
        """Return an all-zeros :mod:`pandas.Series` carried on the
        initialized index (useful as a blank accumulator).
        """
        return pd.Series(np.zeros(len(self.index)), index=self.index)

    def get_uniform(self, delta=StatConf.DELTA_DEFAULT):
        """
        :param delta: Difference between two consecutive indices. This has a
            default value specified in StatConf.DELTA_DEFAULT
        :type delta: float

        :return: A uniformly spaced index covering the span of the
            original index (end point excluded, as with :func:`numpy.arange`).
        """
        # Resample the (possibly non-uniform) index range at a fixed step.
        uniform_start = self.index.values[0]
        uniform_end = self.index.values[-1]
        new_index = np.arange(uniform_start, uniform_end, delta)
        return new_index
def get_unified_indexer(indexers):
    """Unify a list of Indexers.

    :param indexers: A list of indexers
    :type indexers: :mod:`trappy.stats.Indexer.Indexer`

    :return: An :mod:`Indexer` whose index is the union of the
        indices of every input indexer
    """
    unified = indexers[0].index
    for other in indexers[1:]:
        unified = unified.union(other.index)
    return Indexer(unified)
class MultiTriggerIndexer(Indexer):
    """The index unifies the indices of all trigger
    events.

    :param triggers: A (list or single) trigger
    :type triggers: :mod:`trappy.stats.Trigger.Trigger`
    """

    def __init__(self, triggers):
        self._triggers = listify(triggers)
        super(MultiTriggerIndexer, self).__init__(self._unify())

    def _unify(self):
        """Build one sorted, de-duplicated index from the indices of
        every trigger's event data frame.
        """
        combined = pd.Index([])
        for trigger in self._triggers:
            event = getattr(trigger.trace, trigger.template.name)
            combined = combined.union(event.data_frame.index)
        return pd.Index(np.unique(combined.values))
| 29.38835 | 74 | 0.676247 |
acf5ecfff2dc91c79b9f96debd4669b97f858373 | 2,646 | py | Python | earl2datasets/EntityLinkingForQADatasets/webqsp/parse.py | debayan/Pointer-Networks | e066a02ba3aa1e152b9ec479c5d9b0a9e0e38ed8 | [
"MIT"
] | null | null | null | earl2datasets/EntityLinkingForQADatasets/webqsp/parse.py | debayan/Pointer-Networks | e066a02ba3aa1e152b9ec479c5d9b0a9e0e38ed8 | [
"MIT"
] | null | null | null | earl2datasets/EntityLinkingForQADatasets/webqsp/parse.py | debayan/Pointer-Networks | e066a02ba3aa1e152b9ec479c5d9b0a9e0e38ed8 | [
"MIT"
] | null | null | null | #!/usr/bin/python
from __future__ import print_function
import sys,json
import requests,re
from multiprocessing import Pool
import urllib.request
def hiturl(questionserial):
    # Query the Falcon 2 linking API for one question.
    # `questionserial` is a (question_text, question_dict) pair; returns a
    # (question_id, raw_response_text, question_dict) triple.
    question = questionserial[0]
    serial = questionserial[1]['question_id']
    try:
        print(question)
        # Keep only alphanumerics; collapse every other run to a single space.
        question = re.sub(r"[^a-zA-Z0-9]+", ' ', question)
        conditionsSetURL = 'https://labs.tib.eu/falcon/falcon2/api?mode=short'
        newConditions = {'text': question}
        params = json.dumps(newConditions).encode('utf8')
        req = urllib.request.Request(conditionsSetURL, data=params, headers={'content-type': 'application/json'})
        response = urllib.request.urlopen(req)
        response = response.read().decode('utf8')
        print(response)
        return (serial,response,questionserial[1])
    except Exception as e:
        # Best-effort: any failure (network, encoding, missing key) yields an
        # empty JSON list for this question instead of aborting the pool run.
        return(serial,'[]',questionserial[1])
# Driver: send every WebQSP test question through Falcon 2 in parallel and
# dump the raw responses to falcon2webqstest.json.
f = open('input/webqsp.test.entities.with_classes.json')
s = f.read()
d = json.loads(s)
f.close()
questions = []
for item in d:
    questions.append((item['utterance'],item))
# Five worker processes; imap yields results lazily in input order.
pool = Pool(5)
responses = pool.imap(hiturl,questions)
_results = []
count = 0
# Counters for the precision/recall bookkeeping that is commented out below.
totalentchunks = 0
tpentity = 0
fpentity = 0
fnentity = 0
for response in responses:
    count += 1
    print(count)
    # NOTE(review): the block below scored entity linking against the gold
    # SPARQL entities; it was disabled by the author and is kept verbatim.
    #    item = response[2]
    #    goldentities = re.findall( r'wd:(.*?) ', item['sparql_wikidata'])
    #    queryentities = []
    #    if 'rerankedlists' in json.loads(response[1]):
    #        for num,urltuples in json.loads(response[1])['rerankedlists'].iteritems():
    #            if json.loads(response[1])['chunktext'][int(num)]['class'] == 'entity':
    #                for urltuple in urltuples:
    #                    queryentities.append(urltuple[1][0])
    #                    break
    #    for ent in goldentities:
    #        totalentchunks += 1
    #        if ent in queryentities:
    #            tpentity += 1
    #        else:
    #            fpentity += 1
    #    for ent in queryentities:
    #        if ent not in goldentities:
    #            fnentity += 1
    #    try:
    #        precisionentity = tpentity/float(tpentity+fpentity)
    #        recallentity = tpentity/float(tpentity+fnentity)
    #        f1entity = 2*(precisionentity*recallentity)/(precisionentity+recallentity)
    #        print("precision entity = ",precisionentity)
    #        print("recall entity = ",recallentity)
    #        print("f1 entity = ",f1entity)
    #    except Exception:
    #        pass
    _results.append((response[0],json.loads(response[1])))
#_results = sorted(_results, key=lambda tup: tup[0])
results = []
for result in _results:
    results.append(result)
# Write the collected (question_id, parsed_response) pairs to disk.
f1 = open('falcon2webqstest.json','w')
print(json.dumps(results),file=f1)
f1.close()
| 30.413793 | 113 | 0.636432 |
acf5ed7af3946213ddc38de11c33a6ac12a232fb | 493 | py | Python | src/abc217_d.py | 06keito/study-atcoder | c859e542079b550d19fa5e5e632e982a0dbb9578 | [
"MIT"
] | 1 | 2021-08-19T07:21:47.000Z | 2021-08-19T07:21:47.000Z | src/abc217_d.py | 06keito/main-repository | c859e542079b550d19fa5e5e632e982a0dbb9578 | [
"MIT"
] | null | null | null | src/abc217_d.py | 06keito/main-repository | c859e542079b550d19fa5e5e632e982a0dbb9578 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Reference https://kanpurin.hatenablog.com/entry/2021/09/05/163703
import bisect
import array
def main():
    """ABC217 D: maintain the sorted cut positions of a board of length L."""
    length, query_count = map(int, input().split())
    # Cut positions in ascending order, bracketed by the board edges 0 and L.
    cuts = array.array('i', [0, length])
    for _ in range(query_count):
        kind, pos = map(int, input().split())
        idx = bisect.bisect_right(cuts, pos)
        if kind == 1:
            # Query type 1: make a new cut at pos.
            cuts.insert(idx, pos)
        else:
            # Query type 2: print the length of the piece containing pos.
            print(cuts[idx] - cuts[idx - 1])
if __name__ == '__main__':
main() | 23.47619 | 67 | 0.557809 |
acf5ed96138ed451baebbeec96dd18a25d9c6be7 | 1,883 | py | Python | landlab/components/flexure/examples/example_random_point_loads.py | awickert/landlab | 496de56717a5877db96f354a1b1285bfabe8b56f | [
"MIT"
] | 1 | 2015-08-17T19:29:50.000Z | 2015-08-17T19:29:50.000Z | landlab/components/flexure/examples/example_random_point_loads.py | awickert/landlab | 496de56717a5877db96f354a1b1285bfabe8b56f | [
"MIT"
] | 1 | 2018-04-07T08:24:56.000Z | 2018-04-07T13:52:03.000Z | landlab/components/flexure/examples/example_random_point_loads.py | awickert/landlab | 496de56717a5877db96f354a1b1285bfabe8b56f | [
"MIT"
] | 2 | 2017-07-03T20:21:13.000Z | 2018-09-06T23:58:19.000Z | #! /usr/bin/env python
import numpy as np
from landlab.components.flexure import Flexure
from landlab import RasterModelGrid
def get_random_load_locations(shape, n_loads):
    """Return *n_loads* random node indices on a grid of *shape* (rows, cols).

    Indices are drawn uniformly from ``[0, rows * cols - 1]``. Uses
    ``np.random.randint`` (exclusive upper bound) instead of the former
    ``np.random.random_integers`` (inclusive upper bound), which was
    deprecated in NumPy 1.11 and has since been removed.
    """
    return np.random.randint(0, shape[0] * shape[1], n_loads)
def get_random_load_magnitudes(n_loads):
    """Draw *n_loads* load magnitudes from a normal distribution."""
    mean, spread = 1e3, 10e7
    return np.random.normal(mean, spread, n_loads)
def put_loads_on_grid(grid, load_locations, load_sizes):
    """Write each load magnitude into the grid's overlying-pressure field."""
    pressure = grid.at_node['lithosphere__overlying_pressure_increment'].view()
    for node_id, magnitude in zip(load_locations, load_sizes):
        pressure.flat[node_id] = magnitude
def main():
    """Apply random point loads to a raster grid and compute lithospheric flexure."""
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument('--n-loads', type=int, default=16,
                        help='Number of loads to apply')
    parser.add_argument('--shape', type=int, default=200,
                        help='Number rows and columns')
    # NOTE(review): type=int cannot parse "5e3" from the command line even
    # though the default is the float 5e3; plain integer arguments still work.
    parser.add_argument('--spacing', type=int, default=5e3,
                        help='Spacing between rows and columns (m)')
    parser.add_argument('--n-procs', type=int, default=1,
                        help='Number of processors to use')
    parser.add_argument('--plot', action='store_true', default=False,
                        help='Plot an image of the total deflection')
    args = parser.parse_args()

    shape = (args.shape, args.shape)
    spacing = (args.spacing, args.spacing)
    # Random point loads: where they sit and how large they are.
    load_locs = get_random_load_locations(shape, args.n_loads)
    load_sizes = get_random_load_magnitudes(args.n_loads)

    grid = RasterModelGrid(shape[0], shape[1], spacing[0])
    flex = Flexure(grid, method='flexure')
    put_loads_on_grid(grid, load_locs, load_sizes)
    # Solve for the flexural response to the applied loads.
    flex.update(n_procs=args.n_procs)
    if args.plot:
        grid.imshow('node', 'lithosphere_surface__elevation_increment',
                    symmetric_cbar=False, cmap='spectral', show=True)
if __name__ == '__main__':
main()
| 31.383333 | 75 | 0.667552 |
acf5edd45c786bbe70eea8e0079e2e4059e8d5a9 | 1,514 | py | Python | tests/python/test_cvt_numpy.py | winnerineast/taichi | 57ae0abc374e0df8f0b54bde4bcb92d9d97ed269 | [
"MIT"
] | null | null | null | tests/python/test_cvt_numpy.py | winnerineast/taichi | 57ae0abc374e0df8f0b54bde4bcb92d9d97ed269 | [
"MIT"
] | null | null | null | tests/python/test_cvt_numpy.py | winnerineast/taichi | 57ae0abc374e0df8f0b54bde4bcb92d9d97ed269 | [
"MIT"
] | null | null | null | import taichi as ti
import numpy as np
@ti.all_archs
def test_from_numpy_2d():
    # NOTE(review): despite its name, this test exercises to_numpy(): it
    # fills a taichi field element-by-element and checks the exported array.
    val = ti.var(ti.i32)
    n = 4
    m = 7
    @ti.layout
    def values():
        # Dense n-by-m 2D layout for `val`.
        ti.root.dense(ti.ij, (n, m)).place(val)
    for i in range(n):
        for j in range(m):
            val[i, j] = i + j * 3
    arr = val.to_numpy()
    assert arr.shape == (4, 7)
    for i in range(n):
        for j in range(m):
            assert arr[i, j] == i + j * 3
@ti.all_archs
def test_to_numpy_2d():
    # NOTE(review): despite its name, this exercises from_numpy(): a numpy
    # array is imported into the taichi field and the field is checked.
    val = ti.var(ti.i32)
    n = 4
    m = 7
    @ti.layout
    def values():
        # Dense n-by-m 2D layout for `val`.
        ti.root.dense(ti.ij, (n, m)).place(val)
    arr = np.empty(shape=(n, m), dtype=np.int32)
    for i in range(n):
        for j in range(m):
            arr[i, j] = i + j * 3
    val.from_numpy(arr)
    for i in range(n):
        for j in range(m):
            assert val[i, j] == i + j * 3
@ti.all_archs
def test_to_numpy_2d():
    # NOTE(review): this is a byte-for-byte duplicate of the preceding
    # test_to_numpy_2d definition; it shadows the earlier one, so pytest
    # collects only this copy and the first never runs.
    val = ti.var(ti.i32)
    n = 4
    m = 7
    @ti.layout
    def values():
        # Dense n-by-m 2D layout for `val`.
        ti.root.dense(ti.ij, (n, m)).place(val)
    arr = np.empty(shape=(n, m), dtype=np.int32)
    for i in range(n):
        for j in range(m):
            arr[i, j] = i + j * 3
    val.from_numpy(arr)
    for i in range(n):
        for j in range(m):
            assert val[i, j] == i + j * 3
@ti.all_archs
def test_f64():
    # Round-trips f64 values of magnitude 1e100 through to_numpy/from_numpy:
    # such magnitudes overflow f32, so this verifies double precision survives.
    val = ti.var(ti.f64)
    n = 4
    m = 7
    @ti.layout
    def values():
        # Dense n-by-m 2D layout for `val`.
        ti.root.dense(ti.ij, (n, m)).place(val)
    for i in range(n):
        for j in range(m):
            val[i, j] = (i + j * 3) * 1e100
    val.from_numpy(val.to_numpy() * 2)
    for i in range(n):
        for j in range(m):
            assert val[i, j] == (i + j * 3) * 2e100
| 16.456522 | 46 | 0.53897 |
acf5edebb87ce8994cb2589e23da1e5175bc6e0f | 1,317 | py | Python | doping_monitor/MonitorTestCaseSelection.py | Biewer/Doping-Tests-for-Cyber-Physical-Systems-Tool | eb359ba618f0022dcd403edc99904f3ef2940e65 | [
"MIT"
] | null | null | null | doping_monitor/MonitorTestCaseSelection.py | Biewer/Doping-Tests-for-Cyber-Physical-Systems-Tool | eb359ba618f0022dcd403edc99904f3ef2940e65 | [
"MIT"
] | null | null | null | doping_monitor/MonitorTestCaseSelection.py | Biewer/Doping-Tests-for-Cyber-Physical-Systems-Tool | eb359ba618f0022dcd403edc99904f3ef2940e65 | [
"MIT"
] | null | null | null | import os, sys
sys.path.insert(0, os.path.abspath("../"))
from tool.doping_test import TestCaseSelection, Input
class MonitorTestCaseSelection(TestCaseSelection):
    """Test case selection that instructs DT according to the data of recordedTrace."""

    def __init__(self, recordedTrace):
        super(MonitorTestCaseSelection, self).__init__()
        self.recordedTrace = recordedTrace

    def get_next_option(self, history):
        """Choose the next DT option based on the recorded trace's current symbol."""
        next_symbol = self.recordedTrace.get_current_symbol()
        # End of the recorded stream: terminate the testing.
        # (Fixed: identity comparison `is None` instead of `== None`.)
        if next_symbol is None:
            return TestCaseSelection.OPTION_PASS
        # Inputs map to DT option 2, everything else (outputs) to option 3.
        if isinstance(next_symbol, Input):
            return TestCaseSelection.OPTION_INPUT
        else:
            return TestCaseSelection.OPTION_OUTPUT

    def get_next_input(self, history):
        """Consume and return the next input symbol from the recorded trace.

        Raises:
            Exception: if the current symbol is not an input.
        """
        next_symbol = self.recordedTrace.get_current_symbol()
        if isinstance(next_symbol, Input):
            # Advance the trace's internal position past the consumed symbol.
            self.recordedTrace.advance_symbol()
            return next_symbol
        else:
            # Complain otherwise.
            raise Exception('No input available!')
acf5ee41781278b777d182f054e3fd872a564c53 | 247 | py | Python | tests/io/slurm/SlurmConfiguration/test____init__.py | eragasa/mexm-base | c8d84057c483e1bd06bb8b2e835274f6a4cd61b9 | [
"MIT"
] | 1 | 2021-01-03T21:30:47.000Z | 2021-01-03T21:30:47.000Z | tests/io/slurm/SlurmConfiguration/test____init__.py | eragasa/mexm-base | c8d84057c483e1bd06bb8b2e835274f6a4cd61b9 | [
"MIT"
] | null | null | null | tests/io/slurm/SlurmConfiguration/test____init__.py | eragasa/mexm-base | c8d84057c483e1bd06bb8b2e835274f6a4cd61b9 | [
"MIT"
] | null | null | null | import pytest
from mexm.io.slurm import SlurmConfiguration
def dev____init____no_args():
    """Manual check: build a default SlurmConfiguration and show its repr."""
    configuration = SlurmConfiguration()
    print(configuration)
def test____init____no_args():
    """Smoke test: the no-argument constructor must not raise."""
    # Unused local binding removed; constructing the object is the assertion.
    SlurmConfiguration()
| 24.7 | 46 | 0.809717 |
acf5eeb55de19b4ab226f89b319e72933a375406 | 2,304 | py | Python | gicd/api.py | zmarffy/gicd | 6c359da61da2f443218ce18809c5b6c94388275f | [
"MIT"
] | null | null | null | gicd/api.py | zmarffy/gicd | 6c359da61da2f443218ce18809c5b6c94388275f | [
"MIT"
] | null | null | null | gicd/api.py | zmarffy/gicd | 6c359da61da2f443218ce18809c5b6c94388275f | [
"MIT"
] | null | null | null | import logging
import traceback
from functools import wraps
from subprocess import check_output
from typing import Any, List, Optional
import zmtools
LOGGER = logging.getLogger(__name__)
class GICD():
    def __init__(self, repo_owner: str, repo_name: str) -> None:
        """Class that provides a function to create an issue on GitHub's issues section for a specific repo

        Args:
            repo_owner (str): The owner of the repo
            repo_name (str): The name of the repo
        """
        self.repo_owner = repo_owner
        self.repo_name = repo_name

    def _create_issue(self, issue_title: str, issue_body: str, issue_label: str = "bug") -> bytes:
        """Create an issue on GitHub's issues section for a specific repo via the `gh` CLI

        Args:
            issue_title (str): The title of the issue to create
            issue_body (str): The body of the issue to create
            issue_label (str, optional): The label to attach to the issue. Defaults to "bug".

        Returns:
            bytes: The URL of the created issue, as raw `gh` stdout
                (``subprocess.check_output`` returns bytes, not str)
        """
        return check_output(["gh", "issue", "create", "--title", issue_title, "--body", issue_body, "--label", issue_label, "-R", f"{self.repo_owner}/{self.repo_name}"])

    def auto_create_issue(self, exceptions: Optional[List[type]] = None) -> Any:
        """Decorator to create a GitHub issue on exception throw

        Args:
            exceptions (list[type], optional): The exception classes to create an issue for. If None, create the issue for any exception. Defaults to None.
        """
        def actual_decorator(func):
            @wraps(func)
            def wrapper(*args, **kwargs):
                try:
                    return func(*args, **kwargs)
                except Exception as e:
                    # Only report when no filter was given or the raised
                    # exception matches one of the requested classes.
                    if not exceptions or any(isinstance(e, etype) for etype in exceptions):
                        tb = traceback.format_exc().strip()
                        # Issue title = (truncated) last traceback line.
                        issue_title = zmtools.truncate(
                            tb.split("\n")[-1], length=25)
                        issue = self._create_issue(issue_title, f"```python\n{tb}\n```")
                        LOGGER.info(f"An issue was created at {issue}")
                    # Always re-raise: reporting does not swallow the error.
                    raise e
            return wrapper
        return actual_decorator
| 38.4 | 169 | 0.591146 |
acf5efb20d3a25edc889d86e881a1891118438e9 | 1,070 | py | Python | monopoly/__main__.py | alexover1/pinopoly | 4330a1e4c7832c8f15d5255f72bbcfc466b83d83 | [
"MIT"
] | 13 | 2021-06-13T05:55:41.000Z | 2021-06-23T17:15:53.000Z | monopoly/__main__.py | alexover1/pinopoly | 4330a1e4c7832c8f15d5255f72bbcfc466b83d83 | [
"MIT"
] | null | null | null | monopoly/__main__.py | alexover1/pinopoly | 4330a1e4c7832c8f15d5255f72bbcfc466b83d83 | [
"MIT"
] | null | null | null | from monopoly.menu import Menu
from monopoly.user import User, delete_users
import enquiries, art
############################################
# MAIN
############################################
def main():
    """Show the pinopoly start-up menu and dispatch the chosen action."""
    menu = Menu()
    menu.console.clear()
    art.tprint("pinopoly")
    options = [
        "Start a new game",
        "Resume existing game",
        "Add a user",
        "Manage users",
        "Delete all users",
        "Exit",
    ]
    choice = enquiries.choose("Welcome to pinopoly", options)
    if choice == "Start a new game":
        menu.start_new_game()
    elif choice == "Resume existing game":
        menu.resume_game()
    elif choice == "Add a user":
        name = enquiries.freetext("What is the player's name?")
        # Empty name: abort the program rather than save a blank user.
        if not name:
            exit(1)
        User(name).save()
    elif choice == "Delete all users":
        if enquiries.confirm(
            "Are you sure you want to delete all users?", single_key=True
        ):
            delete_users()
    # NOTE(review): "Manage users" has no branch of its own, so selecting it
    # falls through to this else and exits, same as choosing "Exit".
    else:
        menu.exit()
if __name__ == "__main__":
main()
| 22.765957 | 73 | 0.519626 |
acf5efccf8b28d4938a6cd37f79eedda4a0a3517 | 520 | py | Python | taskbuster/test.py | davidwurster/taskbuster_project | c9d624ac6cae20d2cd1dedec0236731a2c9e1822 | [
"MIT"
] | null | null | null | taskbuster/test.py | davidwurster/taskbuster_project | c9d624ac6cae20d2cd1dedec0236731a2c9e1822 | [
"MIT"
] | 6 | 2020-06-05T18:36:42.000Z | 2022-02-10T07:29:10.000Z | taskbuster/test.py | davidwurster/taskbuster_project | c9d624ac6cae20d2cd1dedec0236731a2c9e1822 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from django.utils.translation import activate
from django.test import TestCase
from django.urls import reverse
class TestHomePage(TestCase):
    """Checks that the home page renders through the expected templates."""

    def _get_home(self):
        """Activate the English locale and fetch the home page (shared setup)."""
        activate('en')
        return self.client.get(reverse("home"))

    def test_uses_index_template(self):
        response = self._get_home()
        self.assertTemplateUsed(response, "taskbuster/index.html")

    def test_uses_base_template(self):
        response = self._get_home()
        self.assertTemplateUsed(response, "base.html")
| 28.888889 | 66 | 0.696154 |
acf5efd48fb06ca2473d9d49bc741ce964d98d8c | 526 | py | Python | tests/test_getting_include_dir.py | TriForceX/Cppy | 7996723c2abb2268e38644d95264a8c988c626f4 | [
"BSD-3-Clause"
] | 73 | 2015-07-01T23:10:00.000Z | 2022-03-25T13:44:20.000Z | tests/test_getting_include_dir.py | Saiprasad16/cppy | e400b703f65167a8c844d5d30808d0d2c95cc570 | [
"BSD-3-Clause"
] | 12 | 2016-12-14T07:39:00.000Z | 2022-03-30T19:42:56.000Z | tests/test_getting_include_dir.py | Saiprasad16/cppy | e400b703f65167a8c844d5d30808d0d2c95cc570 | [
"BSD-3-Clause"
] | 23 | 2015-04-14T10:06:32.000Z | 2022-03-22T08:50:08.000Z | #------------------------------------------------------------------------------
# Copyright (c) 2014-2019, Nucleic
#
# Distributed under the terms of the BSD 3-Clause License.
#
# The full license is in the file LICENSE, distributed with this software.
#------------------------------------------------------------------------------
"""Test getting the include directory.
"""
def test_getting_include_directory():
    """The packaged include directory must resolve to a non-empty path."""
    from cppy import get_include
    include_dir = get_include()
    assert include_dir
| 29.222222 | 79 | 0.503802 |
acf5f10a71031fe2116519708da609e52c5680c6 | 305 | py | Python | gazelle/testdata/first_party_file_and_directory_modules/__main__.py | axivion/rules_python | 7740b22d0bae942af0797967f2617daa19834cb3 | [
"Apache-2.0"
] | null | null | null | gazelle/testdata/first_party_file_and_directory_modules/__main__.py | axivion/rules_python | 7740b22d0bae942af0797967f2617daa19834cb3 | [
"Apache-2.0"
] | null | null | null | gazelle/testdata/first_party_file_and_directory_modules/__main__.py | axivion/rules_python | 7740b22d0bae942af0797967f2617daa19834cb3 | [
"Apache-2.0"
] | null | null | null | import foo
from baz import baz as another_baz
from foo.bar import baz
from one.two import two
from package1.subpackage1.module1 import find_me
# Sanity checks that each import above resolved to the intended first-party
# module (file modules vs. directory modules with clashing names).
assert not hasattr(foo, "foo")
assert baz() == "baz from foo/bar.py"
assert another_baz() == "baz from baz.py"
assert two() == "two"
assert find_me() == "found"
| 25.416667 | 48 | 0.734426 |
acf5f131e1e589359e395ac2afe7c77e36093171 | 1,841 | py | Python | generate_plasma_recording.py | BioMedAnalysis/petmr-bids | dd259b11578fe1edadcf797a3af6ba35f33aee3b | [
"Apache-2.0"
] | null | null | null | generate_plasma_recording.py | BioMedAnalysis/petmr-bids | dd259b11578fe1edadcf797a3af6ba35f33aee3b | [
"Apache-2.0"
] | null | null | null | generate_plasma_recording.py | BioMedAnalysis/petmr-bids | dd259b11578fe1edadcf797a3af6ba35f33aee3b | [
"Apache-2.0"
] | null | null | null | import json
import argparse
# Sidecar template (BIDS-style naming) describing the columns of the
# discrete blood/plasma recording file.
plasma_blood_discrite_json_template = {
    "SampleTime": {
        "Description": "Time of sampling blood wrt to TimeZero",
        "Units": "s",
    },
    "MeasurementTime": {
        "Description": "Time of measuring counts wrt to TimeZero",
        "Units": "s",
    },
    "CPM": {"Description": "Counts Per Minutes measurement", "Units": "unitless"},
    "TC": {"Description": "Total counts measurement", "Units": "unitless"},
}
# Flags describing which blood measurements are available for this recording.
blood_template = {
    "PlasmaAvail": True,
    "MetaboliteAvail": False,
    "MetaboliteRecoveryCorrectionApplied": False,
    "ContinuousBloodAvail": False,
    "ContinuousBloodDispersionCorrected": False,
    "DiscreteBloodAvail": True,
}
# Output-file suffixes appended to the subject prefix.
naming_suffix_blood = "_blood.json"
naming_suffix_json = "_recording-blood_discrete.json"
# naming_suffix_tsv = "_recording-blood_discrete.tsv"
def save_json_file(subject_prefix):
    """Write the discrete blood-recording sidecar JSON for *subject_prefix*."""
    target = subject_prefix + naming_suffix_json
    with open(target, "w") as out:
        out.write(json.dumps(plasma_blood_discrite_json_template, indent=3))
def save_blood_json(subject_prefix):
    """Write the blood-availability sidecar JSON for *subject_prefix*."""
    target = subject_prefix + naming_suffix_blood
    with open(target, "w") as out:
        out.write(json.dumps(blood_template, indent=3))
# def save_tsv(subject_prefix, injection_start, tsv_raw):
# with open(subject_prefix + naming_suffix_tsv, 'w') as tsv_file:
# pass
if __name__ == "__main__":
    # CLI: take the subject prefix and emit the two JSON sidecar files.
    parser = argparse.ArgumentParser()
    # parser.add_argument(
    #     "injection_start", help="The start time of injection in format hh:mm:ss"
    # )
    parser.add_argument("subject", help="the subject information as prefix")
    # parser.add_argument("tsv_raw", help="the raw measurments in tsv format")
    args = parser.parse_args()
    save_json_file(args.subject)
    save_blood_json(args.subject)
    # save_tsv(args.injection_start, args.tsv_raw)
| 31.741379 | 82 | 0.700706 |
acf5f1855b7d7faff423f3e75ab1f28820bef02a | 286 | py | Python | DailyLife/picOCR_toExcel/main.py | Ayusummer/DailyNotes | af7d0c784eac4de28814eb89c8977f45334d6e62 | [
"MIT"
] | 2 | 2021-05-08T09:54:35.000Z | 2021-09-11T06:54:16.000Z | DailyLife/picOCR_toExcel/main.py | Ayusummer/DailyNotes | af7d0c784eac4de28814eb89c8977f45334d6e62 | [
"MIT"
] | null | null | null | DailyLife/picOCR_toExcel/main.py | Ayusummer/DailyNotes | af7d0c784eac4de28814eb89c8977f45334d6e62 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# @Time : 2021/5/8 7:27
# @Author : 咸鱼型233
# @File : main.py.py
# @Software: PyCharm
# @Function: 用于测试不同版次的函数的测试文件
# @ChangeLog
from config import APPCODE, path_image
from pic_ocr import OCR_to_Excel_aliyunAPI
# Run the Aliyun OCR-to-Excel pipeline on the configured image path.
OCR_to_Excel_aliyunAPI(APPCODE, path_image)
| 23.833333 | 43 | 0.72028 |
acf5f1f9359b47d7e16f8142855bcd27d8b76a23 | 223 | py | Python | chess/backend/src/utils.py | jacobchrismarsh/chess_senior_project | 7797b1f96fda5d4d268224a21e54a744d17e7b81 | [
"MIT"
] | null | null | null | chess/backend/src/utils.py | jacobchrismarsh/chess_senior_project | 7797b1f96fda5d4d268224a21e54a744d17e7b81 | [
"MIT"
] | 40 | 2019-05-04T04:46:31.000Z | 2022-02-26T10:37:51.000Z | chess/backend/src/utils.py | jacobchrismarsh/chess_senior_project | 7797b1f96fda5d4d268224a21e54a744d17e7b81 | [
"MIT"
] | null | null | null | from user.serializers import UserSerializer
def my_jwt_response_handler(token, user=None, request=None):
    """Shape the JWT login response: the token plus the serialized user."""
    serialized_user = UserSerializer(user, context={"request": request}).data
    return {"token": token, "user": serialized_user}
| 24.777778 | 72 | 0.681614 |
acf5f20833f54e8a644ae9cb61890aa58fd7668d | 124 | py | Python | exercicio24.py | monabrisa/-infosatc-lp-avaliativo-01 | 39d8b97162fa0102db1316b977e960bc07cd7299 | [
"MIT"
] | null | null | null | exercicio24.py | monabrisa/-infosatc-lp-avaliativo-01 | 39d8b97162fa0102db1316b977e960bc07cd7299 | [
"MIT"
] | null | null | null | exercicio24.py | monabrisa/-infosatc-lp-avaliativo-01 | 39d8b97162fa0102db1316b977e960bc07cd7299 | [
"MIT"
] | null | null | null | metros = float(input("Digite um valor em m²: "))
acres = metros * 0.000247  # 1 m² ≈ 0.000247 acre
print("{} m² são {} acres".format(metros, acres))
acf5f32cdae667533f442e27a98208da34e285b9 | 1,112 | py | Python | kubernetes/test/test_policy_v1beta1_pod_security_policy_list.py | anemerovsky-essextec/python | 6e40b9169b27c3f1f9422c0f6dd1cd9caef8d57c | [
"Apache-2.0"
] | null | null | null | kubernetes/test/test_policy_v1beta1_pod_security_policy_list.py | anemerovsky-essextec/python | 6e40b9169b27c3f1f9422c0f6dd1cd9caef8d57c | [
"Apache-2.0"
] | null | null | null | kubernetes/test/test_policy_v1beta1_pod_security_policy_list.py | anemerovsky-essextec/python | 6e40b9169b27c3f1f9422c0f6dd1cd9caef8d57c | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.12.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.models.policy_v1beta1_pod_security_policy_list import PolicyV1beta1PodSecurityPolicyList
class TestPolicyV1beta1PodSecurityPolicyList(unittest.TestCase):
    """ PolicyV1beta1PodSecurityPolicyList unit test stubs """
    # Auto-generated (swagger-codegen) test skeleton; no fixtures needed yet.

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def testPolicyV1beta1PodSecurityPolicyList(self):
        """
        Test PolicyV1beta1PodSecurityPolicyList
        """
        # FIXME: construct object with mandatory attributes with example values
        #model = kubernetes.client.models.policy_v1beta1_pod_security_policy_list.PolicyV1beta1PodSecurityPolicyList()
        pass
if __name__ == '__main__':
unittest.main()
| 24.711111 | 118 | 0.748201 |
acf5f4d4b21cc3a81d74ad60d490ed84e886fa69 | 2,777 | py | Python | code/api/enrich.py | CiscoSecurity/tr-05-serverless-farsight-dnsdb | 45a0418f3da78c3a99fa42c175fb9e12271e54d3 | [
"MIT"
] | null | null | null | code/api/enrich.py | CiscoSecurity/tr-05-serverless-farsight-dnsdb | 45a0418f3da78c3a99fa42c175fb9e12271e54d3 | [
"MIT"
] | 1 | 2020-06-25T16:19:52.000Z | 2020-06-25T16:19:52.000Z | code/api/enrich.py | CiscoSecurity/tr-05-serverless-farsight-dnsdb | 45a0418f3da78c3a99fa42c175fb9e12271e54d3 | [
"MIT"
] | 1 | 2020-10-12T18:08:48.000Z | 2020-10-12T18:08:48.000Z | from functools import partial
from flask import Blueprint, current_app, g
from api.client import FarsightClient
from api.mappings import Mapping
from api.schemas import ObservableSchema
from api.utils import get_json, jsonify_data, get_key, jsonify_result
# Flask blueprint for the enrich endpoints; `get_observables` parses and
# validates the request body against the observable schema.
enrich_api = Blueprint('enrich', __name__)
get_observables = partial(get_json, schema=ObservableSchema(many=True))
@enrich_api.route('/deliberate/observables', methods=['POST'])
def deliberate_observables():
    # This module produces no deliberation verdicts: always an empty payload.
    return jsonify_data({})
@enrich_api.route('/observe/observables', methods=['POST'])
def observe_observables():
    """Look up each supported observable in Farsight DNSDB and collect sightings."""
    key = get_key()
    observables = get_observables()
    client = FarsightClient(current_app.config['API_URL'],
                            key,
                            current_app.config['USER_AGENT'])
    g.sightings = []
    limit = current_app.config['CTR_ENTITIES_LIMIT']
    aggr = current_app.config['AGGREGATE']
    # The time filter is only applied when aggregation is enabled.
    time_delta = (current_app.config['NUMBER_OF_DAYS_FOR_FARSIGHT_TIME_FILTER']
                  if aggr else None)
    url_template = current_app.config['UI_SEARCH_URL']
    try:
        for x in observables:
            # Mapping.for_ presumably returns a falsy value for unsupported
            # observable types — those are skipped. TODO confirm.
            mapping = Mapping.for_(x)
            if mapping:
                lookup_data = client.lookup(x, time_delta)
                if lookup_data:
                    refer_link = url_template.format(query=x['value'])
                    g.sightings.extend(
                        mapping.extract_sightings(
                            lookup_data, refer_link, limit, aggr
                        )
                    )
    except KeyError:
        # A missing key means the upstream response schema changed; report a
        # fatal module error instead of returning partial/garbled results.
        g.errors = [{
            'type': 'fatal',
            'code': 'key error',
            'message': 'The data structure of Farsight DNSDB '
                       'has changed. The module is broken.'
        }]
    return jsonify_result()
@enrich_api.route('/refer/observables', methods=['POST'])
def refer_observables():
    """Build "search on Farsight DNSDB" reference links for supported observables."""
    observables = get_observables()
    url_template = current_app.config['UI_SEARCH_URL']
    observable_types_map = current_app.config['FARSIGHT_OBSERVABLES']
    data = []
    for observable in observables:
        # Unsupported observable types map to None and are skipped.
        type_ = observable_types_map.get(observable['type'])
        if type_:
            data.append(
                {
                    'id': (
                        'ref-farsight-dnsdb-search-{type}-{value}'.format(
                            **observable
                        )
                    ),
                    'title': f'Search for this {type_}',
                    'description': f'Lookup this {type_} on Farsight DNSDB',
                    'url': url_template.format(query=observable['value']),
                    'categories': ['Search', 'Farsight DNSDB'],
                }
            )
    return jsonify_data(data)
| 30.855556 | 79 | 0.573641 |
acf5f51941dc9fd30ffd9ecf6e7004e1e45338f8 | 22,150 | py | Python | python/tvm/tir/expr.py | optima2005/incubator-tvm | 545f6ea3fede7a99f0a1b2c6933875550214a46d | [
"Apache-2.0"
] | 3 | 2020-03-12T10:25:51.000Z | 2020-08-05T05:36:23.000Z | python/tvm/tir/expr.py | optima2005/incubator-tvm | 545f6ea3fede7a99f0a1b2c6933875550214a46d | [
"Apache-2.0"
] | null | null | null | python/tvm/tir/expr.py | optima2005/incubator-tvm | 545f6ea3fede7a99f0a1b2c6933875550214a46d | [
"Apache-2.0"
] | 1 | 2018-10-19T18:11:41.000Z | 2018-10-19T18:11:41.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Expression AST Node in TVM.
User do not need to deal with expression AST node directly.
But they can be helpful for developer to do quick proptyping.
While not displayed in the document and python file.
Each expression node have subfields that can be visited from python side.
For example, you can use addexp.a to get the left operand of an Add node.
.. code-block:: python
x = tvm.var("n")
y = x + 2
assert(isinstance(y, tvm.tir.Add))
assert(y.a == x)
"""
import tvm._ffi
from tvm.runtime import Object, ObjectGeneric, DataType, TypeCode, const
from tvm.ir import PrimExpr
import tvm.ir._ffi_api
from . import generic as _generic
from . import _ffi_api
def div_ambiguity_error():
    """Return (not raise) the error used when `/` or `%` on two ints is ambiguous.

    TVM distinguishes floor/trunc/index division, so plain integer division
    must be spelled out explicitly by the user.

    Returns
    -------
    RuntimeError
        The exception object for the caller to raise.
    """
    # Implicit string concatenation; the old "+ " form produced a double
    # space between "floordiv/floormod" and "or truncdiv/truncmod".
    return RuntimeError(
        "TVM supports multiple types of integer divisions, "
        "please call div, indexdiv/indexmod, floordiv/floormod "
        "or truncdiv/truncmod directly to avoid ambiguity in the code.")
def _dtype_is_int(value):
if isinstance(value, int):
return True
return (isinstance(value, ExprOp) and
DataType(value.dtype).type_code == TypeCode.INT)
def _dtype_is_float(value):
if isinstance(value, float):
return True
return (isinstance(value, ExprOp) and
DataType(value.dtype).type_code == TypeCode.FLOAT)
class ExprOp(object):
    """Operator overloading for Expr like expressions.

    Arithmetic operators route through `tvm.tir.generic` helpers; most
    comparison/bitwise operators call straight into the FFI ops.
    """
    def __add__(self, other):
        return _generic.add(self, other)
    def __radd__(self, other):
        return self.__add__(other)
    def __sub__(self, other):
        return _generic.subtract(self, other)
    def __rsub__(self, other):
        return _generic.subtract(other, self)
    def __mul__(self, other):
        return _generic.multiply(self, other)
    def __rmul__(self, other):
        return _generic.multiply(other, self)
    # Division of two integer-typed operands is deliberately rejected:
    # the user must choose floordiv/truncdiv/... explicitly
    # (see div_ambiguity_error above).
    def __div__(self, other):
        if _dtype_is_int(self) and _dtype_is_int(other):
            raise div_ambiguity_error()
        return _generic.divide(self, other)
    def __rdiv__(self, other):
        if _dtype_is_int(self) and _dtype_is_int(other):
            raise div_ambiguity_error()
        return _generic.divide(other, self)
    def __truediv__(self, other):
        if _dtype_is_int(self) and _dtype_is_int(other):
            raise div_ambiguity_error()
        return _generic.divide(self, other)
    def __rtruediv__(self, other):
        if _dtype_is_int(self) and _dtype_is_int(other):
            raise div_ambiguity_error()
        return _generic.divide(other, self)
    def __floordiv__(self, other):
        return _generic.floordiv(self, other)
    def __rfloordiv__(self, other):
        return _generic.floordiv(other, self)
    # `%` maps to *floor* modulo, matching `//` above.
    def __mod__(self, other):
        return _ffi_api._OpFloorMod(self, other)
    def __rmod__(self, other):
        return _ffi_api._OpFloorMod(other, self)
    def __neg__(self):
        # Negation is implemented as multiplication by -1 of the same dtype.
        neg_one = const(-1, self.dtype)
        return self.__mul__(neg_one)
    def __lshift__(self, other):
        return _ffi_api.left_shift(self, other)
    def __rlshift__(self, other):
        return _ffi_api.left_shift(other, self)
    def __rshift__(self, other):
        return _ffi_api.right_shift(self, other)
    def __rrshift__(self, other):
        return _ffi_api.right_shift(other, self)
    def __and__(self, other):
        return _ffi_api.bitwise_and(self, other)
    def __rand__(self, other):
        return _ffi_api.bitwise_and(other, self)
    def __or__(self, other):
        return _ffi_api.bitwise_or(self, other)
    def __ror__(self, other):
        return _ffi_api.bitwise_or(other, self)
    def __xor__(self, other):
        return _ffi_api.bitwise_xor(self, other)
    def __rxor__(self, other):
        return _ffi_api.bitwise_xor(other, self)
    def __invert__(self):
        # Bitwise-not only makes sense on non-float expressions.
        if _dtype_is_float(self):
            raise RuntimeError("Cannot use ~ operator on float type Expr.")
        return _ffi_api.Call(self.dtype, "bitwise_not", [self], Call.PureIntrinsic, None, 0)
    def __lt__(self, other):
        return _ffi_api._OpLT(self, other)
    def __le__(self, other):
        return _ffi_api._OpLE(self, other)
    # == / != build *deferred* comparison objects so `a == b` can mean either
    # reference identity or a symbolic EQ/NE node depending on context.
    def __eq__(self, other):
        return EqualOp(self, other)
    def __ne__(self, other):
        return NotEqualOp(self, other)
    def __gt__(self, other):
        return _ffi_api._OpGT(self, other)
    def __ge__(self, other):
        return _ffi_api._OpGE(self, other)
    def __nonzero__(self):
        # Truthiness of a symbolic expression is not decidable in Python.
        raise ValueError("Cannot use and / or / not operator to Expr, hint: " +
                         "use tvm.all / tvm.any instead")
    def __bool__(self):
        return self.__nonzero__()
    def equal(self, other):
        """Build an equal check expression with other expr.
        Parameters
        ----------
        other : PrimExpr
            The other expression
        Returns
        -------
        ret : PrimExpr
            The equality expression.
        """
        return _ffi_api._OpEQ(self, other)
    def astype(self, dtype):
        """Cast the expression to other type.
        Parameters
        ----------
        dtype : str
            The type of new expression
        Returns
        -------
        expr : PrimExpr
            Expression with new type
        """
        return _generic.cast(self, dtype)
class EqualOp(ObjectGeneric, ExprOp):
    """Deferred equal operator.
    This is used to support sugar that a == b can either
    mean Object.same_as or Object.equal.
    Parameters
    ----------
    a : PrimExpr
        Left operand.
    b : PrimExpr
        Right operand.
    """
    # This class is never handed to C++, so Python's identity check suffices.
    same_as = object.__eq__
    def __init__(self, a, b):
        self.a = a
        self.b = b
    def __nonzero__(self):
        # When forced into a bool, fall back to reference equality.
        return self.a.same_as(self.b)
    def __bool__(self):
        return self.__nonzero__()
    def asobject(self):
        """Convert to a symbolic EQ node."""
        return _ffi_api._OpEQ(self.a, self.b)
class NotEqualOp(ObjectGeneric, ExprOp):
    """Deferred NE operator.
    This is used to support sugar that a != b can either
    mean not Object.same_as or make.NE.
    Parameters
    ----------
    a : PrimExpr
        Left operand.
    b : PrimExpr
        Right operand.
    """
    # This class is never handed to C++, so Python's identity check suffices.
    same_as = object.__eq__
    def __init__(self, a, b):
        self.a = a
        self.b = b
    def __nonzero__(self):
        # When forced into a bool, fall back to negated reference equality.
        return not self.a.same_as(self.b)
    def __bool__(self):
        return self.__nonzero__()
    def asobject(self):
        """Convert to a symbolic NE node."""
        return _ffi_api._OpNE(self.a, self.b)
class PrimExprWithOp(ExprOp, PrimExpr):
    """Helper base class to inherit from PrimExpr."""
    # In Python 3 a class that overrides __eq__ gets __hash__ set to None
    # unless it is restored explicitly.
    # https://docs.python.org/3.1/reference/datamodel.html#object.__hash__
    __hash__ = PrimExpr.__hash__
class ConstExpr(PrimExprWithOp):
    """Base class for constant expressions (FloatImm, IntImm, StringImm)."""
    pass
class BinaryOpExpr(PrimExprWithOp):
    """Base class for binary operator expressions (Add, Sub, Mul, ...)."""
    pass
class CmpExpr(PrimExprWithOp):
    """Base class for comparison expressions (EQ, NE, LT, LE, GT, GE)."""
    pass
class LogicalExpr(PrimExprWithOp):
    """Base class for logical expressions (And, Or, Not)."""
    pass
@tvm._ffi.register_object("Variable")
class Var(PrimExprWithOp):
    """Symbolic variable.
    Parameters
    ----------
    name : str
        The name
    dtype : str
        The data type
    """
    def __init__(self, name, dtype):
        # Delegate construction of the C++ Variable node to the FFI.
        self.__init_handle_by_constructor__(
            _ffi_api.Var, name, dtype)
@tvm._ffi.register_object
class SizeVar(Var):
    """Symbolic variable to represent a tensor index size
    which is greater or equal to zero.
    Parameters
    ----------
    name : str
        The name
    dtype : str
        The data type
    """
    # pylint: disable=super-init-not-called
    def __init__(self, name, dtype):
        # Var.__init__ is bypassed on purpose: the SizeVar FFI constructor
        # builds the complete node itself.
        self.__init_handle_by_constructor__(
            _ffi_api.SizeVar, name, dtype)
@tvm._ffi.register_object
class IterVar(Object, ExprOp):
    """Represent iteration variable.
    IterVar represents axis iterations in the computation.
    Parameters
    ----------
    dom : Range
        The domain of the iteration.
    var : Union[Var, str]
        The internal variable that is used for iteration.
    iter_type : int
        The iteration type.
    thread_tag : str
        The thread type tag.
    See Also
    --------
    tvm.thread_axis: Create thread axis IterVar.
    tvm.reduce_axis: Create reduce axis IterVar.
    """
    # Iteration-type codes understood by the C++ side.
    DataPar = 0
    ThreadIndex = 1
    CommReduce = 2
    Ordered = 3
    DimInfo = 4
    Unrolled = 5
    Vectorized = 6
    Parallelized = 7
    Tensorized = 8
    def __init__(self, dom, var, iter_type, thread_tag=""):
        if dom is not None:
            # A two-element (begin, end) list/tuple is promoted to a Range.
            if isinstance(dom, (list, tuple)):
                if len(dom) != 2:
                    raise TypeError("need to be list of ranges")
                dom = tvm.ir.Range(dom[0], dom[1])
            if not isinstance(dom, tvm.ir.Range):
                raise TypeError("dom need to be Range")
        name = var if var is not None else "iter"
        # A non-Var `var` (e.g. a plain string) names a fresh int32 variable.
        var = Var(name, dtype="int32") if not isinstance(var, Var) else var
        self.__init_handle_by_constructor__(
            _ffi_api.IterVar, dom, var, iter_type, thread_tag)
@tvm._ffi.register_object
class CommReducer(Object):
    """Communicative reduce operator
    Parameters
    ----------
    lhs : List[Var]
        The left arguments of the reducer.
    rhs : List[Var]
        The right arguments of the reducer.
    result : List[PrimExpr]
        The reduction results.
    identity_element : List[PrimExpr]
        The identity elements.
    """
    def __init__(self, lhs, rhs, result, identity_element):
        # Delegate construction of the C++ CommReducer node to the FFI.
        self.__init_handle_by_constructor__(
            _ffi_api.CommReducer, lhs, rhs, result, identity_element)
@tvm._ffi.register_object
class Reduce(PrimExprWithOp):
    """Reduce node.
    Parameters
    ----------
    combiner : CommReducer
        The combiner.
    src : list of Expr
        The source expression.
    rdom : list of IterVar
        The iteration domain
    condition : PrimExpr
        The reduce condition.
    value_index : int
        The value index.
    """
    def __init__(self, combiner, src, rdom, condition, value_index):
        # Delegate construction of the C++ Reduce node to the FFI.
        self.__init_handle_by_constructor__(
            _ffi_api.Reduce, combiner, src, rdom,
            condition, value_index)
@tvm._ffi.register_object
class FloatImm(ConstExpr):
    """Float constant.
    Parameters
    ----------
    dtype : str
        The data type
    value : float
        The constant value.
    """
    def __init__(self, dtype, value):
        # FloatImm lives in tvm.ir, hence the different FFI module.
        self.__init_handle_by_constructor__(
            tvm.ir._ffi_api.FloatImm, dtype, value)
@tvm._ffi.register_object
class IntImm(ConstExpr):
    """Int constant.
    Parameters
    ----------
    dtype : str
        The data type
    value : int
        The constant value.
    """
    def __init__(self, dtype, value):
        # IntImm lives in tvm.ir, hence the different FFI module.
        self.__init_handle_by_constructor__(
            tvm.ir._ffi_api.IntImm, dtype, value)
    def __int__(self):
        # Allow int(imm) to recover the wrapped Python integer.
        return self.value
@tvm._ffi.register_object
class StringImm(ConstExpr):
    """String constant.
    Parameters
    ----------
    value : str
        The string value.
    """
    def __init__(self, value):
        self.__init_handle_by_constructor__(
            _ffi_api.StringImm, value)
    def __eq__(self, other):
        # Compare by payload; `other` may be another ConstExpr or a raw value.
        if isinstance(other, ConstExpr):
            return self.value == other.value
        return self.value == other
    def __ne__(self, other):
        if isinstance(other, ConstExpr):
            return self.value != other.value
        return self.value != other
@tvm._ffi.register_object
class Cast(PrimExprWithOp):
    """Cast expression.
    Parameters
    ----------
    dtype : str
        The data type
    value : PrimExpr
        The value to be cast.
    """
    def __init__(self, dtype, value):
        # Delegate construction of the C++ Cast node to the FFI.
        self.__init_handle_by_constructor__(
            _ffi_api.Cast, dtype, value)
@tvm._ffi.register_object
class Add(BinaryOpExpr):
    """Add node.
    Parameters
    ----------
    a : PrimExpr
        The left hand operand.
    b : PrimExpr
        The right hand operand.
    """
    def __init__(self, a, b):
        # Delegate construction of the C++ Add node to the FFI.
        self.__init_handle_by_constructor__(
            _ffi_api.Add, a, b)
@tvm._ffi.register_object
class Sub(BinaryOpExpr):
    """Sub node.
    Parameters
    ----------
    a : PrimExpr
        The left hand operand.
    b : PrimExpr
        The right hand operand.
    """
    def __init__(self, a, b):
        # Delegate construction of the C++ Sub node to the FFI.
        self.__init_handle_by_constructor__(
            _ffi_api.Sub, a, b)
@tvm._ffi.register_object
class Mul(BinaryOpExpr):
    """Mul node.
    Parameters
    ----------
    a : PrimExpr
        The left hand operand.
    b : PrimExpr
        The right hand operand.
    """
    def __init__(self, a, b):
        # Delegate construction of the C++ Mul node to the FFI.
        self.__init_handle_by_constructor__(
            _ffi_api.Mul, a, b)
@tvm._ffi.register_object
class Div(BinaryOpExpr):
    """Div node.
    Parameters
    ----------
    a : PrimExpr
        The left hand operand.
    b : PrimExpr
        The right hand operand.
    """
    def __init__(self, a, b):
        # Delegate construction of the C++ Div node to the FFI.
        self.__init_handle_by_constructor__(
            _ffi_api.Div, a, b)
@tvm._ffi.register_object
class Mod(BinaryOpExpr):
    """Mod node.
    Parameters
    ----------
    a : PrimExpr
        The left hand operand.
    b : PrimExpr
        The right hand operand.
    """
    def __init__(self, a, b):
        # Delegate construction of the C++ Mod node to the FFI.
        self.__init_handle_by_constructor__(
            _ffi_api.Mod, a, b)
@tvm._ffi.register_object
class FloorDiv(BinaryOpExpr):
    """FloorDiv node.
    Parameters
    ----------
    a : PrimExpr
        The left hand operand.
    b : PrimExpr
        The right hand operand.
    """
    def __init__(self, a, b):
        # Delegate construction of the C++ FloorDiv node to the FFI.
        self.__init_handle_by_constructor__(
            _ffi_api.FloorDiv, a, b)
@tvm._ffi.register_object
class FloorMod(BinaryOpExpr):
    """FloorMod node.
    Parameters
    ----------
    a : PrimExpr
        The left hand operand.
    b : PrimExpr
        The right hand operand.
    """
    def __init__(self, a, b):
        # Delegate construction of the C++ FloorMod node to the FFI.
        self.__init_handle_by_constructor__(
            _ffi_api.FloorMod, a, b)
@tvm._ffi.register_object
class Min(BinaryOpExpr):
    """Min node.
    Parameters
    ----------
    a : PrimExpr
        The left hand operand.
    b : PrimExpr
        The right hand operand.
    """
    def __init__(self, a, b):
        # Delegate construction of the C++ Min node to the FFI.
        self.__init_handle_by_constructor__(
            _ffi_api.Min, a, b)
@tvm._ffi.register_object
class Max(BinaryOpExpr):
    """Max node.
    Parameters
    ----------
    a : PrimExpr
        The left hand operand.
    b : PrimExpr
        The right hand operand.
    """
    def __init__(self, a, b):
        # Delegate construction of the C++ Max node to the FFI.
        self.__init_handle_by_constructor__(
            _ffi_api.Max, a, b)
@tvm._ffi.register_object
class EQ(CmpExpr):
    """EQ node.
    Parameters
    ----------
    a : PrimExpr
        The left hand operand.
    b : PrimExpr
        The right hand operand.
    """
    def __init__(self, a, b):
        # Delegate construction of the C++ EQ node to the FFI.
        self.__init_handle_by_constructor__(
            _ffi_api.EQ, a, b)
@tvm._ffi.register_object
class NE(CmpExpr):
    """NE node.
    Parameters
    ----------
    a : PrimExpr
        The left hand operand.
    b : PrimExpr
        The right hand operand.
    """
    def __init__(self, a, b):
        # Delegate construction of the C++ NE node to the FFI.
        self.__init_handle_by_constructor__(
            _ffi_api.NE, a, b)
@tvm._ffi.register_object
class LT(CmpExpr):
    """LT node.
    Parameters
    ----------
    a : PrimExpr
        The left hand operand.
    b : PrimExpr
        The right hand operand.
    """
    def __init__(self, a, b):
        # Delegate construction of the C++ LT node to the FFI.
        self.__init_handle_by_constructor__(
            _ffi_api.LT, a, b)
@tvm._ffi.register_object
class LE(CmpExpr):
    """LE node.
    Parameters
    ----------
    a : PrimExpr
        The left hand operand.
    b : PrimExpr
        The right hand operand.
    """
    def __init__(self, a, b):
        # Delegate construction of the C++ LE node to the FFI.
        self.__init_handle_by_constructor__(
            _ffi_api.LE, a, b)
@tvm._ffi.register_object
class GT(CmpExpr):
    """GT node.
    Parameters
    ----------
    a : PrimExpr
        The left hand operand.
    b : PrimExpr
        The right hand operand.
    """
    def __init__(self, a, b):
        # Delegate construction of the C++ GT node to the FFI.
        self.__init_handle_by_constructor__(
            _ffi_api.GT, a, b)
@tvm._ffi.register_object
class GE(CmpExpr):
    """GE node.
    Parameters
    ----------
    a : PrimExpr
        The left hand operand.
    b : PrimExpr
        The right hand operand.
    """
    def __init__(self, a, b):
        # Delegate construction of the C++ GE node to the FFI.
        self.__init_handle_by_constructor__(
            _ffi_api.GE, a, b)
@tvm._ffi.register_object
class And(LogicalExpr):
    """And node.
    Parameters
    ----------
    a : PrimExpr
        The left hand operand.
    b : PrimExpr
        The right hand operand.
    """
    def __init__(self, a, b):
        # Delegate construction of the C++ And node to the FFI.
        self.__init_handle_by_constructor__(
            _ffi_api.And, a, b)
@tvm._ffi.register_object
class Or(LogicalExpr):
    """Or node.
    Parameters
    ----------
    a : PrimExpr
        The left hand operand.
    b : PrimExpr
        The right hand operand.
    """
    def __init__(self, a, b):
        # Delegate construction of the C++ Or node to the FFI.
        self.__init_handle_by_constructor__(
            _ffi_api.Or, a, b)
@tvm._ffi.register_object
class Not(LogicalExpr):
    """Not node.
    Parameters
    ----------
    a : PrimExpr
        The input value
    """
    def __init__(self, a):
        # Delegate construction of the C++ Not node to the FFI.
        self.__init_handle_by_constructor__(
            _ffi_api.Not, a)
@tvm._ffi.register_object
class Select(PrimExprWithOp):
    """Select node.
    Note
    ----
    Select may compute both true_value and false_value.
    Use :py:class:`tvm.if_then_else` instead if you want to
    get a conditional expression that only evaluates
    the correct branch.
    Parameters
    ----------
    condition : PrimExpr
        The condition expression.
    true_value : PrimExpr
        The value to take when condition is true.
    false_value : PrimExpr
        The value to take when condition is false.
    """
    def __init__(self, condition, true_value, false_value):
        # Delegate construction of the C++ Select node to the FFI.
        self.__init_handle_by_constructor__(
            _ffi_api.Select, condition, true_value, false_value)
@tvm._ffi.register_object
class Load(PrimExprWithOp):
    """Load node.
    Parameters
    ----------
    dtype : str
        The data type.
    buffer_var : Var
        The buffer variable in the load expression.
    index : PrimExpr
        The index in the load.
    predicate : PrimExpr
        The load predicate.
    """
    def __init__(self, dtype, buffer_var, index, predicate=None):
        # The FFI constructor takes the predicate as a trailing optional arg,
        # so pack it only when given.
        args = [] if predicate is None else [predicate]
        self.__init_handle_by_constructor__(
            _ffi_api.Load, dtype, buffer_var, index, *args)
@tvm._ffi.register_object
class Ramp(PrimExprWithOp):
    """Ramp node.
    Parameters
    ----------
    base : PrimExpr
        The base expression.
    stride : PrimExpr
        The stride of the ramp.
    lanes : int
        The lanes of the expression.
    """
    def __init__(self, base, stride, lanes):
        # Delegate construction of the C++ Ramp node to the FFI.
        self.__init_handle_by_constructor__(
            _ffi_api.Ramp, base, stride, lanes)
@tvm._ffi.register_object
class Broadcast(PrimExprWithOp):
    """Broadcast node.
    Parameters
    ----------
    value : PrimExpr
        The value of the expression.
    lanes : int
        The lanes of the expression.
    """
    def __init__(self, value, lanes):
        # Delegate construction of the C++ Broadcast node to the FFI.
        self.__init_handle_by_constructor__(
            _ffi_api.Broadcast, value, lanes)
@tvm._ffi.register_object
class Shuffle(PrimExprWithOp):
    """Shuffle node.
    Parameters
    ----------
    vectors : Array of Expr
        The vectors
    indices : Array of indices
        The indices
    """
    def __init__(self, vectors, indices):
        # Delegate construction of the C++ Shuffle node to the FFI.
        self.__init_handle_by_constructor__(
            _ffi_api.Shuffle, vectors, indices)
@tvm._ffi.register_object
class Call(PrimExprWithOp):
    """Call node.
    Parameters
    ----------
    dtype : str
        The return data type
    name : str
        The name of the function
    args : list of Expr
        The input arguments to the call
    call_type : int
        The type of the call
    func : Operation, optional
        Operation if call_type is Halide
    value_index : int
        The output value index
    """
    # Call-type codes understood by the C++ side.
    Extern = 0
    ExternCPlusPlus = 1
    PureExtern = 2
    Halide = 3
    Intrinsic = 4
    PureIntrinsic = 5
    def __init__(self, dtype, name, args, call_type, func, value_index):
        # Delegate construction of the C++ Call node to the FFI.
        self.__init_handle_by_constructor__(
            _ffi_api.Call, dtype, name, args, call_type, func, value_index)
@tvm._ffi.register_object
class Let(PrimExprWithOp):
    """Let node.
    Parameters
    ----------
    var : Var
        The variable in the binding.
    value : PrimExpr
        The value in to be binded.
    body : PrimExpr
        The body expression.
    """
    def __init__(self, var, value, body):
        # Delegate construction of the C++ Let node to the FFI.
        self.__init_handle_by_constructor__(
            _ffi_api.Let, var, value, body)
@tvm._ffi.register_object
class Any(PrimExpr):
    """Any node. A placeholder expression with no arguments.
    """
    def __init__(self):
        # Delegate construction of the C++ Any node to the FFI.
        self.__init_handle_by_constructor__(_ffi_api.Any)
| 22.717949 | 97 | 0.616298 |
acf5f5620d10e0939827fbb9dbfb2095accd14c3 | 1,222 | py | Python | tools/plot_utils.py | oval-group/decomposition-plnn-bounds | 1f2548bf422a5c6ac235cfde2b6f467f850f65a1 | [
"MIT"
] | 2 | 2021-02-15T13:59:40.000Z | 2022-03-10T21:18:17.000Z | tools/plot_utils.py | oval-group/decomposition-plnn-bounds | 1f2548bf422a5c6ac235cfde2b6f467f850f65a1 | [
"MIT"
] | null | null | null | tools/plot_utils.py | oval-group/decomposition-plnn-bounds | 1f2548bf422a5c6ac235cfde2b6f467f850f65a1 | [
"MIT"
] | 1 | 2021-03-22T01:20:31.000Z | 2021-03-22T01:20:31.000Z | import matplotlib.pyplot as plt
import matplotlib
# The active matplotlib colour cycle, exposed at module level for callers.
colors = plt.rcParams['axes.prop_cycle'].by_key()['color']
def custom_plot(fignumber, x, y, STD, xlabel, ylabel, title, errorbars=False, labelname="", dotted="-", xlog=False,
                ylog=False, lw=1.15, color=None):
    """Plot (x, y) on figure *fignumber* with consistent styling.

    STD is read only when ``errorbars`` is True: STD[0]/STD[1] are the
    lower/upper offsets of the shaded band around the curve
    (assumed to be array-likes matching x -- TODO confirm at call sites).
    ``dotted`` is a matplotlib linestyle ("-", "dashed", ...); non-solid
    lines get a slightly larger line width so they remain readable.
    Operates on matplotlib's global state (pyplot), returns nothing.
    """
    fontsize = 10
    matplotlib.rcParams.update({'font.size': fontsize})
    plt.figure(fignumber, figsize=(8, 5))
    additional_args = {}
    error_args = {}
    if dotted == "dashed":
        additional_args['dashes'] = (5, 5)
    if color:
        additional_args['color'] = color
        error_args['color'] = color
    if not errorbars:
        # Point markers only on plain curves; bands replace them otherwise.
        additional_args['marker'] = 'x'
    if dotted != "-":
        lw += 0.15
    plt.plot(x, y, linestyle=dotted, label=labelname, ms=4, linewidth=lw, **additional_args)
    if errorbars:
        plt.fill_between(x, y-STD[0], y+STD[1], alpha=0.12, **error_args)
    if xlog:
        plt.xscale('log', nonposx='clip')
    if ylog:
        plt.yscale('log', nonposy='clip')
    plt.grid(True)
    plt.xlabel(xlabel, fontsize=fontsize)
    plt.ylabel(ylabel, fontsize=fontsize)
    plt.title(title)
    plt.legend(fontsize=fontsize)
| 33.944444 | 115 | 0.630933 |
acf5f58be2d9830f7736e18837f419c00241afca | 3,267 | py | Python | addons/website_event_sale/controllers/main.py | jjiege/odoo | fd5b8ad387c1881f349d125cbd56433f4d49398f | [
"MIT"
] | null | null | null | addons/website_event_sale/controllers/main.py | jjiege/odoo | fd5b8ad387c1881f349d125cbd56433f4d49398f | [
"MIT"
] | null | null | null | addons/website_event_sale/controllers/main.py | jjiege/odoo | fd5b8ad387c1881f349d125cbd56433f4d49398f | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import http, _
from odoo.addons.website_event.controllers.main import WebsiteEventController
from odoo.http import request
class WebsiteEventSaleController(WebsiteEventController):
    """Website event controller extended with eCommerce (ticket sale) logic."""
    @http.route()
    def event_register(self, event, **post):
        """Render the registration page with the visitor's pricelist applied."""
        event = event.with_context(pricelist=request.website.id)
        if not request.context.get('pricelist'):
            pricelist = request.website.get_current_pricelist()
            if pricelist:
                event = event.with_context(pricelist=pricelist.id)
        return super(WebsiteEventSaleController, self).event_register(event, **post)
    def _process_tickets_details(self, data):
        """Collect ticket quantities from ``nb_register-<ticket_id>`` form keys.

        Returns a list of dicts (id, name, quantity, price) for tickets with a
        non-zero quantity.
        """
        ticket_post = {}
        for key, value in data.items():
            if not key.startswith('nb_register') or '-' not in key:
                continue
            items = key.split('-')
            if len(items) < 2:
                continue
            ticket_post[int(items[1])] = int(value)
        tickets = request.env['event.event.ticket'].browse(tuple(ticket_post))
        return [{'id': ticket.id, 'name': ticket.name, 'quantity': ticket_post[ticket.id], 'price': ticket.price} for ticket in tickets if ticket_post[ticket.id]]
    @http.route()
    def registration_confirm(self, event, **post):
        """Put the registrations in the cart, then confirm (free) or checkout."""
        order = request.website.sale_get_order(force_create=1)
        attendee_ids = set()
        registrations = self._process_registration_details(post)
        for registration in registrations:
            ticket = request.env['event.event.ticket'].sudo().browse(int(registration['ticket_id']))
            cart_values = order.with_context(event_ticket_id=ticket.id, fixed_price=True)._cart_update(product_id=ticket.product_id.id, add_qty=1, registration_data=[registration])
            attendee_ids |= set(cart_values.get('attendee_ids', []))
        # free tickets -> order with amount = 0: auto-confirm, no checkout
        if not order.amount_total:
            order.action_confirm()  # tde notsure: email sending ?
            attendees = request.env['event.registration'].browse(list(attendee_ids)).sudo()
            # clean context and session, then redirect to the confirmation page
            request.website.sale_reset()
            urls = event._get_event_resource_urls(list(attendee_ids))
            return request.render("website_event.registration_complete", {
                'attendees': attendees,
                'event': event,
                'google_url': urls.get('google_url'),
                'iCal_url': urls.get('iCal_url')
            })
        return request.redirect("/shop/checkout")
    def _add_event(self, event_name="New Event", context=None, **kwargs):
        """Seed newly created events with a default zero-priced 'Registration' ticket."""
        product = request.env.ref('event_sale.product_product_event', raise_if_not_found=False)
        if product:
            context = dict(context or {}, default_event_ticket_ids=[[0, 0, {
                'name': _('Registration'),
                'product_id': product.id,
                'deadline': False,
                'seats_max': 1000,
                'price': 0,
            }]])
        return super(WebsiteEventSaleController, self)._add_event(event_name, context, **kwargs)
| 46.671429 | 180 | 0.63667 |
acf5f5e007aa91baee7af94709c40f362e30f3eb | 7,889 | py | Python | src/tests/bioinformatics_i/week01/test_regulatory_motifs.py | paul-reiners/dna-analysis | 1ec5b2e2e5d264dae66181908112ce02728158d8 | [
"Apache-2.0"
] | null | null | null | src/tests/bioinformatics_i/week01/test_regulatory_motifs.py | paul-reiners/dna-analysis | 1ec5b2e2e5d264dae66181908112ce02728158d8 | [
"Apache-2.0"
] | null | null | null | src/tests/bioinformatics_i/week01/test_regulatory_motifs.py | paul-reiners/dna-analysis | 1ec5b2e2e5d264dae66181908112ce02728158d8 | [
"Apache-2.0"
] | null | null | null | from bioinformatics_i.week01.regulatory_motifs import motif_d, get_median_strings, pr, compute_entropy, \
get_consensus_strings, count, profile, score, count_with_pseudocounts, profile_with_pseudocounts, d
def test_d():
    """Distance of a pattern against a longer string matches the reference."""
    assert d('GATTCTCA', 'GCAAAGACGCTGACCAA') == 3
def test_motif_d():
    """Total pattern distance over a set of DNA strings matches the reference."""
    dna_strings = {'TTACCTTAAC', 'GATATCTGTC', 'ACGGCGTTCG', 'CCCTAAAGAG', 'CGTCAGAGGT'}
    assert motif_d('AAA', dna_strings) == 5
def test_get_median_strings():
    """A single 3-mer median string is found for the classic example."""
    dna = {'AAATTGACGCAT', 'GACGACCACGTT', 'CGTCAGCGCCTG', 'GCTGAGCACCGG', 'AGTTCGGGACAG'}
    assert set(get_median_strings(3, dna)) == {'GAC'}
def test_get_median_strings_2():
    """Several 7-mer median strings tie for this input."""
    dna = {'CTCGATGAGTAGGAAAGTAGTTTCACTGGGCGAACCACCCCGGCGCTAATCCTAGTGCCC',
           'GCAATCCTACCCGAGGCCACATATCAGTAGGAACTAGAACCACCACGGGTGGCTAGTTTC',
           'GGTGTTGAACCACGGGGTTAGTTTCATCTATTGTAGGAATCGGCTTCAAATCCTACACAG'}
    assert set(get_median_strings(7, dna)) == {'GTAGGAA', 'GAACCAC', 'AATCCTA', 'TAGTTTC'}
def test_pr_2():
    """A k-mer containing a zero-probability column has probability 0."""
    profile_matrix = {
        'A': [0.2, 0.2, 0.0, 0.0, 0.0, 0.0, 0.9, 0.1, 0.1, 0.1, 0.3, 0.0],
        'C': [0.1, 0.6, 0.0, 0.0, 0.0, 0.0, 0.0, 0.4, 0.1, 0.2, 0.4, 0.0],
        'G': [0.0, 0.0, 1.0, 1.0, 0.9, 0.9, 0.1, 0.0, 0.0, 0.0, 0.0, 0.0],
        'T': [0.7, 0.2, 0.0, 0.0, 0.1, 0.1, 0.0, 0.5, 0.8, 0.7, 0.3, 0.4],
    }
    assert abs(pr('TCGTGGATTTCC', profile_matrix) - 0.0) < 0.01
def test_compute_entropy():
    """Entropy of a probability column matches the known reference values."""
    epsilon = 0.01
    cases = [
        ([0.2, 0.6, 0.0, 0.2], 1.371),
        ([0.0, 0.6, 0.0, 0.4], 0.971),
        ([0.0, 0.0, 0.9, 0.1], 0.467),
    ]
    for probabilities, expected in cases:
        assert abs(compute_entropy(probabilities) - expected) < epsilon
def test_get_consensus_strings():
    """All 12 maximum-probability consensus strings are produced."""
    probs = {'A': [0.4, 0.3, 0.0, 0.1, 0.0, 0.9],
             'C': [0.2, 0.3, 0.0, 0.4, 0.0, 0.1],
             'G': [0.1, 0.3, 1.0, 0.1, 0.5, 0.0],
             'T': [0.3, 0.1, 0.0, 0.4, 0.5, 0.0]}
    # Expected pattern: A {A,C,G} G {C,T} {G,T} A  (3 * 2 * 2 = 12 strings).
    expected = {'A' + x + 'G' + y + z + 'A' for x in 'ACG' for y in 'CT' for z in 'GT'}
    assert set(get_consensus_strings(probs)) == expected
def test_get_consensus_strings_2():
    """Same check as test_get_consensus_strings with the expectation rebuilt."""
    probs = {'A': [0.4, 0.3, 0.0, 0.1, 0.0, 0.9],
             'C': [0.2, 0.3, 0.0, 0.4, 0.0, 0.1],
             'G': [0.1, 0.3, 1.0, 0.1, 0.5, 0.0],
             'T': [0.3, 0.1, 0.0, 0.4, 0.5, 0.0]}
    expected = {'A' + x + 'G' + y + z + 'A' for x in 'ACG' for y in 'CT' for z in 'GT'}
    assert set(get_consensus_strings(probs)) == expected
def test_count():
    """Per-column nucleotide counts of the NF-kB-style motif matrix."""
    rows = ['TCGGGGgTTTtt', 'cCGGtGAcTTaC', 'aCGGGGATTTtC', 'TtGGGGAcTTtt',
            'aaGGGGAcTTCC', 'TtGGGGAcTTCC', 'TCGGGGATTcat', 'TCGGGGATTcCt',
            'TaGGGGAacTaC', 'TCGGGtATaaCC']
    expected = {'A': [2, 2, 0, 0, 0, 0, 9, 1, 1, 1, 3, 0],
                'C': [1, 6, 0, 0, 0, 0, 0, 4, 1, 2, 4, 6],
                'G': [0, 0, 10, 10, 9, 9, 1, 0, 0, 0, 0, 0],
                'T': [7, 2, 0, 0, 1, 1, 0, 5, 8, 7, 3, 4]}
    actual = count([list(row) for row in rows])
    for nucleotide in 'ACGT':
        assert actual[nucleotide] == expected[nucleotide]
def test_profile():
    """Per-column nucleotide frequencies of the motif matrix."""
    rows = ['TCGGGGgTTTtt', 'cCGGtGAcTTaC', 'aCGGGGATTTtC', 'TtGGGGAcTTtt',
            'aaGGGGAcTTCC', 'TtGGGGAcTTCC', 'TCGGGGATTcat', 'TCGGGGATTcCt',
            'TaGGGGAacTaC', 'TCGGGtATaaCC']
    expected = {'A': [0.2, 0.2, 0.0, 0.0, 0.0, 0.0, 0.9, 0.1, 0.1, 0.1, 0.3, 0.0],
                'C': [0.1, 0.6, 0.0, 0.0, 0.0, 0.0, 0.0, 0.4, 0.1, 0.2, 0.4, 0.6],
                'G': [0.0, 0.0, 1.0, 1.0, 0.9, 0.9, 0.1, 0.0, 0.0, 0.0, 0.0, 0.0],
                'T': [0.7, 0.2, 0.0, 0.0, 0.1, 0.1, 0.0, 0.5, 0.8, 0.7, 0.3, 0.4]}
    actual = profile([list(row) for row in rows])
    for nucleotide in 'ACGT':
        assert actual[nucleotide] == expected[nucleotide]
def test_score():
    """Score of the motif matrix matches the known reference value."""
    rows = ['TCGGGGgTTTtt', 'cCGGtGAcTTaC', 'aCGGGGATTTtC', 'TtGGGGAcTTtt',
            'aaGGGGAcTTCC', 'TtGGGGAcTTCC', 'TCGGGGATTcat', 'TCGGGGATTcCt',
            'TaGGGGAacTaC', 'TCGGGtATaaCC']
    assert abs(score([list(row) for row in rows]) - 9.916290005356972) < 0.1
def test_count_with_pseudocounts():
    """Counts with Laplace pseudocounts: every raw count is incremented by 1."""
    motifs = [list(row) for row in ('TAAC', 'GTCT', 'ACTA', 'AGGT')]
    expected = {'A': [3, 2, 2, 2],
                'C': [1, 2, 2, 2],
                'G': [2, 2, 2, 1],
                'T': [2, 2, 2, 3]}
    actual = count_with_pseudocounts(motifs)
    for nucleotide in expected:
        assert actual[nucleotide] == expected[nucleotide]
def test_profile_with_pseudocounts():
    """Frequencies with pseudocounts: (count + 1) / (rows + 4)."""
    motifs = [list(row) for row in ('TAAC', 'GTCT', 'ACTA', 'AGGT')]
    expected = {'A': [3 / 8, 2 / 8, 2 / 8, 2 / 8],
                'C': [1 / 8, 2 / 8, 2 / 8, 2 / 8],
                'G': [2 / 8, 2 / 8, 2 / 8, 1 / 8],
                'T': [2 / 8, 2 / 8, 2 / 8, 3 / 8]}
    actual = profile_with_pseudocounts(motifs)
    for nucleotide, expected_probs in expected.items():
        for got, want in zip(actual[nucleotide], expected_probs):
            assert abs(want - got) < 0.01
| 46.405882 | 118 | 0.437825 |
acf5f5e56c77474dd8620d5f76a940a1697c850e | 1,493 | py | Python | smarttypes/utils/postgres_handle.py | greeness/SmartTypes | 6598f1566fd7c49ba22c0262a282aaf3e4518b0c | [
"MIT",
"Unlicense"
] | 2 | 2015-08-21T10:29:27.000Z | 2019-12-13T23:47:42.000Z | smarttypes/utils/postgres_handle.py | greeness/SmartTypes | 6598f1566fd7c49ba22c0262a282aaf3e4518b0c | [
"MIT",
"Unlicense"
] | null | null | null | smarttypes/utils/postgres_handle.py | greeness/SmartTypes | 6598f1566fd7c49ba22c0262a282aaf3e4518b0c | [
"MIT",
"Unlicense"
] | null | null | null | import psycopg2
class PostgresHandle(object):
    """Thin convenience wrapper around a psycopg2 connection.

    The connection is opened lazily on first access and cached afterwards.
    """
    def __init__(self, connection_string):
        # Stored verbatim; psycopg2 parses it when the connection is opened.
        self.connection_string = connection_string
    @property
    def connection(self):
        """Lazily open (and cache) the psycopg2 connection."""
        if '_connection' not in self.__dict__:
            self._connection = psycopg2.connect(self.connection_string)
        return self._connection
    def execute_query(self, query_string, params=None, return_results=True, print_qry=False):
        """Execute *query_string* and return the result rows as dicts.

        Parameters
        ----------
        query_string : str
            SQL with optional psycopg2-style placeholders.
        params : dict, optional
            Values for the placeholders (defaults to an empty mapping).
        return_results : bool
            When False the query is executed but no rows are fetched
            (useful for INSERT/UPDATE/DDL); an empty list is returned.
        print_qry : bool
            When True the query string is printed before execution.
            (Previously this flag was silently ignored.)

        Raises
        ------
        Exception
            If the select clause yields duplicate column names, which would
            silently collide in the per-row dicts.  The old check compared
            the number of columns against the row width, which are always
            equal, so it could never fire.
        """
        params = params if params else {}
        if print_qry:
            print(query_string)
        cursor = self.connection.cursor()
        cursor.execute(query_string, params)
        column_names = cursor.description
        cursor_results = []
        if return_results:
            cursor_results = cursor.fetchall()
        cursor.close()
        rows = []
        if cursor_results:
            # Two result columns with the same name (e.g. joining two tables
            # that both have an "id" column) would overwrite each other in
            # the row dicts below, so fail loudly instead.
            names = [description[0] for description in column_names]
            if len(set(names)) != len(names):
                raise Exception("PostgresHandle.execute_query has some dup column names in the select clause.")
            for cursor_result in cursor_results:
                rows.append(dict(zip(names, cursor_result)))
        return rows
| 32.456522 | 111 | 0.582719 |
acf5f8c57f423064f4291b25ebd1adfd23ca6ad9 | 250 | py | Python | sum natural numbers.py | jonckheereke/algorithms-puthon-intro-ex | b69e0471e814e45390d9f8d89019dd5296da18a4 | [
"Apache-2.0"
] | null | null | null | sum natural numbers.py | jonckheereke/algorithms-puthon-intro-ex | b69e0471e814e45390d9f8d89019dd5296da18a4 | [
"Apache-2.0"
] | null | null | null | sum natural numbers.py | jonckheereke/algorithms-puthon-intro-ex | b69e0471e814e45390d9f8d89019dd5296da18a4 | [
"Apache-2.0"
] | null | null | null | summation = 0
def calculate_totalsum(end, begin=0):
    """Sum the integers in [begin, end) divisible by 7 or by 9.

    Parameters
    ----------
    end : int
        Exclusive upper bound of the range.
    begin : int, optional
        Inclusive lower bound (default 0).

    Returns
    -------
    int
        The sum of the qualifying numbers; 0 for an empty range.
    """
    # Generator expression replaces the manual accumulator loop.
    return sum(n for n in range(begin, end) if n % 7 == 0 or n % 9 == 0)
# Script entry: print the total for the range [0, 10000).
summation=calculate_totalsum(10000)
print(summation)
| 20.833333 | 39 | 0.652 |
acf5f8d2857ec572e12a5bf3fb41f677f6cba16a | 845 | py | Python | operators/mute_nodes.py | MarcoHoo/RenderStackNode | e9624ccd4ebd4f72bd5b332205574bb053dbcb8d | [
"Apache-2.0"
] | 37 | 2020-11-30T04:10:50.000Z | 2021-11-11T09:49:23.000Z | operators/mute_nodes.py | MarcoHoo/RenderStackNode | e9624ccd4ebd4f72bd5b332205574bb053dbcb8d | [
"Apache-2.0"
] | 4 | 2021-07-28T13:22:26.000Z | 2021-08-03T09:27:29.000Z | operators/mute_nodes.py | MarcoHoo/RenderStackNode | e9624ccd4ebd4f72bd5b332205574bb053dbcb8d | [
"Apache-2.0"
] | 5 | 2020-11-30T10:52:28.000Z | 2021-09-04T03:40:05.000Z | import bpy
from bpy.props import StringProperty
class RSN_OT_MuteNodes(bpy.types.Operator):
    """Toggle the mute state of RSN nodes.

    With an empty ``node_name`` every currently selected node is toggled;
    otherwise only the named node of the active RenderStackNode tree is.
    """
    bl_idname = "rsn.mute_nodes"
    bl_label = "Mute Nodes"

    node_name: StringProperty(default='')

    @classmethod
    def poll(self, context):
        # Only available while editing a RenderStackNode tree.
        tree = context.space_data.edit_tree
        return tree and tree.bl_idname == 'RenderStackNodeTree'

    def execute(self, context):
        if self.node_name == '':
            # No explicit target: flip every selected node.
            for target in context.selected_nodes:
                target.mute = 0 if target.mute else 1
        else:
            target = bpy.context.space_data.edit_tree.nodes[self.node_name]
            target.mute = 0 if target.mute else 1
        return {'FINISHED'}
def register():
    # Standard Blender add-on hook: expose the operator to Blender.
    bpy.utils.register_class(RSN_OT_MuteNodes)
def unregister():
    # Standard Blender add-on hook: remove the operator again on unload.
    bpy.utils.unregister_class(RSN_OT_MuteNodes)
| 24.852941 | 111 | 0.662722 |
acf5f99f521a91b93c560520d6c5399d34ec5c02 | 28 | py | Python | parsewkt/__init__.py | cleder/parsewkt | 728579d79a37a5ad413abceac5e8349f70380624 | [
"BSD-2-Clause"
] | 12 | 2015-01-26T00:39:42.000Z | 2021-07-01T16:15:17.000Z | parsewkt/__init__.py | cleder/parsewkt | 728579d79a37a5ad413abceac5e8349f70380624 | [
"BSD-2-Clause"
] | 1 | 2020-05-22T08:26:09.000Z | 2020-05-24T16:58:53.000Z | parsewkt/__init__.py | cleder/parsewkt | 728579d79a37a5ad413abceac5e8349f70380624 | [
"BSD-2-Clause"
] | 3 | 2015-11-22T01:09:34.000Z | 2016-05-26T20:57:54.000Z | #
from .wkt import from_wkt
| 9.333333 | 25 | 0.75 |
acf5f9e9401d4a740c856b95f9310bcc154752ad | 3,847 | py | Python | plugins/record.py | Avedo/Ezrael | ce2b9ac40ed6100ec86267ab0636a442c7a83f7a | [
"Apache-2.0"
] | 1 | 2016-09-24T18:11:43.000Z | 2016-09-24T18:11:43.000Z | plugins/record.py | Avedo/Ezrael | ce2b9ac40ed6100ec86267ab0636a442c7a83f7a | [
"Apache-2.0"
] | 6 | 2015-05-15T13:21:13.000Z | 2015-06-23T18:47:22.000Z | plugins/record.py | Bornageek/Ezrael | ce2b9ac40ed6100ec86267ab0636a442c7a83f7a | [
"Apache-2.0"
] | 1 | 2016-02-26T14:17:38.000Z | 2016-02-26T14:17:38.000Z | __author__ = 'frissdiegurke'
# TODO: Use the new messaging interface.
import os
import json
from core.plugin import Plugin
VERSION = 0
class Echo:
    """Accumulates the lines of a recording session until it is saved."""

    # Class-level default only; every instance gets its own list in __init__.
    message = None

    def __init__(self):
        self.message = []

    def add_line(self, msg):
        """Append one recorded line."""
        self.message.append(msg)

    def revert(self, amount):
        """Drop the last *amount* recorded lines.

        Bug fix: the original ``range(1, amount)`` removed one line too
        few (``revert(1)`` removed nothing).  Also guards against popping
        more lines than have been recorded.
        """
        for _ in range(amount):
            if not self.message:
                break
            self.message.pop()
class Record(Plugin):
    """IRC plugin that records multi-line "echo" messages and replays
    them on demand via ``$<name>``.

    Admin-only commands: !record, !stop, !ignore, !revert N, !save NAME,
    !overwrite NAME, !erase NAME, !persist.
    """

    # NOTE: class-level (shared) state; the plugin is presumably
    # instantiated exactly once per bot - confirm with the plugin core.
    registry = {"__v": VERSION}
    current = {}
    registry_file = ""

    def init(self):
        # Load the persisted registry; a missing file is not an error.
        self.registry_file = os.path.join(self.context['base_path'], "plugins/data/record-registry.json")
        try:
            with open(self.registry_file, 'r') as f:
                reg = json.load(f)
            if reg["__v"] == VERSION:
                self.registry = reg
            elif reg["__v"] < VERSION:
                print("Record-Plugin: Config version conflict detected.")
        except FileNotFoundError:
            pass

    def attempt_save(self, echo, channel, nick, name, overwrite):
        # Store *echo* under *name*; refuse to clobber unless *overwrite*.
        if not overwrite and name in self.registry:
            self.send_message("Record already existing. Use !overwrite instead.", nick)
        else:
            self.registry[name] = echo.message
            # saving ends the recording session for this nick/channel
            del self.current[channel][nick]

    def on_message(self, message):
        # Dispatch: replay requests, admin commands, then recording input.
        name = message.content[1:].lower()
        # "$name" replays a stored record line by line (open to everyone)
        if len(message.content) and message.content[0] == "$" and name in self.registry:
            for l in self.registry[name]:
                self.send_message(l, message.channel)
            return
        # only admins are allowed to define/change records
        if message.nick.lower() not in self.context['admins']:
            return
        if len(message.cmd):
            if message.cmd[0] == "record":
                # start a new recording session for this nick in this channel
                if message.channel not in self.current:
                    self.current[message.channel] = {}
                self.current[message.channel][message.nick] = Echo()
                return
            if message.cmd[0] == "persist":
                # write the registry to disk, creating the directory on demand
                try:
                    with open(self.registry_file, 'w') as f:
                        json.dump(self.registry, f)
                except FileNotFoundError:
                    os.makedirs(os.path.dirname(self.registry_file))
                    with open(self.registry_file, 'w') as f:
                        json.dump(self.registry, f)
                except PermissionError:
                    print("NOTICE: No permission to persist records.")
                    self.send_message("Filesystem permission error while attempting to store records.", message.nick)
                return
            if message.cmd[0] == 'erase' and len(message.cmd) > 1:
                # NOTE(review): raises KeyError for an unknown name - confirm intended
                del self.registry[message.cmd[1].lower()]
                return
        # stop here if not recording
        if message.channel not in self.current or message.nick not in self.current[message.channel]:
            return
        echo = self.current[message.channel][message.nick]
        if len(message.cmd):
            if message.cmd[0] == 'stop':
                # abort the session without saving
                del self.current[message.channel][message.nick]
            elif message.cmd[0] == 'ignore':
                # explicitly skip this line
                pass
            elif len(message.cmd) > 1:
                if message.cmd[0] == 'revert':
                    echo.revert(int(message.cmd[1]) or 1)
                elif message.cmd[0] == 'save':
                    self.attempt_save(echo, message.channel, message.nick, message.cmd[1].lower(), False)
                elif message.cmd[0] == 'overwrite':
                    self.attempt_save(echo, message.channel, message.nick, message.cmd[1].lower(), True)
                else:
                    # unrecognized commands with arguments are recorded verbatim
                    echo.add_line(message.content)
            else:
                echo.add_line(message.content)
        else:
            # plain message while recording: append it to the recording
            echo.add_line(message.content)
| 34.657658 | 117 | 0.54744 |
acf5fc1701ad91dfe7ed7d3f07d4e38717420d96 | 2,496 | py | Python | fundamentals/measures/enterprise_value.py | marcellogoccia/deep-value-investing | 4d45cc92c157246485b638d2052596a76975ec8a | [
"MIT"
] | null | null | null | fundamentals/measures/enterprise_value.py | marcellogoccia/deep-value-investing | 4d45cc92c157246485b638d2052596a76975ec8a | [
"MIT"
] | null | null | null | fundamentals/measures/enterprise_value.py | marcellogoccia/deep-value-investing | 4d45cc92c157246485b638d2052596a76975ec8a | [
"MIT"
] | null | null | null | from utilities.common_methods import getDebugInfo
from utilities.common_methods import Methods as methods
from utilities.exchange_rates import Exchange
import fundamentals.miscellaneous as fund_utils
from fundamentals.measures.market_cap_preferred_shares import get_market_cap_preferred_shares
from fundamentals.measures.market_cap import get_market_cap
from utilities import log
def get_enterprise_value(equity, year=None, market_cap=None):
    """
    @function get_enterprise_value
    The function returns the enterprise value.

    Here computed as:
        market_cap + (total_debt + preferred_shares_market_cap
                      + minority_interest - (cash + cash_equivalents))
    with the balance-sheet part scaled by the statement's measure-unit
    multiplier and converted with the exchange rate.

    Returns None when the balance sheet, multiplier or exchange rate is
    unavailable - and implicitly (after logging) on any exception.
    """
    try:
        # get last year in digits
        if year is None:
            market_cap = get_market_cap(equity)
            year = methods.get_last_year()
        elif market_cap is None:
            market_cap = get_market_cap(equity, year)
        # extract the balance sheet we are interested in
        balance_sheet = fund_utils.gm.get_annual_financial_statement(equity.fundamentals.balance_sheet, year)
        if balance_sheet:
            exchange_rate = fund_utils.gm.get_exchange_rate(methods.validate(balance_sheet.currency), equity)
            multiplier = fund_utils.gm.get_measure_unit_multiplier(balance_sheet.measure_unit)
            if multiplier is None or exchange_rate is None:
                return None
            total_debt = methods.validate(balance_sheet.total_debt)
            minority_interest = methods.validate(balance_sheet.minority_interest)
            # divided by multiplier because it was previously multiplied by the multiplier
            market_cap_preferred_shares = get_market_cap_preferred_shares(equity, year) / multiplier
            # total_cash = methods.validate(balance_sheet.cash_and_short_term_investments)
            cash = methods.validate(balance_sheet.cash)
            cash_and_equivalents = methods.validate(balance_sheet.cash_and_equivalents)
            total_cash = cash + cash_and_equivalents
        else:
            return None
        enterprise_value_before_market_cap = total_debt + \
                                             market_cap_preferred_shares + \
                                             minority_interest - \
                                             total_cash
        enterprise_value = market_cap + (enterprise_value_before_market_cap * multiplier * exchange_rate)
        return enterprise_value
    except Exception as e:
        # NOTE(review): failures are swallowed and only logged, so the
        # function silently returns None - confirm callers treat None
        # as "no data available".
        log.error(f"There is a problem in the code!: {e}\n{getDebugInfo()}")
| 43.789474 | 109 | 0.691106 |
acf5fc8981a172f24e04dc312a8a60aac9e47259 | 2,870 | py | Python | mmdet/models/bbox_heads/cascade_ped_head.py | Ernstsen/Pedestron | 0c5aa35881561bcd0acf5de8939472efd6409256 | [
"Apache-2.0"
] | 594 | 2020-03-20T11:52:59.000Z | 2022-03-30T11:58:55.000Z | mmdet/models/bbox_heads/cascade_ped_head.py | Ernstsen/Pedestron | 0c5aa35881561bcd0acf5de8939472efd6409256 | [
"Apache-2.0"
] | 131 | 2020-03-25T09:48:04.000Z | 2022-03-30T17:54:38.000Z | mmdet/models/bbox_heads/cascade_ped_head.py | Ernstsen/Pedestron | 0c5aa35881561bcd0acf5de8939472efd6409256 | [
"Apache-2.0"
] | 128 | 2020-03-20T14:22:11.000Z | 2022-03-22T09:41:39.000Z | import torch.nn as nn
from .bbox_head import BBoxHead
from ..registry import HEADS
from ..utils import ConvModule
from ..bbox_heads.convfc_bbox_head import ConvFCBBoxHead
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmdet.core import (delta2bbox, multiclass_nms, bbox_target, force_fp32,
auto_fp16)
from ..builder import build_loss
from ..losses import accuracy
from ..registry import HEADS
@HEADS.register_module
class CascadePedFCBBoxHead(ConvFCBBoxHead):
    """Shared-FC bbox head for cascade pedestrian detection.

    A ConvFCBBoxHead configured with only shared fully-connected layers
    (no conv branches), plus a pedestrian-specific ``get_det_bboxes``
    that folds the auxiliary per-class boxes into the body box before NMS.
    """
    def __init__(self, num_fcs=2, fc_out_channels=1024, *args, **kwargs):
        assert num_fcs >= 1
        super(CascadePedFCBBoxHead, self).__init__(
            num_shared_convs=0,
            num_shared_fcs=num_fcs,
            num_cls_convs=0,
            num_cls_fcs=0,
            num_reg_convs=0,
            num_reg_fcs=0,
            fc_out_channels=fc_out_channels,
            *args,
            **kwargs)

    @force_fp32(apply_to=('cls_score', 'bbox_pred'))
    def get_det_bboxes(self,
                       rois,
                       cls_score,
                       bbox_pred,
                       img_shape,
                       scale_factor,
                       rescale=False,
                       cfg=None):
        """Decode boxes from rois/deltas; with ``cfg`` run the pedestrian
        merge + NMS and return (det_bboxes, det_labels), otherwise return
        the raw (bboxes, scores)."""
        # average the per-stage classification scores of the cascade
        if isinstance(cls_score, list):
            cls_score = sum(cls_score) / float(len(cls_score))
        scores = F.softmax(cls_score, dim=1) if cls_score is not None else None
        if bbox_pred is not None:
            bboxes = delta2bbox(rois[:, 1:], bbox_pred, self.target_means,
                                self.target_stds, img_shape)
        else:
            # no regression deltas: fall back to the (clipped) proposal boxes
            bboxes = rois[:, 1:].clone()
            if img_shape is not None:
                bboxes[:, [0, 2]].clamp_(min=0, max=img_shape[1] - 1)
                bboxes[:, [1, 3]].clamp_(min=0, max=img_shape[0] - 1)
        if rescale:
            bboxes /= scale_factor
        if cfg is None:
            return bboxes, scores
        else:
            values, indices = torch.max(scores, dim=1)
            # Columns appear to hold one 4-tuple per class (4:8 body box,
            # 8:12 class-2 box, 12:16 class-3 box) - TODO confirm layout.
            # Extrapolate a full-body box from the class-2 box: keep the
            # top edge and stretch the height by 1/0.4.
            bboxes[:, 8 + 3] = bboxes[:, 8+1] + (bboxes[:, 8+3] - bboxes[:, 8+1])/0.4
            # print(bboxes[indices==3, 1])
            # print(bboxes[indices==3, 3] - bboxes[indices==3, 1])
            # Likewise for class 3: keep the bottom edge and stretch upwards by 1/0.6.
            bboxes[:, 12 + 1] = bboxes[:, 12 + 3] - (bboxes[:, 12 + 3] - bboxes[:, 12 + 1])/0.6
            # print(bboxes[indices==3, 1])
            # where the argmax class is 2 or 3, replace the body box with the extrapolated one
            bboxes[indices==2, 4:8] = bboxes[indices==2, 8:12]
            bboxes[indices==3, 4:8] = bboxes[indices==3, 12:16]
            # collapse classes 1..3 into a single pedestrian score in column 1
            scores[:, 1] = torch.max(scores[:, 1:], dim=1)[0]
            scores[:, 2] = 0
            scores[:, 3] = 0
            det_bboxes, det_labels = multiclass_nms(bboxes, scores,
                                                    cfg.score_thr, cfg.nms,
                                                    cfg.max_per_img)
            return det_bboxes, det_labels
| 35 | 95 | 0.52892 |
acf5fce79be4f05b2e7eff4148693a4714d12520 | 450 | py | Python | src/util.py | alvarosaavedra/BladesInTheDarkGenerators | 1790e76bc15f50b4d6323d606a9e0e933338bd21 | [
"MIT"
] | null | null | null | src/util.py | alvarosaavedra/BladesInTheDarkGenerators | 1790e76bc15f50b4d6323d606a9e0e933338bd21 | [
"MIT"
] | null | null | null | src/util.py | alvarosaavedra/BladesInTheDarkGenerators | 1790e76bc15f50b4d6323d606a9e0e933338bd21 | [
"MIT"
] | null | null | null | import random
import json
def json_retreiver(json_filename):
    """Load and return the parsed contents of the JSON file *json_filename*.

    :param json_filename: path to a JSON file
    :return: the deserialized Python object (dict, list, ...)
    """
    # Encoding pinned so results do not depend on the platform locale;
    # the redundant alias variable from the original was dropped.
    with open(json_filename, encoding="utf-8") as f:
        return json.load(f)
def rc(variable):
    """rc = random choice: return one randomly chosen item of *variable*.

    A short alias kept deliberately terse so calls embedded in print
    statements stay compact.
    """
    return random.choice(variable)
| 26.470588 | 74 | 0.702222 |
acf5fd282bfe452e5f50211b7237b14315ecc115 | 2,836 | py | Python | lldb/test/API/lang/swift/different_clang_flags/TestSwiftDifferentClangFlags.py | c834606877/llvm-project | 01de58136ae4971b8d7d32a765092121f9975377 | [
"Apache-2.0"
] | 3 | 2021-06-11T17:30:05.000Z | 2022-01-29T13:46:47.000Z | lldb/test/API/lang/swift/different_clang_flags/TestSwiftDifferentClangFlags.py | WYK15/swift-Ollvm10 | ea68224ab23470963b68dfcc28b5ac769a070ea3 | [
"Apache-2.0"
] | null | null | null | lldb/test/API/lang/swift/different_clang_flags/TestSwiftDifferentClangFlags.py | WYK15/swift-Ollvm10 | ea68224ab23470963b68dfcc28b5ac769a070ea3 | [
"Apache-2.0"
] | null | null | null | # TestSwiftDifferentClangFlags.py
#
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2016 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See https://swift.org/LICENSE.txt for license information
# See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
#
# ------------------------------------------------------------------------------
"""
Test that we use the right compiler flags when debugging
"""
import lldb
from lldbsuite.test.lldbtest import *
from lldbsuite.test.decorators import *
import lldbsuite.test.lldbutil as lldbutil
import os
import os.path
import unittest2
import sys
if sys.version_info.major == 2:
import commands as subprocess
else:
import subprocess
def execute_command(command):
    """Run *command* through the shell and return its exit status.

    The command's output is discarded; callers only care about the
    status.  (Dead Python-2 debug prints were removed.)
    """
    exit_status, _output = subprocess.getstatusoutput(command)
    return exit_status
class TestSwiftDifferentClangFlags(TestBase):
    """Debug across two Swift modules built with different clang flags."""

    mydir = TestBase.compute_mydir(__file__)

    def setUp(self):
        TestBase.setUp(self)

    @skipUnlessDarwin
    @swiftTest
    @skipIf(
        debug_info=decorators.no_match("dsym"),
        bugnumber="This test requires a stripped binary and a dSYM")
    def test_swift_different_clang_flags(self):
        """Test that we use the right compiler flags when debugging"""
        self.build()
        target = self.dbg.CreateTarget(self.getBuildArtifact("main"))
        self.assertTrue(target, VALID_TARGET)
        # make both Swift modules known to the target
        self.registerSharedLibrariesWithTarget(target, ['moda', 'modb'])
        # run to the breakpoint inside modb first
        target, process, thread, modb_breakpoint = \
            lldbutil.run_to_source_breakpoint(
                self, 'break here', lldb.SBFileSpec("modb.swift"),
                exe_name=self.getBuildArtifact("main"))
        main_breakpoint = target.BreakpointCreateBySourceRegex(
            'break here',lldb.SBFileSpec('main.swift'))
        self.assertTrue(
            modb_breakpoint.GetNumLocations() > 0,
            VALID_BREAKPOINT)
        # inside modb: the struct type and its field must resolve
        var = self.frame().FindVariable("myThree")
        three = var.GetChildMemberWithName("three")
        lldbutil.check_variable(self, var, False, typename="modb.MyStruct")
        lldbutil.check_variable(self, three, False, value="3")
        process.Continue()
        # now stopped in main.swift: variable display and expression
        # evaluation must still work despite the differing clang flags
        threads = lldbutil.get_threads_stopped_at_breakpoint(
            process, main_breakpoint)
        var = self.frame().FindVariable("a")
        lldbutil.check_variable(self, var, False, value="2")
        var = self.frame().FindVariable("b")
        lldbutil.check_variable(self, var, False, value="3")
        var = self.frame().EvaluateExpression("fA()")
        lldbutil.check_variable(self, var, False, value="2")
| 32.976744 | 80 | 0.663258 |
acf5fda3f3362b06bbf39c1152b9bc75996671aa | 76 | py | Python | afterglow_core/views/public_api/__init__.py | SkynetRTN/afterglow-access-server | 3d8d62f622577fdd1ae7b0076cb536251f7bf0cd | [
"Apache-2.0"
] | 2 | 2021-05-24T15:12:07.000Z | 2022-02-17T19:58:16.000Z | afterglow_core/views/public_api/__init__.py | SkynetRTN/afterglow-access-server | 3d8d62f622577fdd1ae7b0076cb536251f7bf0cd | [
"Apache-2.0"
] | 1 | 2022-02-27T03:01:06.000Z | 2022-02-27T03:01:06.000Z | afterglow_core/views/public_api/__init__.py | SkynetRTN/afterglow-access-server | 3d8d62f622577fdd1ae7b0076cb536251f7bf0cd | [
"Apache-2.0"
] | 2 | 2021-06-08T18:16:40.000Z | 2021-07-09T14:19:49.000Z | """
Afterglow Core: views for all public API versions
"""
from . import v1
| 12.666667 | 49 | 0.697368 |
acf5fec5976455f764f6c5d788554b59d2b3110f | 1,019 | py | Python | oxe-api/resource/private/get_my_user.py | CybersecurityLuxembourg/openxeco | 8d4e5578bde6a07f5d6d569b16b4de224abf7bf0 | [
"BSD-2-Clause"
] | null | null | null | oxe-api/resource/private/get_my_user.py | CybersecurityLuxembourg/openxeco | 8d4e5578bde6a07f5d6d569b16b4de224abf7bf0 | [
"BSD-2-Clause"
] | null | null | null | oxe-api/resource/private/get_my_user.py | CybersecurityLuxembourg/openxeco | 8d4e5578bde6a07f5d6d569b16b4de224abf7bf0 | [
"BSD-2-Clause"
] | null | null | null | from flask_apispec import MethodResource
from flask_apispec import doc
from flask_jwt_extended import jwt_required, get_jwt_identity
from flask_restful import Resource
from db.db import DB
from decorator.catch_exception import catch_exception
from decorator.log_request import log_request
class GetMyUser(MethodResource, Resource):
    """REST resource returning the profile of the authenticated user."""

    def __init__(self, db: DB):
        self.db = db

    @log_request
    @doc(tags=['private'],
         description='Get information about the user authenticated by the token (excluding the password)',
         responses={
             "200": {},
             "401": {"description": "The user has not been found"},
         })
    @jwt_required
    @catch_exception
    def get(self):
        # Look up the user record matching the identity carried by the JWT.
        rows = self.db.get(self.db.tables["User"], {"id": get_jwt_identity()})
        if len(rows) == 0:
            return "", "401 The user has not been found"
        # Serialize the ORM row and strip fields that must not leak out.
        payload = rows[0].__dict__
        del payload["password"]
        del payload['_sa_instance_state']
        return payload, "200 "
| 27.540541 | 106 | 0.654563 |
acf5fefb1dd34b3fca7a898c40b29c9ee7c29953 | 3,717 | py | Python | preprocess_for_fairseq/lexicon_generator.py | SeunghyunSEO/seosh_fairseq | 443b2a8effb6b8fba5758989076cf992470ccb62 | [
"MIT"
] | null | null | null | preprocess_for_fairseq/lexicon_generator.py | SeunghyunSEO/seosh_fairseq | 443b2a8effb6b8fba5758989076cf992470ccb62 | [
"MIT"
] | 2 | 2022-02-22T08:28:06.000Z | 2022-02-22T09:26:26.000Z | preprocess_for_fairseq/lexicon_generator.py | SeunghyunSEO/seosh_fairseq | 443b2a8effb6b8fba5758989076cf992470ccb62 | [
"MIT"
] | null | null | null |
from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
import os
import re
# arpa_file='/home1/irteam/users/seosh/decoder_pratice/librispeech_model/librispeech-lm-train-norm-word-4gram.arpa'
# lex_file='/home1/irteam/users/seosh/decoder_pratice/librispeech_model/librispeech-lm-train-norm-word-4gram.lexicon'
# Input ARPA language model and output lexicon (word -> spelled-out letters).
arpa_file='/home1/irteam/users/seosh/decoder_pratice/librispeech_model/4-gram.arpa'
lex_file='/home1/irteam/users/seosh/decoder_pratice/librispeech_model/w2v2_4gram_lexicon.lexicon'
print("Writing Lexicon file - {}...".format(lex_file))
with open(lex_file, "w") as f:
    with open(arpa_file, "r") as arpa:
        for i,line in enumerate(arpa):
            # verify if the line corresponds to unigram
            # (ARPA 1-gram rows look like "<logprob>\t<word>[\t<backoff>]")
            if not re.match(r"[-]*[0-9\.]+\t\S+\t*[-]*[0-9\.]*$", line):
                continue
            # print(line)
            word = line.split("\t")[1]
            word = word.strip()
            # print(word)
            # if word == "<unk>" or word == "<s>" or word == "</s>":
            #     continue
            # skip the LM's special tokens - they have no spelling
            if word == "<UNK>" or word == "<s>" or word == "</s>":
                continue
            # NOTE(review): assert is stripped under "python -O"; words are
            # expected to be upper-case letters plus apostrophe only.
            assert re.match("^[A-Z']+$", word), "invalid word - {w}".format(w=word)
            # lexicon entry: WORD<TAB>W O R D | ("|" is the word-boundary token)
            f.write("{w}\t{s} |\n".format(w=word, s=" ".join(word)))
            print("{w}\t{s} |\n".format(w=word, s=" ".join(word)))
print("Done!", flush=True)
# arpa_file='/home1/irteam/users/seosh/decoder_pratice/librispeech_model/decoder/lm_librispeech_word_transformer/lm_librispeech_word_transformer.dict'
# lex_file='/home1/irteam/users/seosh/decoder_pratice/librispeech_model/w2v2_transformer_lm_lexicon.lexicon'
# with open(lex_file, "w") as f:
# with open(arpa_file, "r") as arpa:
# for i,line in enumerate(arpa):
# # print('line',line)
# word = line.split(" ")[0]
# word = word.strip()
# # print('word',word)
# if word == "<unk>" or word == "<s>" or word == "</s>":
# continue
# assert re.match("^[a-z']+$", word), "invalid word - {w}".format(w=word)
# f.write("{w}\t{s} |\n".format(w=word, s=" ".join(word.upper())))
# # print("{w}\t{s} |\n".format(w=word, s=" ".join(word.upper())))
# arpa_file='/home1/irteam/users/seosh/decoder_pratice/librispeech_model/decoder/lm_librispeech_word_transformer/lm_librispeech_word_transformer.dict'
# lex_file='/home1/irteam/users/seosh/decoder_pratice/librispeech_model/w2v2_transformer_lm_upper_lexicon.lexicon'
# with open(lex_file, "w") as f:
# with open(arpa_file, "r") as arpa:
# for i,line in enumerate(arpa):
# # print('line',line)
# word = line.split(" ")[0]
# word = word.strip()
# # print('word',word)
# if word == "<unk>" or word == "<s>" or word == "</s>":
# continue
# assert re.match("^[a-z']+$", word), "invalid word - {w}".format(w=word)
# f.write("{w}\t{s} |\n".format(w=word.upper(), s=" ".join(word.upper())))
# print("{w}\t{s} |\n".format(w=word.upper(), s=" ".join(word.upper())))
# with open(arpa_file, "r") as arpa:
# for i,line in enumerate(arpa):
# print('line',line)
# word = line.split(" ")[0]
# word = word.strip()
# print('word',word)
# if word == "<unk>" or word == "<s>" or word == "</s>":
# continue
# assert re.match("^[a-z']+$", word), "invalid word - {w}".format(w=word)
# # f.write("{w}\t{s} |\n".format(w=word, s=" ".join(word)))
# print("{w}\t{s} |\n".format(w=word, s=" ".join(word.upper())))
# if i == 100:
# exit() | 44.783133 | 150 | 0.565241 |
acf6014f6017816da8ef4308147267f4a628e061 | 2,066 | py | Python | kivy/p001/app/pomodo.py | Israel97f/Projetos | 75601b2c21d1c03fe8989603278cd40044443a63 | [
"MIT"
] | null | null | null | kivy/p001/app/pomodo.py | Israel97f/Projetos | 75601b2c21d1c03fe8989603278cd40044443a63 | [
"MIT"
] | null | null | null | kivy/p001/app/pomodo.py | Israel97f/Projetos | 75601b2c21d1c03fe8989603278cd40044443a63 | [
"MIT"
] | null | null | null | from itertools import cycle
from types import new_class
from kivy.lang import Builder
from kivy.clock import Clock
from kivy.properties import StringProperty, BooleanProperty
from kivymd.app import MDApp
from kivymd.uix.floatlayout import FloatLayout, MDFloatLayout
class Cycle:
    """Endless pomodoro sequence of work/break phases.

    Bug fixes: the original stored a plain list and called ``next()`` on
    it (TypeError: a list is not an iterator), and would have reused the
    same ``Time`` objects on every lap, handing out phases already
    counted down to zero.  We now cycle over the durations (minutes) and
    build a fresh ``Time`` per phase.
    """

    # three 25-minute work blocks with short breaks, then one long break
    DURATIONS = (25, 5, 25, 5, 25, 30)

    def __init__(self):
        self.cycle = cycle(self.DURATIONS)

    def __iter__(self):
        return self

    def __next__(self):
        return Time(next(self.cycle))


class Time:
    """Countdown timer for a single pomodoro phase."""

    def __init__(self, time):
        # Bug fix: was ``self.time = self.time * 60`` (AttributeError);
        # the duration is given in minutes and stored in seconds.
        self.time = time * 60

    def decrementar(self):
        """Tick one second off and return the remaining seconds."""
        self.time -= 1
        return self.time

    def __str__(self):
        # Bug fix: divmod() returns a tuple, so it must be unpacked into
        # the two "MM:SS" format fields (was passed as one argument).
        return '{:02d}:{:02d}'.format(*divmod(self.time, 60))
class Pomodoro(MDFloatLayout):
    """Widget driving the pomodoro countdown (layout in app/pomodoro.kv)."""

    timer_string = StringProperty('25:00')
    button_string = StringProperty('Iniciar')
    running = BooleanProperty(False)
    cycle = Cycle()

    def __init__(self, **kwargs):
        # Bug fixes: the original called ``seper()`` (typo for super())
        # with an undefined ``kwargs``, and wrote to ``time_string``
        # instead of the ``timer_string`` property the kv file binds to.
        super().__init__(**kwargs)
        self._time = next(self.cycle)
        self.timer_string = str(self._time)

    def start(self):
        # NOTE(review): 'Palsar'/'Reinciar' look like typos for
        # 'Pausar'/'Reiniciar' but are user-visible labels, left as-is.
        self.button_string = 'Palsar'
        if not self.running:
            self.running = True
            Clock.schedule_interval(self.update, 1)

    def stop(self):
        self.button_string = 'Reinciar'
        if self.running:
            self.running = False
            # Bug fix: without unscheduling, update() kept firing (and
            # counting down) even after the timer was paused.
            Clock.unschedule(self.update)

    def click(self):
        """Toggle between running and paused."""
        if self.running:
            self.stop()
        else:
            self.start()

    def update(self, *args):
        remaining = self._time.decrementar()
        if remaining == 0:
            self.stop()
            # advance to the next work/break phase of the cycle
            self._time = next(self.cycle)
        self.timer_string = str(self._time)
class PomoGumo(MDApp):
    """Application entry point: theme handling plus loading the kv layout."""

    def change_color(self):
        """Flip the app between the light and the dark theme."""
        current = self.theme_cls.theme_style
        self.theme_cls.theme_style = 'Light' if current == 'Dark' else 'Dark'

    def build(self):
        self.theme_cls.primary_palette = 'Purple'
        self.theme_cls.primary_hue = '500'
        return Builder.load_file('app/pomodoro.kv')
| 25.825 | 79 | 0.610842 |
acf6016062731c4aed1e9eb3a7aab7b72a698133 | 9,250 | py | Python | lale/lib/autogen/multi_task_lasso_cv.py | mfeffer/lale | 57b58843c7c14dc2e5658244280f2c1918bf030b | [
"Apache-2.0"
] | 265 | 2019-08-06T14:45:43.000Z | 2022-03-30T23:57:48.000Z | lale/lib/autogen/multi_task_lasso_cv.py | mfeffer/lale | 57b58843c7c14dc2e5658244280f2c1918bf030b | [
"Apache-2.0"
] | 467 | 2019-08-08T02:01:21.000Z | 2022-03-25T16:12:00.000Z | lale/lib/autogen/multi_task_lasso_cv.py | mfeffer/lale | 57b58843c7c14dc2e5658244280f2c1918bf030b | [
"Apache-2.0"
] | 81 | 2019-08-07T19:59:31.000Z | 2022-03-31T09:11:58.000Z | from numpy import inf, nan
from sklearn.linear_model import MultiTaskLassoCV as Op
from lale.docstrings import set_docstrings
from lale.operators import make_operator
class _MultiTaskLassoCVImpl:
    """Thin lale wrapper delegating to sklearn's MultiTaskLassoCV (``Op``)."""

    def __init__(self, **hyperparams):
        self._hyperparams = hyperparams
        self._wrapped_model = Op(**self._hyperparams)

    def fit(self, X, y=None):
        # Delegate, forwarding y only when it was supplied.
        if y is None:
            self._wrapped_model.fit(X)
        else:
            self._wrapped_model.fit(X, y)
        return self

    def predict(self, X):
        return self._wrapped_model.predict(X)
_hyperparams_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "inherited docstring for MultiTaskLassoCV Multi-task Lasso model trained with L1/L2 mixed-norm as regularizer.",
"allOf": [
{
"type": "object",
"required": [
"eps",
"n_alphas",
"alphas",
"fit_intercept",
"normalize",
"max_iter",
"tol",
"copy_X",
"cv",
"verbose",
"n_jobs",
"random_state",
"selection",
],
"relevantToOptimizer": [
"eps",
"n_alphas",
"fit_intercept",
"normalize",
"max_iter",
"tol",
"copy_X",
"cv",
],
"additionalProperties": False,
"properties": {
"eps": {
"type": "number",
"minimumForOptimizer": 0.001,
"maximumForOptimizer": 0.1,
"distribution": "loguniform",
"default": 0.001,
"description": "Length of the path",
},
"n_alphas": {
"type": "integer",
"minimumForOptimizer": 100,
"maximumForOptimizer": 101,
"distribution": "uniform",
"default": 100,
"description": "Number of alphas along the regularization path",
},
"alphas": {
"anyOf": [
{
"type": "array",
"items": {"laleType": "Any", "XXX TODO XXX": "item type"},
"XXX TODO XXX": "array-like, optional",
},
{"enum": [None]},
],
"default": None,
"description": "List of alphas where to compute the models",
},
"fit_intercept": {
"type": "boolean",
"default": True,
"description": "whether to calculate the intercept for this model",
},
"normalize": {
"type": "boolean",
"default": False,
"description": "This parameter is ignored when ``fit_intercept`` is set to False",
},
"max_iter": {
"type": "integer",
"minimumForOptimizer": 10,
"maximumForOptimizer": 1000,
"distribution": "uniform",
"default": 1000,
"description": "The maximum number of iterations.",
},
"tol": {
"type": "number",
"minimumForOptimizer": 1e-08,
"maximumForOptimizer": 0.01,
"distribution": "loguniform",
"default": 0.0001,
"description": "The tolerance for the optimization: if the updates are smaller than ``tol``, the optimization code checks the dual gap for optimality and continues until it is smaller than ``tol``.",
},
"copy_X": {
"type": "boolean",
"default": True,
"description": "If ``True``, X will be copied; else, it may be overwritten.",
},
"cv": {
"description": """Cross-validation as integer or as object that has a split function.
The fit method performs cross validation on the input dataset for per
trial, and uses the mean cross validation performance for optimization.
This behavior is also impacted by handle_cv_failure flag.
If integer: number of folds in sklearn.model_selection.StratifiedKFold.
If object with split function: generator yielding (train, test) splits
as arrays of indices. Can use any of the iterators from
https://scikit-learn.org/stable/modules/cross_validation.html#cross-validation-iterators.""",
"anyOf": [
{
"type": "integer",
"minimum": 1,
"default": 5,
"minimumForOptimizer": 3,
"maximumForOptimizer": 4,
"distribution": "uniform",
},
{"laleType": "Any", "forOptimizer": False},
],
},
"verbose": {
"anyOf": [{"type": "boolean"}, {"type": "integer"}],
"default": False,
"description": "Amount of verbosity.",
},
"n_jobs": {
"anyOf": [{"type": "integer"}, {"enum": [None]}],
"default": 1,
"description": "Number of CPUs to use during the cross validation",
},
"random_state": {
"anyOf": [
{"type": "integer"},
{"laleType": "numpy.random.RandomState"},
{"enum": [None]},
],
"default": None,
"description": "The seed of the pseudo random number generator that selects a random feature to update",
},
"selection": {
"type": "string",
"default": "cyclic",
"description": "If set to 'random', a random coefficient is updated every iteration rather than looping over features sequentially by default",
},
},
},
{
"XXX TODO XXX": "Parameter: n_jobs > only if multiple values for l1_ratio are given"
},
],
}
_input_fit_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Fit linear model with coordinate descent",
"type": "object",
"required": ["X", "y"],
"properties": {
"X": {
"laleType": "Any",
"XXX TODO XXX": "{array-like}, shape (n_samples, n_features)",
"description": "Training data",
},
"y": {
"anyOf": [
{"type": "array", "items": {"type": "number"}},
{
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
},
],
"description": "Target values",
},
},
}
_input_predict_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Predict using the linear model",
"type": "object",
"required": ["X"],
"properties": {
"X": {
"anyOf": [
{
"type": "array",
"items": {"laleType": "Any", "XXX TODO XXX": "item type"},
"XXX TODO XXX": "array_like or sparse matrix, shape (n_samples, n_features)",
},
{
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
},
],
"description": "Samples.",
}
},
}
_output_predict_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Returns predicted values.",
"type": "array",
"items": {"type": "number"},
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Combined schema for expected data and hyperparameters.",
"documentation_url": "https://scikit-learn.org/0.20/modules/generated/sklearn.linear_model.MultiTaskLassoCV#sklearn-linear_model-multitasklassocv",
"import_from": "sklearn.linear_model",
"type": "object",
"tags": {"pre": [], "op": ["estimator"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_predict": _input_predict_schema,
"output_predict": _output_predict_schema,
},
}
# Wrap the impl class plus its JSON schemas into a lale operator and
# attach the docstrings generated from those schemas.
MultiTaskLassoCV = make_operator(_MultiTaskLassoCVImpl, _combined_schemas)
set_docstrings(MultiTaskLassoCV)
| 39.194915 | 219 | 0.451243 |
acf60351b76a294f6b5363df7aecd31b6fb0c0cd | 603 | py | Python | var/spack/repos/builtin/packages/py-python-lzo/package.py | xiki-tempula/spack | 9d66c05e93ab8a933fc59915040c0e0c86a4aac4 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 9 | 2018-04-18T07:51:40.000Z | 2021-09-10T03:56:57.000Z | var/spack/repos/builtin/packages/py-python-lzo/package.py | xiki-tempula/spack | 9d66c05e93ab8a933fc59915040c0e0c86a4aac4 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 907 | 2018-04-18T11:17:57.000Z | 2022-03-31T13:20:25.000Z | var/spack/repos/builtin/packages/py-python-lzo/package.py | xiki-tempula/spack | 9d66c05e93ab8a933fc59915040c0e0c86a4aac4 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 29 | 2018-11-05T16:14:23.000Z | 2022-02-03T16:07:09.000Z | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyPythonLzo(PythonPackage):
    """This module provides Python bindings for the LZO data compression
    library."""

    homepage = "https://github.com/jd-boyd/python-lzo"
    url      = "https://pypi.io/packages/source/p/python-lzo/python-lzo-1.12.tar.gz"
    version('1.12', sha256='97a8e46825e8f1abd84c2a3372bc09adae9745a5be5d3af2692cd850dac35345')
    # The bindings link against the system LZO library.
    depends_on('lzo')
| 31.736842 | 94 | 0.739635 |
acf6036e749dbb5ab6e836db1ad6e990694724e7 | 121,270 | py | Python | 7.32.0.dev0/ietf/nomcom/tests.py | kesara/ietf-datatracker | dca3ee2ee98bcb75a10687587cf631750be34c79 | [
"Unlicense"
] | null | null | null | 7.32.0.dev0/ietf/nomcom/tests.py | kesara/ietf-datatracker | dca3ee2ee98bcb75a10687587cf631750be34c79 | [
"Unlicense"
] | null | null | null | 7.32.0.dev0/ietf/nomcom/tests.py | kesara/ietf-datatracker | dca3ee2ee98bcb75a10687587cf631750be34c79 | [
"Unlicense"
] | null | null | null | # Copyright The IETF Trust 2012-2020, All Rights Reserved
# -*- coding: utf-8 -*-
import datetime
import io
import random
import shutil
from pyquery import PyQuery
from urllib.parse import urlparse
from itertools import combinations
from django.db import IntegrityError
from django.db.models import Max
from django.conf import settings
from django.core.files import File
from django.contrib.auth.models import User
from django.urls import reverse
from django.utils.encoding import force_str
import debug # pyflakes:ignore
from ietf.dbtemplate.factories import DBTemplateFactory
from ietf.dbtemplate.models import DBTemplate
from ietf.doc.factories import DocEventFactory, WgDocumentAuthorFactory, \
NewRevisionDocEventFactory, DocumentAuthorFactory
from ietf.group.factories import GroupFactory, GroupHistoryFactory, RoleFactory, RoleHistoryFactory
from ietf.group.models import Group, Role
from ietf.meeting.factories import MeetingFactory
from ietf.message.models import Message
from ietf.nomcom.test_data import nomcom_test_data, generate_cert, check_comments, \
COMMUNITY_USER, CHAIR_USER, \
MEMBER_USER, SECRETARIAT_USER, EMAIL_DOMAIN, NOMCOM_YEAR
from ietf.nomcom.models import NomineePosition, Position, Nominee, \
NomineePositionStateName, Feedback, FeedbackTypeName, \
Nomination, FeedbackLastSeen, TopicFeedbackLastSeen, ReminderDates
from ietf.nomcom.management.commands.send_reminders import Command, is_time_to_send
from ietf.nomcom.factories import NomComFactory, FeedbackFactory, TopicFactory, \
nomcom_kwargs_for_year, provide_private_key_to_test_client, \
key
from ietf.nomcom.utils import get_nomcom_by_year, make_nomineeposition, \
get_hash_nominee_position, is_eligible, list_eligible, \
get_eligibility_date, suggest_affiliation
from ietf.person.factories import PersonFactory, EmailFactory
from ietf.person.models import Email, Person
from ietf.stats.models import MeetingRegistration
from ietf.stats.factories import MeetingRegistrationFactory
from ietf.utils.mail import outbox, empty_outbox, get_payload_text
from ietf.utils.test_utils import login_testing_unauthorized, TestCase, unicontent
# Process-wide cache so the (slow) certificate generation runs at most once.
client_test_cert_files = None


def get_cert_files():
    """Return the shared (cert, key) test files, generating them on first use."""
    global client_test_cert_files
    if client_test_cert_files is None:
        client_test_cert_files = generate_cert()
    return client_test_cert_files
def setup_test_public_keys_dir(obj):
    """Point NOMCOM_PUBLIC_KEYS_DIR at a fresh temp dir, saving the old value on *obj*."""
    obj.saved_nomcom_public_keys_dir = settings.NOMCOM_PUBLIC_KEYS_DIR
    keys_dir = obj.tempdir('nomcom-public-keys')
    obj.nomcom_public_keys_dir = keys_dir
    settings.NOMCOM_PUBLIC_KEYS_DIR = keys_dir
def teardown_test_public_keys_dir(obj):
    """Delete the temp key dir and restore the saved NOMCOM_PUBLIC_KEYS_DIR setting."""
    shutil.rmtree(obj.nomcom_public_keys_dir)
    settings.NOMCOM_PUBLIC_KEYS_DIR = obj.saved_nomcom_public_keys_dir
class NomcomViewsTest(TestCase):
"""Tests to create a new nomcom"""
def check_url_status(self, url, status):
response = self.client.get(url)
self.assertEqual(response.status_code, status)
return response
def setUp(self):
setup_test_public_keys_dir(self)
nomcom_test_data()
self.cert_file, self.privatekey_file = get_cert_files()
self.year = NOMCOM_YEAR
self.email_from = settings.NOMCOM_FROM_EMAIL.format(year=self.year)
self.assertIn(self.year, self.email_from)
# private urls
self.private_index_url = reverse('ietf.nomcom.views.private_index', kwargs={'year': self.year})
self.private_merge_person_url = reverse('ietf.nomcom.views.private_merge_person', kwargs={'year': self.year})
self.private_merge_nominee_url = reverse('ietf.nomcom.views.private_merge_nominee', kwargs={'year': self.year})
self.edit_members_url = reverse('ietf.nomcom.views.edit_members', kwargs={'year': self.year})
self.edit_nomcom_url = reverse('ietf.nomcom.views.edit_nomcom', kwargs={'year': self.year})
self.private_nominate_url = reverse('ietf.nomcom.views.private_nominate', kwargs={'year': self.year})
self.private_nominate_newperson_url = reverse('ietf.nomcom.views.private_nominate_newperson', kwargs={'year': self.year})
self.add_questionnaire_url = reverse('ietf.nomcom.views.private_questionnaire', kwargs={'year': self.year})
self.private_feedback_url = reverse('ietf.nomcom.views.private_feedback', kwargs={'year': self.year})
self.positions_url = reverse('ietf.nomcom.views.list_positions', kwargs={'year': self.year})
self.edit_position_url = reverse('ietf.nomcom.views.edit_position', kwargs={'year': self.year})
# public urls
self.index_url = reverse('ietf.nomcom.views.year_index', kwargs={'year': self.year})
self.history_url = reverse('ietf.nomcom.views.history')
self.requirements_url = reverse('ietf.nomcom.views.requirements', kwargs={'year': self.year})
self.questionnaires_url = reverse('ietf.nomcom.views.questionnaires', kwargs={'year': self.year})
self.public_feedback_url = reverse('ietf.nomcom.views.public_feedback', kwargs={'year': self.year})
self.public_nominate_url = reverse('ietf.nomcom.views.public_nominate', kwargs={'year': self.year})
self.public_nominate_newperson_url = reverse('ietf.nomcom.views.public_nominate_newperson', kwargs={'year': self.year})
    def tearDown(self):
        # Remove the temporary public-key directory and restore settings.
        teardown_test_public_keys_dir(self)
def access_member_url(self, url):
login_testing_unauthorized(self, COMMUNITY_USER, url)
login_testing_unauthorized(self, CHAIR_USER, url)
self.check_url_status(url, 200)
self.client.logout()
login_testing_unauthorized(self, MEMBER_USER, url)
return self.check_url_status(url, 200)
def access_chair_url(self, url):
login_testing_unauthorized(self, COMMUNITY_USER, url)
login_testing_unauthorized(self, MEMBER_USER, url)
login_testing_unauthorized(self, CHAIR_USER, url)
return self.check_url_status(url, 200)
def access_secretariat_url(self, url):
login_testing_unauthorized(self, COMMUNITY_USER, url)
login_testing_unauthorized(self, CHAIR_USER, url)
login_testing_unauthorized(self, SECRETARIAT_USER, url)
return self.check_url_status(url, 200)
    def test_private_index_view(self):
        """Verify private home view"""
        self.access_member_url(self.private_index_url)
        # Verify that nominee table has links to person and feedback pages
        nom_pos = self.create_nominee('accepted', COMMUNITY_USER, 'APP')
        person_url = reverse('ietf.person.views.profile',
                             kwargs={'email_or_name': nom_pos.nominee.name()})
        feedback_url = reverse('ietf.nomcom.views.view_feedback_nominee',
                               kwargs={'year': self.year, 'nominee_id': nom_pos.nominee.pk})
        # With a single nominee, the first row will have our data.
        # Require that the row have at least one link to the person URL
        # and one to the feedback URL.
        response = self.client.get(self.private_index_url)
        q = PyQuery(response.content)
        # Only inspect the first data row of the nominee table.
        row_q = q('#nominee-position-table tbody tr').eq(0)
        self.assertTrue(row_q('a[href="%s"]' % (person_url)),
                        'Nominee table does not link to nominee profile page')
        self.assertTrue(row_q('a[href="%s#comment"]' % (feedback_url)),
                        'Nominee table does not link to nominee feedback page')
        self.client.logout()
def create_nominee(self, base_state, username, pos_name):
cnominee = Nominee.objects.get(email__person__user__username=username)
position = Position.objects.get(name=pos_name)
return NomineePosition.objects.create(position=position,
nominee=cnominee,
state=NomineePositionStateName.objects.get(slug=base_state))
def create_nominees_for_states(self, base_state):
nom_pos = self.create_nominee(base_state, COMMUNITY_USER, 'APP')
self.create_nominee(base_state, COMMUNITY_USER, 'INT')
self.create_nominee(base_state, COMMUNITY_USER, 'OAM')
return nom_pos
def test_private_index_post_accept(self):
nom_pos = self.create_nominees_for_states('pending')
login_testing_unauthorized(self, CHAIR_USER, self.private_index_url)
test_data = {"action": "set_as_accepted",
"selected": [nom_pos.pk]}
r = self.client.post(self.private_index_url, test_data)
self.assertEqual(r.status_code, 200)
q = PyQuery(r.content)
self.assertTrue(q('.alert-success'))
self.assertEqual(NomineePosition.objects.filter(state='accepted').count (), 1)
self.client.logout()
def test_private_index_post_decline(self):
nom_pos = self.create_nominees_for_states('pending')
login_testing_unauthorized(self, CHAIR_USER, self.private_index_url)
test_data = {"action": "set_as_declined",
"selected": [nom_pos.pk]}
r = self.client.post(self.private_index_url, test_data)
self.assertEqual(r.status_code, 200)
q = PyQuery(r.content)
self.assertTrue(q('.alert-success'))
self.assertEqual(NomineePosition.objects.filter(state='declined').count (), 1)
self.client.logout()
def test_private_index_post_pending(self):
nom_pos = self.create_nominees_for_states('declined')
login_testing_unauthorized(self, CHAIR_USER, self.private_index_url)
test_data = {"action": "set_as_pending",
"selected": [nom_pos.pk]}
r = self.client.post(self.private_index_url, test_data)
self.assertEqual(r.status_code, 200)
q = PyQuery(r.content)
self.assertTrue(q('.alert-success'))
self.assertEqual(NomineePosition.objects.filter(state='pending').count (), 1)
self.client.logout()
    def test_private_merge_view(self):
        """Verify private nominee merge view.

        Builds nominations, questionnaires and comments for four nominee
        addresses, then merges three of them into the first and checks that
        all feedback and position state ends up on the primary nominee.
        """
        nominees = ['nominee0@example.com',
                    'nominee1@example.com',
                    'nominee2@example.com',
                    'nominee3@example.com']

        # do nominations
        login_testing_unauthorized(self, COMMUNITY_USER, self.public_nominate_url)
        self.nominate_view(public=True,
                           nominee_email=nominees[0],
                           position='IAOC')
        self.nominate_view(public=True,
                           nominee_email=nominees[0],
                           position='IAOC')
        self.nominate_view(public=True,
                           nominee_email=nominees[0],
                           position='IAB')
        self.nominate_view(public=True,
                           nominee_email=nominees[0],
                           position='TSV')
        self.nominate_view(public=True,
                           nominee_email=nominees[1],
                           position='IAOC')
        self.nominate_view(public=True,
                           nominee_email=nominees[1],
                           position='IAOC')
        self.nominate_view(public=True,
                           nominee_email=nominees[2],
                           position='IAB')
        self.nominate_view(public=True,
                           nominee_email=nominees[2],
                           position='IAB')
        self.nominate_view(public=True,
                           nominee_email=nominees[3],
                           position='TSV')
        self.nominate_view(public=True,
                           nominee_email=nominees[3],
                           position='TSV')
        # Check nominee positions
        # 6 distinct (nominee, position) pairs from the 10 nominations above.
        self.assertEqual(NomineePosition.objects.count(), 6)
        self.assertEqual(Feedback.objects.nominations().count(), 10)

        # Accept and declined nominations
        nominee_position = NomineePosition.objects.get(position__name='IAOC',
                                                       nominee__email__address=nominees[0])
        nominee_position.state = NomineePositionStateName.objects.get(slug='accepted')
        nominee_position.save()
        nominee_position = NomineePosition.objects.get(position__name='IAOC',
                                                       nominee__email__address=nominees[1])
        nominee_position.state = NomineePositionStateName.objects.get(slug='declined')
        nominee_position.save()
        nominee_position = NomineePosition.objects.get(position__name='IAB',
                                                       nominee__email__address=nominees[2])
        nominee_position.state = NomineePositionStateName.objects.get(slug='declined')
        nominee_position.save()
        nominee_position = NomineePosition.objects.get(position__name='TSV',
                                                       nominee__email__address=nominees[3])
        nominee_position.state = NomineePositionStateName.objects.get(slug='accepted')
        nominee_position.save()

        self.client.logout()

        # fill questionnaires (internally the function does new nominations)
        self.access_chair_url(self.add_questionnaire_url)
        self.add_questionnaire(public=False,
                               nominee_email=nominees[0],
                               position='IAOC')
        self.add_questionnaire(public=False,
                               nominee_email=nominees[1],
                               position='IAOC')
        self.add_questionnaire(public=False,
                               nominee_email=nominees[2],
                               position='IAB')
        self.add_questionnaire(public=False,
                               nominee_email=nominees[3],
                               position='TSV')
        self.assertEqual(Feedback.objects.questionnaires().count(), 4)

        self.client.logout()

        ## Add feedbacks (internally the function does new nominations)
        self.access_member_url(self.private_feedback_url)
        self.feedback_view(public=False,
                           nominee_email=nominees[0],
                           position='IAOC')
        self.feedback_view(public=False,
                           nominee_email=nominees[1],
                           position='IAOC')
        self.feedback_view(public=False,
                           nominee_email=nominees[2],
                           position='IAB')
        self.feedback_view(public=False,
                           nominee_email=nominees[3],
                           position='TSV')

        self.assertEqual(Feedback.objects.comments().count(), 4)
        # 10 original + 4 via add_questionnaire + 4 via feedback_view.
        self.assertEqual(Feedback.objects.nominations().count(), 18)
        self.assertEqual(Feedback.objects.nominations().filter(nominees__email__address=nominees[0]).count(), 6)
        self.assertEqual(Feedback.objects.nominations().filter(nominees__email__address=nominees[1]).count(), 4)
        self.assertEqual(Feedback.objects.nominations().filter(nominees__email__address=nominees[2]).count(), 4)
        self.assertEqual(Feedback.objects.nominations().filter(nominees__email__address=nominees[3]).count(), 4)
        for nominee in nominees:
            self.assertEqual(Feedback.objects.comments().filter(nominees__email__address=nominee).count(),
                             1)
            self.assertEqual(Feedback.objects.questionnaires().filter(nominees__email__address=nominee).count(),
                             1)

        self.client.logout()

        ## merge nominations
        self.access_chair_url(self.private_merge_nominee_url)

        # primary address listed among the secondaries -> form error
        test_data = {"secondary_emails": "%s, %s" % (nominees[0], nominees[1]),
                     "primary_email": nominees[0]}
        response = self.client.post(self.private_merge_nominee_url, test_data)
        self.assertEqual(response.status_code, 200)
        q = PyQuery(response.content)
        self.assertTrue(q("form .has-error"))

        # empty secondaries -> form error
        test_data = {"primary_email": nominees[0],
                     "secondary_emails": ""}
        response = self.client.post(self.private_merge_nominee_url, test_data)
        self.assertEqual(response.status_code, 200)
        q = PyQuery(response.content)
        self.assertTrue(q("form .has-error"))

        # empty primary -> form error
        test_data = {"primary_email": "",
                     "secondary_emails": nominees[0]}
        response = self.client.post(self.private_merge_nominee_url, test_data)
        self.assertEqual(response.status_code, 200)
        q = PyQuery(response.content)
        self.assertTrue(q("form .has-error"))

        # unknown primary address -> form error
        test_data = {"primary_email": "unknown@example.com",
                     "secondary_emails": nominees[0]}
        response = self.client.post(self.private_merge_nominee_url, test_data)
        self.assertEqual(response.status_code, 200)
        q = PyQuery(response.content)
        self.assertTrue(q("form .has-error"))

        # unknown secondary address -> form error
        test_data = {"primary_email": nominees[0],
                     "secondary_emails": "unknown@example.com"}
        response = self.client.post(self.private_merge_nominee_url, test_data)
        self.assertEqual(response.status_code, 200)
        q = PyQuery(response.content)
        self.assertTrue(q("form .has-error"))

        # valid merge: nominees 1-3 folded into nominee 0
        test_data = {"secondary_emails": """%s,
                                            %s,
                                            %s""" % (nominees[1], nominees[2], nominees[3]),
                     "primary_email": nominees[0]}

        response = self.client.post(self.private_merge_nominee_url, test_data)
        self.assertEqual(response.status_code, 302)
        redirect_url = response["Location"]
        redirect_path = urlparse(redirect_url).path
        self.assertEqual(redirect_path, reverse('ietf.nomcom.views.private_index', kwargs={"year": NOMCOM_YEAR}))

        response = self.client.get(redirect_url)
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, "alert-success")

        # The merged nominees are kept but marked as duplicates of the primary.
        self.assertEqual(Nominee.objects.filter(email__address=nominees[1],
                                                duplicated__isnull=False).count(), 1)
        self.assertEqual(Nominee.objects.filter(email__address=nominees[2],
                                                duplicated__isnull=False).count(), 1)
        self.assertEqual(Nominee.objects.filter(email__address=nominees[3],
                                                duplicated__isnull=False).count(), 1)

        nominee = Nominee.objects.get(email__address=nominees[0])

        # All nominations and feedback now point at the primary nominee.
        self.assertEqual(Nomination.objects.filter(nominee=nominee).count(), 18)
        self.assertEqual(Feedback.objects.nominations().filter(nominees__in=[nominee]).count(),
                         18)
        self.assertEqual(Feedback.objects.comments().filter(nominees__in=[nominee]).count(),
                         4)
        self.assertEqual(Feedback.objects.questionnaires().filter(nominees__in=[nominee]).count(),
                         4)

        for nominee_email in nominees[1:]:
            self.assertEqual(Feedback.objects.nominations().filter(nominees__email__address=nominee_email).count(),
                             0)
            self.assertEqual(Feedback.objects.comments().filter(nominees__email__address=nominee_email).count(),
                             0)
            self.assertEqual(Feedback.objects.questionnaires().filter(nominees__email__address=nominee_email).count(),
                             0)

        self.assertEqual(NomineePosition.objects.filter(nominee=nominee).count(), 3)

        # Check nominations state
        self.assertEqual(NomineePosition.objects.get(position__name='TSV',
                                                     nominee=nominee).state.slug, 'accepted')
        self.assertEqual(NomineePosition.objects.get(position__name='IAOC',
                                                     nominee=nominee).state.slug, 'accepted')
        self.assertEqual(NomineePosition.objects.get(position__name='IAB',
                                                     nominee=nominee).state.slug, 'declined')

        self.client.logout()
def change_members(self, members):
members_emails = ','.join(['%s%s' % (member, EMAIL_DOMAIN) for member in members])
test_data = {'members': members_emails,}
self.client.post(self.edit_members_url, test_data)
    def test_edit_members_view(self):
        """Verify edit member view"""
        self.access_chair_url(self.edit_members_url)
        self.change_members([CHAIR_USER, COMMUNITY_USER])

        # check member actions: the newly-added community user can now see
        # the private index
        self.client.login(username=COMMUNITY_USER,password=COMMUNITY_USER+"+password")
        self.check_url_status(self.private_index_url, 200)
        self.client.logout()

        # revert edit nomcom members
        login_testing_unauthorized(self, CHAIR_USER, self.edit_members_url)
        self.change_members([CHAIR_USER])
        self.client.logout()

        # once removed, the community user is denied again
        self.client.login(username=COMMUNITY_USER,password=COMMUNITY_USER+"+password")
        self.check_url_status(self.private_index_url, 403)
        self.client.logout()
    def test_edit_nomcom_view(self):
        """Chair can upload a public key and add/remove reminder dates via the edit view."""
        r = self.access_chair_url(self.edit_nomcom_url)
        q = PyQuery(r.content)
        reminder_date = '%s-09-30' % self.year

        # Upload the public key and set one reminder date; the formset
        # management fields are scraped from the rendered page.
        f = io.open(self.cert_file.name)
        response = self.client.post(self.edit_nomcom_url, {
            'public_key': f,
            'reminderdates_set-TOTAL_FORMS': q('input[name="reminderdates_set-TOTAL_FORMS"]').val(),
            'reminderdates_set-INITIAL_FORMS': q('input[name="reminderdates_set-INITIAL_FORMS"]').val(),
            'reminderdates_set-MAX_NUM_FORMS': q('input[name="reminderdates_set-MAX_NUM_FORMS"]').val(),
            'reminderdates_set-0-date': reminder_date,
        })
        f.close()
        self.assertEqual(response.status_code, 200)

        nominee = Nominee.objects.get(email__person__user__username=COMMUNITY_USER)
        position = Position.objects.get(name='OAM')
        comment_text = 'Plain text. Comments with accents äöåÄÖÅ éáíóú âêîôû ü àèìòù.'
        nomcom = get_nomcom_by_year(self.year)
        feedback = Feedback.objects.create(nomcom=nomcom,
                                           comments=nomcom.encrypt(comment_text),
                                           type=FeedbackTypeName.objects.get(slug='nomina'))
        feedback.positions.add(position)
        feedback.nominees.add(nominee)

        # to check feedback comments are saved as encrypted data with the
        # freshly-uploaded key
        self.assertNotEqual(feedback.comments, comment_text)
        self.assertEqual(check_comments(feedback.comments, comment_text, self.privatekey_file), True)

        # Check that the set reminder date is present
        reminder_dates = dict([ (d.id,str(d.date)) for d in nomcom.reminderdates_set.all() ])
        self.assertIn(reminder_date, list(reminder_dates.values()))

        # Remove reminder date: post the formset row back with an empty date
        q = PyQuery(response.content) # from previous post
        r = self.client.post(self.edit_nomcom_url, {
            'reminderdates_set-TOTAL_FORMS': q('input[name="reminderdates_set-TOTAL_FORMS"]').val(),
            'reminderdates_set-INITIAL_FORMS': q('input[name="reminderdates_set-INITIAL_FORMS"]').val(),
            'reminderdates_set-MAX_NUM_FORMS': q('input[name="reminderdates_set-MAX_NUM_FORMS"]').val(),
            'reminderdates_set-0-id': str(list(reminder_dates.keys())[0]),
            'reminderdates_set-0-date': '',
        })
        self.assertEqual(r.status_code, 200)

        # Check that reminder date has been removed
        reminder_dates = dict([ (d.id,str(d.date)) for d in ReminderDates.objects.filter(nomcom=nomcom) ])
        self.assertNotIn(reminder_date, list(reminder_dates.values()))

        self.client.logout()
def test_list_positions(self):
login_testing_unauthorized(self, CHAIR_USER, self.positions_url)
def test_list_positions_add(self):
nomcom = get_nomcom_by_year(self.year)
count = nomcom.position_set.all().count()
login_testing_unauthorized(self, CHAIR_USER, self.edit_position_url)
test_data = {"action" : "add", "name": "testpos" }
r = self.client.post(self.edit_position_url, test_data)
self.assertEqual(r.status_code, 302)
self.assertEqual(nomcom.position_set.all().count(), count+1)
    def test_index_view(self):
        """Verify home view"""
        # Public page; must render without any login.
        self.check_url_status(self.index_url, 200)
    def test_history_view(self):
        """Verify history view"""
        # Public page; must render without any login.
        self.check_url_status(self.history_url, 200)
    def test_announcements_view(self):
        """Announcements page lists messages related to the nomcom group."""
        nomcom = Group.objects.get(acronym="nomcom%s" % self.year, type="nomcom")
        msg = Message.objects.create(
            by=Person.objects.all()[0],
            subject="This is a test",
            to="test@example.com",
            frm="nomcomchair@example.com",
            body="Hello World!",
            content_type="text/plain",
        )
        msg.related_groups.add(nomcom)

        r = self.client.get(reverse('ietf.nomcom.views.announcements'))
        self.assertEqual(r.status_code, 200)
        # Page shows the year heading, the chair's address and the message subject.
        self.assertContains(r, ("Messages from %s" % nomcom.time.year))
        self.assertContains(r, nomcom.role_set.filter(name="chair")[0].person.email_address())
        self.assertContains(r, msg.subject)
    def test_requirements_view(self):
        """Verify requirements view"""
        # Public page; must render without any login.
        self.check_url_status(self.requirements_url, 200)
    def test_questionnaires_view(self):
        """Verify questionnaires view"""
        # Public page; must render without any login.
        self.check_url_status(self.questionnaires_url, 200)
    def test_public_nominate(self):
        """A public nomination with confirmation sends nominee, chair and receipt mail."""
        login_testing_unauthorized(self, COMMUNITY_USER, self.public_nominate_url)

        messages_before = len(outbox)

        self.nominate_view(public=True,confirmation=True)

        # Three messages: nominee notification, chair notification, nominator receipt.
        self.assertEqual(len(outbox), messages_before + 3)

        self.assertEqual('IETF Nomination Information', outbox[-3]['Subject'])
        self.assertEqual(self.email_from, outbox[-3]['From'])
        self.assertIn('nominee', outbox[-3]['To'])

        self.assertEqual('Nomination Information', outbox[-2]['Subject'])
        self.assertEqual(self.email_from, outbox[-2]['From'])
        self.assertIn('nomcomchair', outbox[-2]['To'])

        self.assertEqual('Nomination receipt', outbox[-1]['Subject'])
        self.assertEqual(self.email_from, outbox[-1]['From'])
        self.assertIn('plain', outbox[-1]['To'])
        self.assertIn('Comments with accents äöå', get_payload_text(outbox[-1]))

        # Nominate the same person for the same position again without asking for confirmation
        messages_before = len(outbox)

        self.nominate_view(public=True)

        # Only the chair notification is sent this time.
        self.assertEqual(len(outbox), messages_before + 1)

        self.assertEqual('Nomination Information', outbox[-1]['Subject'])
        self.assertEqual(self.email_from, outbox[-1]['From'])
        self.assertIn('nomcomchair', outbox[-1]['To'])
def test_private_nominate(self):
self.access_member_url(self.private_nominate_url)
return self.nominate_view(public=False)
self.client.logout()
    def test_public_nominate_newperson(self):
        """Nominating a brand-new person also notifies the secretariat of the created record."""
        login_testing_unauthorized(self, COMMUNITY_USER, self.public_nominate_url)

        messages_before = len(outbox)

        self.nominate_newperson_view(public=True,confirmation=True)

        # Four messages: new-person notice plus the usual three nomination mails.
        self.assertEqual(len(outbox), messages_before + 4)

        self.assertEqual('New person is created', outbox[-4]['Subject'])
        self.assertEqual(self.email_from, outbox[-4]['From'])
        self.assertIn('secretariat', outbox[-4]['To'])

        self.assertEqual('IETF Nomination Information', outbox[-3]['Subject'])
        self.assertEqual(self.email_from, outbox[-3]['From'])
        self.assertIn('nominee', outbox[-3]['To'])

        self.assertEqual('Nomination Information', outbox[-2]['Subject'])
        self.assertEqual(self.email_from, outbox[-2]['From'])
        self.assertIn('nomcomchair', outbox[-2]['To'])

        self.assertEqual('Nomination receipt', outbox[-1]['Subject'])
        self.assertEqual(self.email_from, outbox[-1]['From'])
        self.assertIn('plain', outbox[-1]['To'])
        self.assertIn('Comments with accents äöå', get_payload_text(outbox[-1]))

        # Nominate the same person for the same position again without asking for confirmation
        messages_before = len(outbox)

        self.nominate_view(public=True)

        # Only the chair notification is sent this time.
        self.assertEqual(len(outbox), messages_before + 1)

        self.assertEqual('Nomination Information', outbox[-1]['Subject'])
        self.assertEqual(self.email_from, outbox[-1]['From'])
        self.assertIn('nomcomchair', outbox[-1]['To'])
def test_private_nominate_newperson(self):
self.access_member_url(self.private_nominate_url)
return self.nominate_newperson_view(public=False)
self.client.logout()
def test_private_nominate_newperson_who_already_exists(self):
EmailFactory(address='nominee@example.com')
self.access_member_url(self.private_nominate_newperson_url)
return self.nominate_newperson_view(public=False)
    def test_public_nominate_with_automatic_questionnaire(self):
        """With send_questionnaire enabled, a nomination also mails the questionnaire."""
        nomcom = get_nomcom_by_year(self.year)
        nomcom.send_questionnaire = True
        nomcom.save()
        login_testing_unauthorized(self, COMMUNITY_USER, self.public_nominate_url)
        empty_outbox()
        self.nominate_view(public=True)
        self.assertEqual(len(outbox), 3)
        # test_public_nominate checks the other messages
        self.assertEqual(self.email_from, outbox[-1]['From'])
        self.assertIn('Questionnaire', outbox[1]['Subject'])
        self.assertIn('nominee@', outbox[1]['To'])
    def nominate_view(self, *args, **kwargs):
        """Submit a nomination through the public or private form and verify the result.

        Keyword arguments: public (default True), searched_email,
        nominee_email, nominator_email, position, confirmation.  The nominee
        Email/Person records are created on demand.  Verifies the form is
        only shown once the nomcom has a public key, that the Feedback
        comment is stored encrypted, and that the Nomination row exists.
        """
        public = kwargs.pop('public', True)
        searched_email = kwargs.pop('searched_email', None)
        nominee_email = kwargs.pop('nominee_email', 'nominee@example.com')
        if not searched_email:
            searched_email = Email.objects.filter(address=nominee_email).first()
            if not searched_email:
                searched_email = EmailFactory(address=nominee_email, primary=True, origin='test')
        if not searched_email.person:
            searched_email.person = PersonFactory()
            searched_email.save()
        nominator_email = kwargs.pop('nominator_email', "%s%s" % (COMMUNITY_USER, EMAIL_DOMAIN))
        position_name = kwargs.pop('position', 'IAOC')
        confirmation = kwargs.pop('confirmation', False)

        if public:
            nominate_url = self.public_nominate_url
        else:
            nominate_url = self.private_nominate_url
        response = self.client.get(nominate_url)
        self.assertEqual(response.status_code, 200)

        # Without a public key the nominate form must not be rendered.
        nomcom = get_nomcom_by_year(self.year)
        if not nomcom.public_key:
            q = PyQuery(response.content)
            self.assertEqual(len(q("#nominate-form")), 0)

        # save the cert file in tmp
        #nomcom.public_key.storage.location = tempfile.gettempdir()
        nomcom.public_key.save('cert', File(io.open(self.cert_file.name, 'r')))

        response = self.client.get(nominate_url)
        self.assertEqual(response.status_code, 200)
        q = PyQuery(response.content)
        self.assertEqual(len(q("#nominate-form")), 1)

        position = Position.objects.get(name=position_name)
        comment_text = 'Test nominate view. Comments with accents äöåÄÖÅ éáíóú âêîôû ü àèìòù.'
        candidate_phone = '123456'

        test_data = {'searched_email': searched_email.pk,
                     'candidate_phone': candidate_phone,
                     'position': position.id,
                     'qualifications': comment_text,
                     'confirmation': confirmation}
        if not public:
            test_data['nominator_email'] = nominator_email

        response = self.client.post(nominate_url, test_data)
        self.assertEqual(response.status_code, 200)
        q = PyQuery(response.content)
        self.assertContains(response, "alert-success")

        # check objects
        nominee = Nominee.objects.get(email=searched_email)
        NomineePosition.objects.get(position=position, nominee=nominee)
        feedback = Feedback.objects.filter(positions__in=[position],
                                           nominees__in=[nominee],
                                           type=FeedbackTypeName.objects.get(slug='nomina')).latest('id')
        if public:
            self.assertEqual(feedback.author, nominator_email)

        # to check feedback comments are saved as encrypted data
        self.assertNotEqual(feedback.comments, comment_text)

        self.assertEqual(check_comments(feedback.comments, comment_text, self.privatekey_file), True)
        Nomination.objects.get(position=position,
                               candidate_name=nominee.person.plain_name(),
                               candidate_email=searched_email.address,
                               candidate_phone=candidate_phone,
                               nominee=nominee,
                               comments=feedback,
                               nominator_email="%s%s" % (COMMUNITY_USER, EMAIL_DOMAIN))
    def nominate_newperson_view(self, *args, **kwargs):
        """Submit a 'new person' nomination and verify the created records.

        Keyword arguments: public (default True), nominee_email,
        nominator_email, position, confirmation.  If the candidate address
        already exists in the datatracker the form must reject it; otherwise
        Email, Person, Nominee, NomineePosition, encrypted Feedback and
        Nomination records are all created and checked.
        """
        public = kwargs.pop('public', True)
        nominee_email = kwargs.pop('nominee_email', 'nominee@example.com')
        nominator_email = kwargs.pop('nominator_email', "%s%s" % (COMMUNITY_USER, EMAIL_DOMAIN))
        position_name = kwargs.pop('position', 'IAOC')
        confirmation = kwargs.pop('confirmation', False)

        if public:
            nominate_url = self.public_nominate_newperson_url
        else:
            nominate_url = self.private_nominate_newperson_url
        response = self.client.get(nominate_url)
        self.assertEqual(response.status_code, 200)

        # Without a public key the nominate form must not be rendered.
        nomcom = get_nomcom_by_year(self.year)
        if not nomcom.public_key:
            q = PyQuery(response.content)
            self.assertEqual(len(q("#nominate-form")), 0)

        # save the cert file in tmp
        #nomcom.public_key.storage.location = tempfile.gettempdir()
        nomcom.public_key.save('cert', File(io.open(self.cert_file.name, 'r')))

        response = self.client.get(nominate_url)
        self.assertEqual(response.status_code, 200)
        q = PyQuery(response.content)
        self.assertEqual(len(q("#nominate-form")), 1)

        position = Position.objects.get(name=position_name)
        candidate_email = nominee_email
        candidate_name = 'nominee'
        comment_text = 'Test nominate view. Comments with accents äöåÄÖÅ éáíóú âêîôû ü àèìòù.'
        candidate_phone = '123456'

        test_data = {'candidate_name': candidate_name,
                     'candidate_email': candidate_email,
                     'candidate_phone': candidate_phone,
                     'position': position.id,
                     'qualifications': comment_text,
                     'confirmation': confirmation}
        if not public:
            test_data['nominator_email'] = nominator_email

        if Email.objects.filter(address=nominee_email).exists():
            # An existing address must be rejected without a redirect.
            response = self.client.post(nominate_url, test_data,follow=True)
            self.assertFalse(response.redirect_chain)
            self.assertEqual(response.status_code, 200)
            self.assertIn('already in the datatracker',unicontent(response))
        else:
            response = self.client.post(nominate_url, test_data,follow=True)
            self.assertTrue(response.redirect_chain)
            self.assertEqual(response.status_code, 200)
            q = PyQuery(response.content)
            self.assertContains(response, "alert-success")

            # check objects
            email = Email.objects.get(address=candidate_email)
            Person.objects.get(name=candidate_name)
            nominee = Nominee.objects.get(email=email)
            NomineePosition.objects.get(position=position, nominee=nominee)
            feedback = Feedback.objects.filter(positions__in=[position],
                                               nominees__in=[nominee],
                                               type=FeedbackTypeName.objects.get(slug='nomina')).latest('id')
            if public:
                self.assertEqual(feedback.author, nominator_email)

            # to check feedback comments are saved as encrypted data
            self.assertNotEqual(feedback.comments, comment_text)

            self.assertEqual(check_comments(feedback.comments, comment_text, self.privatekey_file), True)
            Nomination.objects.get(position=position,
                                   candidate_name=candidate_name,
                                   candidate_email=candidate_email,
                                   candidate_phone=candidate_phone,
                                   nominee=nominee,
                                   comments=feedback,
                                   nominator_email="%s%s" % (COMMUNITY_USER, EMAIL_DOMAIN))
def test_add_questionnaire(self):
self.access_chair_url(self.add_questionnaire_url)
return self.add_questionnaire()
self.client.logout()
    def add_questionnaire(self, *args, **kwargs):
        """Nominate then submit a questionnaire response; verify encrypted storage.

        Keyword arguments: public (default False), nominee_email,
        nominator_email, position.  Internally performs a nomination first
        so the nominee exists.
        """
        public = kwargs.pop('public', False)
        nominee_email = kwargs.pop('nominee_email', 'nominee@example.com')
        nominator_email = kwargs.pop('nominator_email', "%s%s" % (COMMUNITY_USER, EMAIL_DOMAIN))
        position_name = kwargs.pop('position', 'IAOC')

        self.nominate_view(public=public,
                           nominee_email=nominee_email,
                           position=position_name,
                           nominator_email=nominator_email)

        response = self.client.get(self.add_questionnaire_url)
        self.assertEqual(response.status_code, 200)

        # Without a public key the questionnaire form must not be rendered.
        nomcom = get_nomcom_by_year(self.year)
        if not nomcom.public_key:
            self.assertNotContains(response, "questionnnaireform")

        # save the cert file in tmp
        #nomcom.public_key.storage.location = tempfile.gettempdir()
        nomcom.public_key.save('cert', File(io.open(self.cert_file.name, 'r')))

        response = self.client.get(self.add_questionnaire_url)
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, "questionnnaireform")

        position = Position.objects.get(name=position_name)
        nominee = Nominee.objects.get(email__address=nominee_email)

        comment_text = 'Test add questionnaire view. Comments with accents äöåÄÖÅ éáíóú âêîôû ü àèìòù.'

        test_data = {'comment_text': comment_text,
                     'nominee': '%s_%s' % (position.id, nominee.id)}

        response = self.client.post(self.add_questionnaire_url, test_data)

        self.assertContains(response, "alert-success")

        ## check objects
        feedback = Feedback.objects.filter(positions__in=[position],
                                           nominees__in=[nominee],
                                           type=FeedbackTypeName.objects.get(slug='questio')).latest('id')

        ## to check feedback comments are saved as encrypted data
        self.assertNotEqual(feedback.comments, comment_text)

        self.assertEqual(check_comments(feedback.comments, comment_text, self.privatekey_file), True)
    def test_public_feedback(self):
        """Public feedback with confirmation sends a receipt; without, only the internal mail."""
        login_testing_unauthorized(self, COMMUNITY_USER, self.public_feedback_url)

        position = "IAOC"
        empty_outbox()
        self.feedback_view(public=True, confirmation=True, position=position)
        # feedback_view does a nomination internally: there is a lot of email related to that - tested elsewhere
        # We're interested in the confirmation receipt here
        self.assertEqual(len(outbox),3)
        self.assertEqual('NomCom comment confirmation', outbox[2]['Subject'])
        email_body = get_payload_text(outbox[2])
        self.assertIn(position, email_body)
        # No unexpanded template placeholders may leak into the mail.
        self.assertNotIn('$', email_body)
        self.assertEqual(self.email_from, outbox[-2]['From'])
        self.assertIn('plain', outbox[2]['To'])
        self.assertIn('Comments with accents äöå', get_payload_text(outbox[2]))

        empty_outbox()
        self.feedback_view(public=True)
        # No confirmation requested: no receipt mail this time.
        self.assertEqual(len(outbox),1)
        self.assertNotIn('confirmation', outbox[0]['Subject'])
def test_private_feedback(self):
self.access_member_url(self.private_feedback_url)
return self.feedback_view(public=False)
    def feedback_view(self, *args, **kwargs):
        """Drive the public or private feedback form end to end.

        Accepted keyword arguments (all optional):
          public          -- use the public (True) or private (False) feedback URL
          nominee_email   -- address of the nominee the feedback is about
          nominator_email -- address recorded as the feedback author (public form)
          position        -- position name the feedback refers to
          confirmation    -- whether an email confirmation receipt is requested
        """
        public = kwargs.pop('public', True)
        nominee_email = kwargs.pop('nominee_email', 'nominee@example.com')
        nominator_email = kwargs.pop('nominator_email', "%s%s" % (COMMUNITY_USER, EMAIL_DOMAIN))
        position_name = kwargs.pop('position', 'IAOC')
        confirmation = kwargs.pop('confirmation', False)
        # Create the nominee/position pairing the feedback will be attached to.
        self.nominate_view(public=public,
                           nominee_email=nominee_email,
                           position=position_name,
                           nominator_email=nominator_email)
        feedback_url = self.public_feedback_url
        if not public:
            feedback_url = self.private_feedback_url
        response = self.client.get(feedback_url)
        self.assertEqual(response.status_code, 200)
        nomcom = get_nomcom_by_year(self.year)
        # Without a public key the feedback form must not be offered at all.
        if not nomcom.public_key:
            self.assertNotContains(response, "feedbackform")
        # save the cert file in tmp
        #nomcom.public_key.storage.location = tempfile.gettempdir()
        nomcom.public_key.save('cert', File(io.open(self.cert_file.name, 'r')))
        response = self.client.get(feedback_url)
        self.assertEqual(response.status_code, 200)
        # Still no form: a nominee/position pair has not been selected yet.
        self.assertNotContains(response, "feedbackform")
        position = Position.objects.get(name=position_name)
        nominee = Nominee.objects.get(email__address=nominee_email)
        feedback_url += "?nominee=%d&position=%d" % (nominee.id, position.id)
        response = self.client.get(feedback_url)
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, "feedbackform")
        # Test for a link to the nominee's profile page
        q = PyQuery(response.content)
        person_url = reverse('ietf.person.views.profile', kwargs={'email_or_name': nominee.name()})
        self.assertTrue(q('a[href="%s"]' % (person_url)),
                        'Nominee feedback page does not link to profile page')
        comments = 'Test feedback view. Comments with accents äöåÄÖÅ éáíóú âêîôû ü àèìòù.'
        test_data = {'comment_text': comments,
                     'position_name': position.name,
                     'nominee_name': nominee.email.person.name,
                     'nominee_email': nominee.email.address,
                     'confirmation': confirmation}
        if public:
            test_data['nominator_email'] = nominator_email
            test_data['nominator_name'] = nominator_email
        nominee_position = NomineePosition.objects.get(nominee=nominee,
                                                       position=position)
        state = nominee_position.state
        # Posting feedback for a not-yet-accepted nomination must re-render
        # the form with errors; accept the nomination before retrying.
        if state.slug != 'accepted':
            response = self.client.post(feedback_url, test_data)
            self.assertEqual(response.status_code, 200)
            q = PyQuery(response.content)
            self.assertTrue(q("form .has-error"))
            # accept nomination
            nominee_position.state = NomineePositionStateName.objects.get(slug='accepted')
            nominee_position.save()
        response = self.client.post(feedback_url, test_data)
        self.assertContains(response, "alert-success")
        self.assertNotContains(response, "feedbackform")
        ## check objects
        feedback = Feedback.objects.filter(positions__in=[position],
                                           nominees__in=[nominee],
                                           type=FeedbackTypeName.objects.get(slug='comment')).latest('id')
        if public:
            self.assertEqual(feedback.author, nominator_email)
        ## to check feedback comments are saved like encrypted data
        self.assertNotEqual(feedback.comments, comments)
        self.assertEqual(check_comments(feedback.comments, comments, self.privatekey_file), True)
        # recovery state
        if state != nominee_position.state:
            nominee_position.state = state
            nominee_position.save()
class NomineePositionStateSaveTest(TestCase):
    """Tests for the NomineePosition save override method"""
    def setUp(self):
        # Each test needs the populated nomcom fixture and a known nominee.
        setup_test_public_keys_dir(self)
        nomcom_test_data()
        self.nominee = Nominee.objects.get(email__person__user__username=COMMUNITY_USER)
    def tearDown(self):
        teardown_test_public_keys_dir(self)
    def test_state_autoset(self):
        """Verify state is autoset correctly"""
        position = Position.objects.get(name='APP')
        nominee_position = NomineePosition.objects.create(position=position,
                                                          nominee=self.nominee)
        # save() is expected to default a missing state to 'pending'.
        self.assertEqual(nominee_position.state.slug, 'pending')
    def test_state_specified(self):
        """Verify state if specified"""
        position = Position.objects.get(name='INT')
        nominee_position = NomineePosition.objects.create(position=position,
                                                          nominee=self.nominee,
                                                          state=NomineePositionStateName.objects.get(slug='accepted'))
        # An explicitly supplied state must not be overridden on save().
        self.assertEqual(nominee_position.state.slug, 'accepted')
    def test_nomine_position_unique(self):
        """Verify nominee and position are unique together"""
        position = Position.objects.get(name='OAM')
        NomineePosition.objects.create(position=position,
                                       nominee=self.nominee)
        nominee_position = NomineePosition(position=position, nominee=self.nominee)
        # Saving a duplicate (nominee, position) pair must hit the DB constraint.
        self.assertRaises(IntegrityError, nominee_position.save)
class FeedbackTest(TestCase):
    """Feedback model round trip: comments are stored encrypted, not as plain text."""
    def setUp(self):
        setup_test_public_keys_dir(self)
        nomcom_test_data()
        self.cert_file, self.privatekey_file = get_cert_files()
    def tearDown(self):
        teardown_test_public_keys_dir(self)
    def test_encrypted_comments(self):
        """Comments encrypted with the nomcom key differ from the plain text and decrypt back."""
        nominee = Nominee.objects.get(email__person__user__username=COMMUNITY_USER)
        position = Position.objects.get(name='OAM')
        nomcom = position.nomcom
        # save the cert file in tmp
        #nomcom.public_key.storage.location = tempfile.gettempdir()
        nomcom.public_key.save('cert', File(io.open(self.cert_file.name, 'r')))
        comment_text = 'Plain text. Comments with accents äöåÄÖÅ éáíóú âêîôû ü àèìòù.'
        comments = nomcom.encrypt(comment_text)
        feedback = Feedback.objects.create(nomcom=nomcom,
                                           comments=comments,
                                           type=FeedbackTypeName.objects.get(slug='nomina'))
        feedback.positions.add(position)
        feedback.nominees.add(nominee)
        # to check feedback comments are saved like encrypted data
        self.assertNotEqual(feedback.comments, comment_text)
        self.assertEqual(check_comments(feedback.comments, comment_text, self.privatekey_file), True)
class ReminderTest(TestCase):
    """Reminder mails for pending acceptances and unanswered questionnaires."""
    def setUp(self):
        setup_test_public_keys_dir(self)
        nomcom_test_data()
        self.nomcom = get_nomcom_by_year(NOMCOM_YEAR)
        self.cert_file, self.privatekey_file = get_cert_files()
        #self.nomcom.public_key.storage.location = tempfile.gettempdir()
        self.nomcom.public_key.save('cert', File(io.open(self.cert_file.name, 'r')))
        gen = Position.objects.get(nomcom=self.nomcom,name='GEN')
        rai = Position.objects.get(nomcom=self.nomcom,name='RAI')
        iab = Position.objects.get(nomcom=self.nomcom,name='IAB')
        today = datetime.date.today()
        t_minus_3 = today - datetime.timedelta(days=3)
        t_minus_4 = today - datetime.timedelta(days=4)
        e1 = EmailFactory(address="nominee1@example.org", person=PersonFactory(name="Nominee 1"), origin='test')
        e2 = EmailFactory(address="nominee2@example.org", person=PersonFactory(name="Nominee 2"), origin='test')
        # nominee1: GEN still pending (3 days old), IAB accepted (3 days old).
        n = make_nomineeposition(self.nomcom,e1.person,gen,None)
        np = n.nomineeposition_set.get(position=gen)
        np.time = t_minus_3
        np.save()
        n = make_nomineeposition(self.nomcom,e1.person,iab,None)
        np = n.nomineeposition_set.get(position=iab)
        np.state = NomineePositionStateName.objects.get(slug='accepted')
        np.time = t_minus_3
        np.save()
        # nominee2: RAI still pending (4 days old), GEN accepted (4 days old).
        n = make_nomineeposition(self.nomcom,e2.person,rai,None)
        np = n.nomineeposition_set.get(position=rai)
        np.time = t_minus_4
        np.save()
        n = make_nomineeposition(self.nomcom,e2.person,gen,None)
        np = n.nomineeposition_set.get(position=gen)
        np.state = NomineePositionStateName.objects.get(slug='accepted')
        np.time = t_minus_4
        np.save()
        # Record questionnaire feedback for nominee2's GEN position so that
        # pairing needs no questionnaire reminder.
        feedback = Feedback.objects.create(nomcom=self.nomcom,
                                           comments=self.nomcom.encrypt('some non-empty comments'),
                                           type=FeedbackTypeName.objects.get(slug='questio'),
                                           user=User.objects.get(username=CHAIR_USER))
        feedback.positions.add(gen)
        feedback.nominees.add(n)
    def tearDown(self):
        teardown_test_public_keys_dir(self)
    def test_is_time_to_send(self):
        """Reminders fire exactly on interval boundaries or on configured dates."""
        self.nomcom.reminder_interval = 4
        today = datetime.date.today()
        # Due exactly on the 4th day after the reference date...
        self.assertTrue(is_time_to_send(self.nomcom,today+datetime.timedelta(days=4),today))
        # ...but on no earlier day.
        for delta in range(4):
            self.assertFalse(is_time_to_send(self.nomcom,today+datetime.timedelta(days=delta),today))
        # Without an interval only explicitly configured reminder dates fire.
        self.nomcom.reminder_interval = None
        self.assertFalse(is_time_to_send(self.nomcom,today,today))
        self.nomcom.reminderdates_set.create(date=today)
        self.assertTrue(is_time_to_send(self.nomcom,today,today))
    def test_command(self):
        """The management command mails the nominees matching the reminder interval."""
        c = Command()
        messages_before=len(outbox)
        self.nomcom.reminder_interval = 3
        self.nomcom.save()
        c.handle(None,None)
        # Interval 3 matches the 3-day-old records: nominee1 gets both a
        # questionnaire ('please complete') and an acceptance ('please accept') mail.
        self.assertEqual(len(outbox), messages_before + 2)
        self.assertIn('nominee1@example.org', outbox[-1]['To'])
        self.assertIn('please complete', outbox[-1]['Subject'])
        self.assertIn('nominee1@example.org', outbox[-2]['To'])
        self.assertIn('please accept', outbox[-2]['Subject'])
        messages_before=len(outbox)
        self.nomcom.reminder_interval = 4
        self.nomcom.save()
        c.handle(None,None)
        # Interval 4 matches the 4-day-old records: nominee2 only needs an
        # acceptance reminder (questionnaire feedback already exists from setUp).
        self.assertEqual(len(outbox), messages_before + 1)
        self.assertIn('nominee2@example.org', outbox[-1]['To'])
        self.assertIn('please accept', outbox[-1]['Subject'])
    def test_remind_accept_view(self):
        """The chair's manual 'accept' reminder mails every selected nominee."""
        url = reverse('ietf.nomcom.views.send_reminder_mail', kwargs={'year': NOMCOM_YEAR,'type':'accept'})
        login_testing_unauthorized(self, CHAIR_USER, url)
        messages_before=len(outbox)
        test_data = {'selected': [x.id for x in Nominee.objects.filter(nomcom=self.nomcom)]}
        response = self.client.post(url, test_data)
        self.assertEqual(response.status_code, 200)
        # Both nominees still have a pending position, so both are reminded.
        self.assertEqual(len(outbox), messages_before + 2)
        self.assertIn('nominee1@', outbox[-2]['To'])
        self.assertIn('nominee2@', outbox[-1]['To'])
    def test_remind_questionnaire_view(self):
        """The chair's manual questionnaire reminder skips already-answered nominees."""
        url = reverse('ietf.nomcom.views.send_reminder_mail', kwargs={'year': NOMCOM_YEAR,'type':'questionnaire'})
        login_testing_unauthorized(self, CHAIR_USER, url)
        messages_before=len(outbox)
        test_data = {'selected': [x.id for x in Nominee.objects.filter(nomcom=self.nomcom)]}
        response = self.client.post(url, test_data)
        self.assertEqual(response.status_code, 200)
        # Only nominee1 has an accepted position lacking questionnaire feedback.
        self.assertEqual(len(outbox), messages_before + 1)
        self.assertIn('nominee1@', outbox[-1]['To'])
class InactiveNomcomTests(TestCase):
    """A concluded nomcom is read-only: entry and edit views must refuse changes."""
    def setUp(self):
        setup_test_public_keys_dir(self)
        # group__state_id='conclude' makes this nomcom inactive.
        self.nc = NomComFactory.create(**nomcom_kwargs_for_year(group__state_id='conclude'))
        self.plain_person = PersonFactory.create()
        self.chair = self.nc.group.role_set.filter(name='chair').first().person
        self.member = self.nc.group.role_set.filter(name='member').first().person
    def tearDown(self):
        teardown_test_public_keys_dir(self)
    def test_feedback_closed(self):
        """Feedback pages render read-only; POSTs neither store feedback nor send mail."""
        for view in ['ietf.nomcom.views.public_feedback', 'ietf.nomcom.views.private_feedback']:
            url = reverse(view, kwargs={'year': self.nc.year()})
            who = self.plain_person if 'public' in view else self.member
            login_testing_unauthorized(self, who.user.username, url)
            response = self.client.get(url)
            self.assertEqual(response.status_code, 200)
            q = PyQuery(response.content)
            self.assertIn( '(Concluded)', q('h1').text())
            self.assertIn( 'closed', q('#instructions').text())
            # Nominees are still listed, but without active links.
            self.assertTrue( q('#nominees a') )
            self.assertFalse( q('#nominees a[href]') )
            url += "?nominee=%d&position=%d" % (self.nc.nominee_set.order_by('pk').first().id, self.nc.nominee_set.order_by('pk').first().nomineeposition_set.order_by('pk').first().position.id)
            response = self.client.get(url)
            self.assertEqual(response.status_code, 200)
            q = PyQuery(response.content)
            self.assertFalse( q('#feedbackform'))
            # A POST must create no feedback objects and send no mail.
            empty_outbox()
            fb_before = self.nc.feedback_set.count()
            test_data = {'comment_text': 'Test feedback view. Comments with accents äöåÄÖÅ éáíóú âêîôû ü àèìòù.',
                         'nominator_email': self.plain_person.email_set.first().address,
                         'confirmation': True}
            response = self.client.post(url, test_data)
            self.assertEqual(response.status_code, 200)
            q = PyQuery(response.content)
            self.assertIn( 'closed', q('#instructions').text())
            self.assertEqual( len(outbox), 0 )
            self.assertEqual( fb_before, self.nc.feedback_set.count() )
    def test_nominations_closed(self):
        """Nomination pages tell the visitor the nomcom is concluded/closed."""
        for view in ['ietf.nomcom.views.public_nominate', 'ietf.nomcom.views.private_nominate']:
            url = reverse(view, kwargs={'year': self.nc.year() })
            who = self.plain_person if 'public' in view else self.member
            login_testing_unauthorized(self, who.user.username, url)
            response = self.client.get(url)
            self.assertEqual(response.status_code, 200)
            q = PyQuery(response.content)
            self.assertIn( '(Concluded)', q('h1').text())
            self.assertIn( 'closed', q('.alert-warning').text())
    def test_acceptance_closed(self):
        """Accept/decline links are refused once the nomcom has concluded."""
        today = datetime.date.today().strftime('%Y%m%d')
        pid = self.nc.position_set.first().nomineeposition_set.order_by('pk').first().id
        url = reverse('ietf.nomcom.views.process_nomination_status', kwargs = {
                          'year' : self.nc.year(),
                          'nominee_position_id' : pid,
                          'state' : 'accepted',
                          'date' : today,
                          'hash' : get_hash_nominee_position(today,pid),
                      })
        response = self.client.get(url)
        self.assertEqual(response.status_code, 403)
    def test_can_view_but_cannot_edit_nomcom_settings(self):
        url = reverse('ietf.nomcom.views.edit_nomcom',kwargs={'year':self.nc.year() })
        login_testing_unauthorized(self, self.chair.user.username, url)
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)
        # Viewing is allowed; modifying is not.
        response = self.client.post(url,{})
        self.assertEqual(response.status_code, 403)
    def test_cannot_classify_feedback(self):
        url = reverse('ietf.nomcom.views.view_feedback_pending',kwargs={'year':self.nc.year() })
        login_testing_unauthorized(self, self.chair.user.username, url)
        provide_private_key_to_test_client(self)
        response = self.client.get(url)
        self.assertEqual(response.status_code, 403)
        response = self.client.post(url,{})
        self.assertEqual(response.status_code, 403)
    def test_cannot_modify_nominees(self):
        """The private index omits the batch-action form and rejects batch actions."""
        url = reverse('ietf.nomcom.views.private_index', kwargs={'year':self.nc.year()})
        login_testing_unauthorized(self, self.chair.user.username, url)
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)
        q = PyQuery(response.content)
        self.assertFalse( q('#batch-action-form'))
        test_data = {"action": "set_as_pending",
                     "selected": [1]}
        response = self.client.post(url, test_data)
        self.assertEqual(response.status_code, 200)
        q = PyQuery(response.content)
        self.assertIn('not active', q('.alert-warning').text() )
    def test_email_pasting_closed(self):
        url = reverse('ietf.nomcom.views.private_feedback_email', kwargs={'year':self.nc.year()})
        login_testing_unauthorized(self, self.chair.user.username, url)
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)
        q = PyQuery(response.content)
        self.assertFalse( q('#paste-email-feedback-form'))
        test_data = {"email_text": "some garbage text",
                    }
        response = self.client.post(url, test_data)
        self.assertEqual(response.status_code, 200)
        q = PyQuery(response.content)
        self.assertIn('not active', q('.alert-warning').text() )
    def test_questionnaire_entry_closed(self):
        url = reverse('ietf.nomcom.views.private_questionnaire', kwargs={'year':self.nc.year()})
        login_testing_unauthorized(self, self.chair.user.username, url)
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)
        q = PyQuery(response.content)
        self.assertFalse( q('#questionnaireform'))
        response = self.client.post(url, {})
        self.assertEqual(response.status_code, 200)
        q = PyQuery(response.content)
        self.assertIn('not active', q('.alert-warning').text() )
    def _test_send_reminders_closed(self,rtype):
        # Shared helper: the reminder form for `rtype` is absent and POSTs are refused.
        url = reverse('ietf.nomcom.views.send_reminder_mail', kwargs={'year':self.nc.year(),'type':rtype })
        login_testing_unauthorized(self, self.chair.user.username, url)
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)
        q = PyQuery(response.content)
        self.assertFalse( q('#reminderform'))
        response = self.client.post(url, {})
        self.assertEqual(response.status_code, 200)
        q = PyQuery(response.content)
        self.assertIn('not active', q('.alert-warning').text() )
    def test_send_accept_reminders_closed(self):
        self._test_send_reminders_closed('accept')
    def test_send_questionnaire_reminders_closed(self):
        self._test_send_reminders_closed('questionnaire')
    def test_merge_closed(self):
        url = reverse('ietf.nomcom.views.private_merge_person', kwargs={'year':self.nc.year()})
        login_testing_unauthorized(self, self.chair.user.username, url)
        response = self.client.get(url)
        q = PyQuery(response.content)
        self.assertFalse( q('#mergeform'))
        response = self.client.post(url, {})
        self.assertEqual(response.status_code, 200)
        q = PyQuery(response.content)
        self.assertIn('not active', q('.alert-warning').text() )
    def test_cannot_edit_position(self):
        url = reverse('ietf.nomcom.views.edit_position',kwargs={'year':self.nc.year(),'position_id':self.nc.position_set.first().id})
        login_testing_unauthorized(self, self.chair.user.username, url)
        provide_private_key_to_test_client(self)
        response = self.client.get(url)
        self.assertEqual(response.status_code, 403)
        response = self.client.post(url,{})
        self.assertEqual(response.status_code, 403)
    def test_cannot_add_position(self):
        url = reverse('ietf.nomcom.views.edit_position',kwargs={'year':self.nc.year()})
        login_testing_unauthorized(self, self.chair.user.username, url)
        provide_private_key_to_test_client(self)
        response = self.client.get(url)
        self.assertEqual(response.status_code, 403)
        response = self.client.post(url,{})
        self.assertEqual(response.status_code, 403)
    def test_cannot_delete_position(self):
        url = reverse('ietf.nomcom.views.remove_position',kwargs={'year':self.nc.year(),'position_id':self.nc.position_set.first().id})
        login_testing_unauthorized(self, self.chair.user.username, url)
        provide_private_key_to_test_client(self)
        response = self.client.get(url)
        self.assertEqual(response.status_code, 403)
        response = self.client.post(url,{})
        self.assertEqual(response.status_code, 403)
    def test_can_view_but_not_edit_templates(self):
        """The template page renders but its edit form is suppressed."""
        template = DBTemplateFactory.create(group=self.nc.group,
                                            title='Test template',
                                            path='/nomcom/'+self.nc.group.acronym+'/test',
                                            variables='',
                                            type_id='plain',
                                            content='test content')
        url = reverse('ietf.nomcom.views.edit_template',kwargs={'year':self.nc.year(), 'template_id':template.id})
        login_testing_unauthorized(self, self.chair.user.username, url)
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)
        q = PyQuery(response.content)
        self.assertFalse( q('#templateform') )
class FeedbackLastSeenTests(TestCase):
    """'New feedback' badges track per-reviewer last-seen timestamps."""
    def setUp(self):
        setup_test_public_keys_dir(self)
        self.nc = NomComFactory.create(**nomcom_kwargs_for_year())
        self.author = PersonFactory.create().email_set.first().address
        self.member = self.nc.group.role_set.filter(name='member').first().person
        self.nominee = self.nc.nominee_set.order_by('pk').first()
        self.position = self.nc.position_set.first()
        self.topic = self.nc.topic_set.first()
        # Three feedback items attached to a nominee/position, plus one on a topic.
        for type_id in ['comment','nomina','questio']:
            f = FeedbackFactory.create(author=self.author,nomcom=self.nc,type_id=type_id)
            f.positions.add(self.position)
            f.nominees.add(self.nominee)
        f = FeedbackFactory.create(author=self.author,nomcom=self.nc,type_id='comment')
        f.topics.add(self.topic)
        # Reference times used to move feedback and last-seen markers around "now".
        now = datetime.datetime.now()
        self.hour_ago = now - datetime.timedelta(hours=1)
        self.half_hour_ago = now - datetime.timedelta(minutes=30)
        self.second_from_now = now + datetime.timedelta(seconds=1)
    def tearDown(self):
        teardown_test_public_keys_dir(self)
    def test_feedback_index_badges(self):
        """Badge count on the feedback index shrinks as the reviewer's last-seen advances."""
        url = reverse('ietf.nomcom.views.view_feedback',kwargs={'year':self.nc.year()})
        login_testing_unauthorized(self, self.member.user.username, url)
        provide_private_key_to_test_client(self)
        response = self.client.get(url)
        self.assertEqual(response.status_code,200)
        q = PyQuery(response.content)
        # All four feedback items are unseen initially.
        self.assertEqual( len(q('.label-success')), 4 )
        # Age one item so it predates the last-seen marker created below.
        f = self.nc.feedback_set.first()
        f.time = self.hour_ago
        f.save()
        FeedbackLastSeen.objects.create(reviewer=self.member,nominee=self.nominee)
        FeedbackLastSeen.objects.update(time=self.half_hour_ago)
        response = self.client.get(url)
        self.assertEqual(response.status_code,200)
        q = PyQuery(response.content)
        self.assertEqual( len(q('.label-success')), 3 )
        # Advancing last-seen past "now" clears the nominee-related badges.
        FeedbackLastSeen.objects.update(time=self.second_from_now)
        response = self.client.get(url)
        self.assertEqual(response.status_code,200)
        q = PyQuery(response.content)
        self.assertEqual( len(q('.label-success')), 1 )
        # The remaining topic badge clears once the topic last-seen advances too.
        TopicFeedbackLastSeen.objects.create(reviewer=self.member,topic=self.topic)
        TopicFeedbackLastSeen.objects.update(time=self.second_from_now)
        response = self.client.get(url)
        self.assertEqual(response.status_code,200)
        q = PyQuery(response.content)
        self.assertEqual( len(q('.label-success')), 0 )
    def test_feedback_nominee_badges(self):
        """Badges on the per-nominee feedback page honor FeedbackLastSeen."""
        url = reverse('ietf.nomcom.views.view_feedback_nominee', kwargs={'year':self.nc.year(), 'nominee_id':self.nominee.id})
        login_testing_unauthorized(self, self.member.user.username, url)
        provide_private_key_to_test_client(self)
        response = self.client.get(url)
        self.assertEqual(response.status_code,200)
        q = PyQuery(response.content)
        # The three nominee-attached items are unseen initially.
        self.assertEqual( len(q('.label-success')), 3 )
        f = self.nc.feedback_set.first()
        f.time = self.hour_ago
        f.save()
        FeedbackLastSeen.objects.create(reviewer=self.member,nominee=self.nominee)
        FeedbackLastSeen.objects.update(time=self.half_hour_ago)
        response = self.client.get(url)
        self.assertEqual(response.status_code,200)
        q = PyQuery(response.content)
        self.assertEqual( len(q('.label-success')), 2 )
        FeedbackLastSeen.objects.update(time=self.second_from_now)
        response = self.client.get(url)
        self.assertEqual(response.status_code,200)
        q = PyQuery(response.content)
        self.assertEqual( len(q('.label-success')), 0 )
    def test_feedback_topic_badges(self):
        """Badges on the per-topic feedback page honor TopicFeedbackLastSeen."""
        url = reverse('ietf.nomcom.views.view_feedback_topic', kwargs={'year':self.nc.year(), 'topic_id':self.topic.id})
        login_testing_unauthorized(self, self.member.user.username, url)
        provide_private_key_to_test_client(self)
        response = self.client.get(url)
        self.assertEqual(response.status_code,200)
        q = PyQuery(response.content)
        # The single topic-attached item is unseen initially.
        self.assertEqual( len(q('.label-success')), 1 )
        f = self.topic.feedback_set.first()
        f.time = self.hour_ago
        f.save()
        TopicFeedbackLastSeen.objects.create(reviewer=self.member,topic=self.topic)
        TopicFeedbackLastSeen.objects.update(time=self.half_hour_ago)
        response = self.client.get(url)
        self.assertEqual(response.status_code,200)
        q = PyQuery(response.content)
        self.assertEqual( len(q('.label-success')), 0 )
        TopicFeedbackLastSeen.objects.update(time=self.second_from_now)
        response = self.client.get(url)
        self.assertEqual(response.status_code,200)
        q = PyQuery(response.content)
        self.assertEqual( len(q('.label-success')), 0 )
class NewActiveNomComTests(TestCase):
    def setUp(self):
        # Fresh, active nomcom and its chair; remember the link-expiry setting
        # so tests that change it can be undone in tearDown.
        setup_test_public_keys_dir(self)
        self.nc = NomComFactory.create(**nomcom_kwargs_for_year())
        self.chair = self.nc.group.role_set.filter(name='chair').first().person
        self.saved_days_to_expire_nomination_link = settings.DAYS_TO_EXPIRE_NOMINATION_LINK
    def tearDown(self):
        teardown_test_public_keys_dir(self)
        # Restore the setting saved in setUp (tests may modify it).
        settings.DAYS_TO_EXPIRE_NOMINATION_LINK = self.saved_days_to_expire_nomination_link
def test_help(self):
url = reverse('ietf.nomcom.views.configuration_help',kwargs={'year':self.nc.year()})
login_testing_unauthorized(self, self.chair.user.username, url)
response = self.client.get(url)
self.assertEqual(response.status_code,200)
    def test_accept_reject_nomination_edges(self):
        """Error paths of process_nomination_status: repeated state, expired link, bad hash."""
        self.client.logout()
        np = self.nc.nominee_set.order_by('pk').first().nomineeposition_set.order_by('pk').first()
        kwargs={'year':self.nc.year(),
                'nominee_position_id':np.id,
                'state':'accepted',
                'date':np.time.strftime("%Y%m%d"),
                'hash':get_hash_nominee_position(np.time.strftime("%Y%m%d"),np.id),
               }
        url = reverse('ietf.nomcom.views.process_nomination_status', kwargs=kwargs)
        response = self.client.get(url)
        self.assertEqual(response.status_code,403)
        # The position is already in the requested state, so the view refuses.
        self.assertIn('already was', unicontent(response))
        # Backdate the record past the (shortened) expiry window and rebuild the link.
        settings.DAYS_TO_EXPIRE_NOMINATION_LINK = 2
        np.time = np.time - datetime.timedelta(days=3)
        np.save()
        kwargs['date'] = np.time.strftime("%Y%m%d")
        kwargs['hash'] = get_hash_nominee_position(np.time.strftime("%Y%m%d"),np.id)
        url = reverse('ietf.nomcom.views.process_nomination_status', kwargs=kwargs)
        response = self.client.get(url)
        self.assertEqual(response.status_code,403)
        self.assertIn('Link expired', unicontent(response))
        # A tampered hash must be rejected outright.
        kwargs['hash'] = 'bad'
        url = reverse('ietf.nomcom.views.process_nomination_status', kwargs=kwargs)
        response = self.client.get(url)
        self.assertEqual(response.status_code,403)
        self.assertIn('Bad hash!', unicontent(response))
def test_accept_reject_nomination_comment(self):
np = self.nc.nominee_set.order_by('pk').first().nomineeposition_set.order_by('pk').first()
hash = get_hash_nominee_position(np.time.strftime("%Y%m%d"),np.id)
url = reverse('ietf.nomcom.views.process_nomination_status',
kwargs={'year':self.nc.year(),
'nominee_position_id':np.id,
'state':'accepted',
'date':np.time.strftime("%Y%m%d"),
'hash':hash,
}
)
np.state_id='pending'
np.save()
response = self.client.get(url)
self.assertEqual(response.status_code,200)
feedback_count_before = Feedback.objects.count()
response = self.client.post(url,{})
# This view uses Yaco-style POST handling
self.assertEqual(response.status_code,200)
self.assertEqual(Feedback.objects.count(),feedback_count_before)
np.state_id='pending'
np.save()
response = self.client.post(url,{'comments':'A nonempty comment'})
self.assertEqual(response.status_code,200)
self.assertEqual(Feedback.objects.count(),feedback_count_before+1)
def test_provide_private_key(self):
url = reverse('ietf.nomcom.views.private_key',kwargs={'year':self.nc.year()})
login_testing_unauthorized(self,self.chair.user.username,url)
response = self.client.get(url)
self.assertEqual(response.status_code,200)
response = self.client.post(url,{'key': force_str(key)})
self.assertEqual(response.status_code,302)
    def test_email_pasting(self):
        """Pasting a raw email into the private feedback-email form creates one Feedback."""
        url = reverse('ietf.nomcom.views.private_feedback_email',kwargs={'year':self.nc.year()})
        login_testing_unauthorized(self,self.chair.user.username,url)
        response = self.client.get(url)
        self.assertEqual(response.status_code,200)
        fb_count_before = Feedback.objects.count()
        # Submit a raw message; one new Feedback object should result.
        response = self.client.post(url,{'email_text':"""To: rjsparks@nostrum.com
From: Robert Sparks <rjsparks@nostrum.com>
Subject: Junk message for feedback testing =?iso-8859-1?q?p=F6stal?=
Message-ID: <566F2FE5.1050401@nostrum.com>
Date: Mon, 14 Dec 2015 15:08:53 -0600
Content-Type: text/plain; charset=utf-8; format=flowed
Content-Transfer-Encoding: 7bit
Junk body for testing
"""})
        self.assertEqual(response.status_code,200)
        self.assertEqual(Feedback.objects.count(),fb_count_before+1)
    def test_simple_feedback_pending(self):
        """Classify single pending feedback items through each supported category."""
        url = reverse('ietf.nomcom.views.view_feedback_pending',kwargs={'year':self.nc.year() })
        login_testing_unauthorized(self, self.chair.user.username, url)
        provide_private_key_to_test_client(self)
        # test simple classification when there's only one thing to classify
        # junk is the only category you can set directly from the first form the view presents
        fb = FeedbackFactory(nomcom=self.nc,type_id=None)
        response = self.client.get(url)
        self.assertEqual(response.status_code,200)
        response = self.client.post(url, {'form-TOTAL_FORMS': 1,
                                          'form-INITIAL_FORMS': 1,
                                          'form-0-id': fb.id,
                                          'form-0-type': 'junk',
                                         })
        self.assertEqual(response.status_code,302)
        fb = Feedback.objects.get(id=fb.id)
        self.assertEqual(fb.type_id,'junk')
        # comments, nominations, and questionnaire responses are categorized via a second
        # formset presented by the view (signaled by having 'end' appear in the POST)
        fb = FeedbackFactory(nomcom=self.nc,type_id=None)
        np = NomineePosition.objects.filter(position__nomcom = self.nc,state='accepted').first()
        fb_count_before = np.nominee.feedback_set.count()
        response = self.client.post(url, {'form-TOTAL_FORMS':1,
                                          'form-INITIAL_FORMS':1,
                                          'end':'Save feedback',
                                          'form-0-id': fb.id,
                                          'form-0-type': 'comment',
                                          'form-0-nominee': '%s_%s'%(np.position.id,np.nominee.id),
                                         })
        self.assertEqual(response.status_code,302)
        fb = Feedback.objects.get(id=fb.id)
        self.assertEqual(fb.type_id,'comment')
        self.assertEqual(np.nominee.feedback_set.count(),fb_count_before+1)
        # Classify a nomination for an existing nominee, located by email address.
        fb = FeedbackFactory(nomcom=self.nc,type_id=None)
        nominee = self.nc.nominee_set.order_by('pk').first()
        position = self.nc.position_set.exclude(nomineeposition__nominee=nominee).first()
        self.assertIsNotNone(position)
        fb_count_before = nominee.feedback_set.count()
        response = self.client.post(url, {'form-TOTAL_FORMS':1,
                                          'form-INITIAL_FORMS':1,
                                          'end':'Save feedback',
                                          'form-0-id': fb.id,
                                          'form-0-type': 'nomina',
                                          'form-0-position': position.id,
                                          'form-0-searched_email' : nominee.email.address,
                                         })
        self.assertEqual(response.status_code,302)
        fb = Feedback.objects.get(id=fb.id)
        self.assertEqual(fb.type_id,'nomina')
        self.assertEqual(nominee.feedback_set.count(),fb_count_before+1)
        # Classify a newperson
        fb = FeedbackFactory(nomcom=self.nc,type_id=None)
        position = self.nc.position_set.first()
        response = self.client.post(url, {'form-TOTAL_FORMS':1,
                                          'form-INITIAL_FORMS':1,
                                          'end':'Save feedback',
                                          'form-0-id': fb.id,
                                          'form-0-type': 'nomina',
                                          'form-0-position': position.id,
                                          'form-0-candidate_email' : 'newperson@example.com',
                                          'form-0-candidate_name' : 'New Person',
                                         })
        self.assertEqual(response.status_code,302)
        fb = Feedback.objects.get(id=fb.id)
        self.assertEqual(fb.type_id,'nomina')
        self.assertTrue(fb.nominees.filter(person__name='New Person').exists())
        # check for failure when trying to add a newperson that already exists
        fb = FeedbackFactory(nomcom=self.nc,type_id=None)
        position = self.nc.position_set.all()[1]
        nominee = self.nc.nominee_set.get(person__email__address='newperson@example.com')
        fb_count_before = nominee.feedback_set.count()
        response = self.client.post(url, {'form-TOTAL_FORMS':1,
                                          'form-INITIAL_FORMS':1,
                                          'end':'Save feedback',
                                          'form-0-id': fb.id,
                                          'form-0-type': 'nomina',
                                          'form-0-position': position.id,
                                          'form-0-candidate_email' : 'newperson@example.com',
                                          'form-0-candidate_name' : 'New Person',
                                         })
        self.assertEqual(response.status_code,200)
        self.assertTrue('already exists' in unicontent(response))
        # The feedback stays unclassified and no feedback is attached.
        fb = Feedback.objects.get(id=fb.id)
        self.assertEqual(fb.type_id,None)
        self.assertEqual(nominee.feedback_set.count(),fb_count_before)
        # Classify a questionnaire response for an accepted nominee/position pair.
        fb = FeedbackFactory(nomcom=self.nc,type_id=None)
        np = NomineePosition.objects.filter(position__nomcom = self.nc,state='accepted').first()
        fb_count_before = np.nominee.feedback_set.count()
        response = self.client.post(url, {'form-TOTAL_FORMS':1,
                                          'form-INITIAL_FORMS':1,
                                          'end':'Save feedback',
                                          'form-0-id': fb.id,
                                          'form-0-type': 'questio',
                                          'form-0-nominee' : '%s_%s'%(np.position.id,np.nominee.id),
                                         })
        self.assertEqual(response.status_code,302)
        fb = Feedback.objects.get(id=fb.id)
        self.assertEqual(fb.type_id,'questio')
        self.assertEqual(np.nominee.feedback_set.count(),fb_count_before+1)
    def test_complicated_feedback_pending(self):
        """Classify a mixed batch of pending feedback across the chained formsets."""
        url = reverse('ietf.nomcom.views.view_feedback_pending',kwargs={'year':self.nc.year() })
        login_testing_unauthorized(self, self.chair.user.username, url)
        provide_private_key_to_test_client(self)
        # Test having multiple things to classify
        # The view has some complicated logic to handle having some forms in the initial formset
        # being categorized as 'junk' and others being categorized as something that requires
        # more information. The second formset presented will have forms for any others initially
        # categorized as nominations, then a third formset will be presented with any that were
        # initially categorized as comments or questionnaire responses. The following exercises
        # all the gears that glue these three formset presentations together.
        fb0 = FeedbackFactory(nomcom=self.nc,type_id=None)
        fb1 = FeedbackFactory(nomcom=self.nc,type_id=None)
        fb2 = FeedbackFactory(nomcom=self.nc,type_id=None)
        nominee = self.nc.nominee_set.order_by('pk').first()
        new_position_for_nominee = self.nc.position_set.exclude(nomineeposition__nominee=nominee).first()
        # Initial formset
        response = self.client.post(url, {'form-TOTAL_FORMS': 3,
                                          'form-INITIAL_FORMS': 3,
                                          'form-0-id': fb0.id,
                                          'form-0-type': 'junk',
                                          'form-1-id': fb1.id,
                                          'form-1-type': 'nomina',
                                          'form-2-id': fb2.id,
                                          'form-2-type': 'comment',
                                         })
        self.assertEqual(response.status_code,200) # Notice that this is not a 302
        # The junk item is classified immediately...
        fb0 = Feedback.objects.get(id=fb0.id)
        self.assertEqual(fb0.type_id,'junk')
        q = PyQuery(response.content)
        # ...while the nomination moves to the second formset and the comment is
        # carried along in the hidden extra_ids field.
        self.assertEqual(q('input[name=\"form-0-type\"]').attr['value'],'nomina')
        self.assertEqual(q('input[name=\"extra_ids\"]').attr['value'],'%s:comment' % fb2.id)
        # Second formset
        response = self.client.post(url, {'form-TOTAL_FORMS':1,
                                          'form-INITIAL_FORMS':1,
                                          'end':'Save feedback',
                                          'form-0-id': fb1.id,
                                          'form-0-type': 'nomina',
                                          'form-0-position': new_position_for_nominee.id,
                                          'form-0-candidate_name' : 'Totally New Person',
                                          'form-0-candidate_email': 'totallynew@example.org',
                                          'extra_ids': '%s:comment' % fb2.id,
                                         })
        self.assertEqual(response.status_code,200) # Notice that this also is not a 302
        q = PyQuery(response.content)
        self.assertEqual(q('input[name=\"form-0-type\"]').attr['value'],'comment')
        self.assertFalse(q('input[name=\"extra_ids\"]'))
        fb1 = Feedback.objects.get(id=fb1.id)
        self.assertEqual(fb1.type_id,'nomina')
        # Exercising the resulting third formset is identical to the simple test above
        # that categorizes a single thing as a comment. Note that it returns a 302.
        # There is yet another code-path for transitioning to the second form when
        # nothing was classified as a nomination.
        fb0 = FeedbackFactory(nomcom=self.nc,type_id=None)
        fb1 = FeedbackFactory(nomcom=self.nc,type_id=None)
        response = self.client.post(url, {'form-TOTAL_FORMS': 2,
                                          'form-INITIAL_FORMS': 2,
                                          'form-0-id': fb0.id,
                                          'form-0-type': 'junk',
                                          'form-1-id': fb1.id,
                                          'form-1-type': 'comment',
                                         })
        self.assertEqual(response.status_code,200)
        q = PyQuery(response.content)
        self.assertEqual(q('input[name=\"form-0-type\"]').attr['value'],'comment')
        self.assertFalse(q('input[name=\"extra_ids\"]'))
def test_feedback_unrelated(self):
FeedbackFactory(nomcom=self.nc,type_id='junk')
url=reverse('ietf.nomcom.views.view_feedback_unrelated',kwargs={'year':self.nc.year()})
login_testing_unauthorized(self,self.chair.user.username,url)
provide_private_key_to_test_client(self)
response = self.client.get(url)
self.assertEqual(response.status_code,200)
def test_list_templates(self):
DBTemplateFactory.create(group=self.nc.group,
title='Test template',
path='/nomcom/'+self.nc.group.acronym+'/test',
variables='',
type_id='plain',
content='test content')
url=reverse('ietf.nomcom.views.list_templates',kwargs={'year':self.nc.year()})
login_testing_unauthorized(self,self.chair.user.username,url)
response = self.client.get(url)
self.assertEqual(response.status_code,200)
def test_edit_templates(self):
template = DBTemplateFactory.create(group=self.nc.group,
title='Test template',
path='/nomcom/'+self.nc.group.acronym+'/test',
variables='',
type_id='plain',
content='test content')
url=reverse('ietf.nomcom.views.edit_template',kwargs={'year':self.nc.year(),'template_id':template.id})
login_testing_unauthorized(self,self.chair.user.username,url)
response = self.client.get(url)
self.assertEqual(response.status_code,200)
response = self.client.post(url,{'content': 'more interesting test content'})
self.assertEqual(response.status_code,302)
template = DBTemplate.objects.get(id=template.id)
self.assertEqual('more interesting test content',template.content)
def test_list_positions(self):
url = reverse('ietf.nomcom.views.list_positions',kwargs={'year':self.nc.year()})
login_testing_unauthorized(self,self.chair.user.username,url)
response = self.client.get(url)
self.assertEqual(response.status_code,200)
def test_remove_position(self):
position = self.nc.position_set.filter(nomineeposition__isnull=False).first()
f = FeedbackFactory(nomcom=self.nc)
f.positions.add(position)
url = reverse('ietf.nomcom.views.remove_position',kwargs={'year':self.nc.year(),'position_id':position.id})
login_testing_unauthorized(self,self.chair.user.username,url)
response = self.client.get(url)
self.assertEqual(response.status_code,200)
q = PyQuery(response.content)
self.assertTrue(any(['likely to be harmful' in x.text for x in q('.alert-warning')]))
response = self.client.post(url,{'remove':position.id})
self.assertEqual(response.status_code, 302)
self.assertFalse(self.nc.position_set.filter(id=position.id))
def test_remove_invalid_position(self):
no_such_position_id = self.nc.position_set.aggregate(Max('id'))['id__max']+1
url = reverse('ietf.nomcom.views.remove_position',kwargs={'year':self.nc.year(),'position_id':no_such_position_id})
login_testing_unauthorized(self,self.chair.user.username,url)
response = self.client.get(url)
self.assertEqual(response.status_code, 404)
def test_edit_position(self):
position = self.nc.position_set.filter(is_open=True).first()
url = reverse('ietf.nomcom.views.edit_position',kwargs={'year':self.nc.year(),'position_id':position.id})
login_testing_unauthorized(self,self.chair.user.username,url)
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
response = self.client.post(url,{'name':'more interesting test name'})
self.assertEqual(response.status_code, 302)
position = Position.objects.get(id=position.id)
self.assertEqual('more interesting test name',position.name)
self.assertFalse(position.is_open)
def test_edit_invalid_position(self):
no_such_position_id = self.nc.position_set.aggregate(Max('id'))['id__max']+1
url = reverse('ietf.nomcom.views.edit_position',kwargs={'year':self.nc.year(),'position_id':no_such_position_id})
login_testing_unauthorized(self,self.chair.user.username,url)
response = self.client.get(url)
self.assertEqual(response.status_code, 404)
def test_edit_nominee(self):
nominee = self.nc.nominee_set.order_by('pk').first()
new_email = EmailFactory(person=nominee.person, origin='test')
url = reverse('ietf.nomcom.views.edit_nominee',kwargs={'year':self.nc.year(),'nominee_id':nominee.id})
login_testing_unauthorized(self,self.chair.user.username,url)
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
response = self.client.post(url,{'nominee_email':new_email.address})
self.assertEqual(response.status_code, 302)
nominee = self.nc.nominee_set.order_by('pk').first()
self.assertEqual(nominee.email,new_email)
def test_request_merge(self):
nominee1, nominee2 = self.nc.nominee_set.all()[:2]
url = reverse('ietf.nomcom.views.private_merge_person',kwargs={'year':self.nc.year()})
login_testing_unauthorized(self,self.chair.user.username,url)
empty_outbox()
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
response = self.client.post(url,{'primary_person':nominee1.person.pk,
'duplicate_persons':[nominee1.person.pk]})
self.assertEqual(response.status_code, 200)
self.assertIn('must not also be listed as a duplicate', unicontent(response))
response = self.client.post(url,{'primary_person':nominee1.person.pk,
'duplicate_persons':[nominee2.person.pk]})
self.assertEqual(response.status_code, 302)
self.assertEqual(len(outbox),1)
self.assertTrue(all([str(x.person.pk) in outbox[0].get_payload() for x in [nominee1,nominee2]]))
def test_extract_email(self):
url = reverse('ietf.nomcom.views.extract_email_lists',kwargs={'year':self.nc.year()})
login_testing_unauthorized(self,self.chair.user.username,url)
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
    def test_eligible(self):
        """Smoke-test the public and private eligibility-list views.

        Builds ten recent IETF meetings with registrations so the
        eligibility computation has data, then checks that the chair and
        the secretariat get a 200 while a plain user is redirected (302).
        """
        def first_meeting_of_year(year):
            # Maps a year to that year's first IETF meeting number; for
            # 1991 this yields 20, advancing by three meetings per year.
            assert isinstance(year, int)
            assert year >= 1991
            return (year-1985)*3+2
        # Create meetings to ensure we have the 'last 5'
        meeting_start = first_meeting_of_year(datetime.date.today().year-2)
        # Populate the meeting registration records
        for number in range(meeting_start, meeting_start+10):
            meeting = MeetingFactory.create(type_id='ietf', number=number)
            PersonFactory.create_batch(3)
            samples = Person.objects.count()//2
            # Register a random half of all people for this meeting,
            # skipping anyone whose ascii name has no space to split on.
            for (person, ascii, email) in random.sample([ (p, p.ascii, p.email()) for p in Person.objects.all() ], samples):
                if not ' ' in ascii:
                    continue
                first_name, last_name = ascii.rsplit(None, 1)
                MeetingRegistration.objects.create(meeting=meeting, first_name=first_name, last_name=last_name, person=person, country_code='WO', email=email, attended=True)
        for view in ('public_eligible','private_eligible'):
            url = reverse(f'ietf.nomcom.views.{view}',kwargs={'year':self.nc.year()})
            # Both the chair and the secretariat may view the list.
            for username in (self.chair.user.username,'secretary'):
                login_testing_unauthorized(self,username,url)
                response = self.client.get(url)
                self.assertEqual(response.status_code, 200)
                self.client.logout()
            # A user with no nomcom role is redirected away.
            self.client.login(username='plain',password='plain+password')
            response = self.client.get(url)
            self.assertEqual(response.status_code, 302)
    def test_volunteers(self):
        """Smoke-test the public and private volunteer-list views.

        Registers ten people for eight recent meetings, makes them all
        volunteers, then checks that the chair and the secretariat see
        the volunteers while a plain user is redirected (302).
        """
        year = self.nc.year()
        def first_meeting_of_year(year):
            # Maps a year to that year's first IETF meeting number; for
            # 1991 this yields 20, advancing by three meetings per year.
            assert isinstance(year, int)
            assert year >= 1991
            return (year-1985)*3+2
        people = PersonFactory.create_batch(10)
        meeting_start = first_meeting_of_year(year-2)
        # Register every person for each of eight consecutive meetings.
        for number in range(meeting_start, meeting_start+8):
            m = MeetingFactory.create(type_id='ietf', number=number)
            for p in people:
                m.meetingregistration_set.create(person=p)
        for p in people:
            self.nc.volunteer_set.create(person=p,affiliation='something')
        for view in ('public_volunteers','private_volunteers'):
            url = reverse(f'ietf.nomcom.views.{view}', kwargs=dict(year=self.nc.year()))
            # Both the chair and the secretariat see the volunteer emails.
            for username in (self.chair.user.username,'secretary'):
                login_testing_unauthorized(self,username,url)
                response = self.client.get(url)
                self.assertContains(response,people[-1].email(),status_code=200)
                self.client.logout()
            # A user with no nomcom role is redirected away.
            self.client.login(username='plain',password='plain+password')
            response = self.client.get(url)
            self.assertEqual(response.status_code, 302)
class NomComIndexTests(TestCase):
    """Tests for the historical NomCom index page."""

    def setUp(self):
        # A spread of past nomcoms, without positions or personnel,
        # is enough to populate the index listing.
        for year in range(2000, 2014):
            NomComFactory.create(**nomcom_kwargs_for_year(year=year, populate_positions=False, populate_personnel=False))

    def testIndex(self):
        """The index page renders without authentication."""
        r = self.client.get(reverse('ietf.nomcom.views.index'))
        self.assertEqual(r.status_code, 200)
class NoPublicKeyTests(TestCase):
    """Behaviour of chair-facing views before a public key is configured."""

    def setUp(self):
        self.nc = NomComFactory.create(**nomcom_kwargs_for_year(public_key=None))
        self.chair = self.nc.group.role_set.filter(name='chair').first().person

    def do_common_work(self, url, expected_form):
        """GET ``url`` as the chair; expect a 'not yet' warning, and a
        non-navbar form only when ``expected_form`` is True."""
        login_testing_unauthorized(self, self.chair.user.username, url)
        r = self.client.get(url)
        self.assertEqual(r.status_code, 200)
        q = PyQuery(r.content)
        warning_texts = [node.xpath('./text()') for node in q('.alert-warning')]
        self.assertTrue(any('not yet' in fragment
                            for chunks in warning_texts
                            for fragment in chunks))
        self.assertEqual(bool(q('form:not(.navbar-form)')), expected_form)
        self.client.logout()

    def test_not_yet(self):
        """With no public key, only the reminder-mail view offers a form."""
        # Warn reminder mail
        self.do_common_work(reverse('ietf.nomcom.views.send_reminder_mail', kwargs={'year': self.nc.year(), 'type': 'accept'}), True)
        # No nominations
        self.do_common_work(reverse('ietf.nomcom.views.private_nominate', kwargs={'year': self.nc.year()}), False)
        # No feedback
        self.do_common_work(reverse('ietf.nomcom.views.private_feedback', kwargs={'year': self.nc.year()}), False)
        # No feedback email
        self.do_common_work(reverse('ietf.nomcom.views.private_feedback_email', kwargs={'year': self.nc.year()}), False)
        # No questionnaire responses
        self.do_common_work(reverse('ietf.nomcom.views.private_questionnaire', kwargs={'year': self.nc.year()}), False)
class AcceptingTests(TestCase):
    """Verify the accepting_nominations/accepting_feedback flags.

    Closing a position (or topic) should remove it from the *public*
    nominate/feedback forms, while the *private* (nomcom-member) forms
    continue to show everything.
    """
    def setUp(self):
        setup_test_public_keys_dir(self)
        self.nc = NomComFactory(**nomcom_kwargs_for_year())
        self.plain_person = PersonFactory.create()
        self.member = self.nc.group.role_set.filter(name='member').first().person
    def tearDown(self):
        teardown_test_public_keys_dir(self)
    def test_public_accepting_nominations(self):
        """Closing a position drops one <option> from the public form."""
        url = reverse('ietf.nomcom.views.public_nominate',kwargs={'year':self.nc.year()})
        login_testing_unauthorized(self,self.plain_person.user.username,url)
        response = self.client.get(url)
        q=PyQuery(response.content)
        self.assertEqual( len(q('#id_position option')) , 4 )
        pos = self.nc.position_set.first()
        pos.accepting_nominations=False
        pos.save()
        response = self.client.get(url)
        q=PyQuery(response.content)
        # One fewer option now that the position stopped accepting.
        self.assertEqual( len(q('#id_position option')) , 3 )
    def test_private_accepting_nominations(self):
        """The private form ignores accepting_nominations=False."""
        url = reverse('ietf.nomcom.views.private_nominate',kwargs={'year':self.nc.year()})
        login_testing_unauthorized(self,self.member.user.username,url)
        response = self.client.get(url)
        q=PyQuery(response.content)
        self.assertEqual( len(q('#id_position option')) , 4 )
        pos = self.nc.position_set.first()
        pos.accepting_nominations=False
        pos.save()
        response = self.client.get(url)
        q=PyQuery(response.content)
        # Unchanged: nomcom members still see every position.
        self.assertEqual( len(q('#id_position option')) , 4 )
    def test_public_accepting_feedback(self):
        """Closing a position/topic removes its feedback entry points.

        Both the badge count on the overview page and the dedicated
        per-position / per-topic feedback URLs (GET and POST) must refuse
        feedback once accepting_feedback is False.
        """
        url = reverse('ietf.nomcom.views.public_feedback',kwargs={'year':self.nc.year()})
        login_testing_unauthorized(self,self.plain_person.user.username,url)
        response = self.client.get(url)
        q=PyQuery(response.content)
        self.assertEqual( len(q('.badge')) , 6 )
        pos = self.nc.position_set.first()
        pos.accepting_feedback=False
        pos.save()
        response = self.client.get(url)
        q=PyQuery(response.content)
        # One badge fewer after closing the position...
        self.assertEqual( len(q('.badge')) , 5 )
        topic = self.nc.topic_set.first()
        topic.accepting_feedback=False
        topic.save()
        response = self.client.get(url)
        q=PyQuery(response.content)
        # ...and one fewer again after closing the topic.
        self.assertEqual( len(q('.badge')) , 4 )
        # Direct GET/POST against the closed position must be refused.
        posurl = url+ "?nominee=%d&position=%d" % (pos.nominee_set.first().pk, pos.pk)
        response = self.client.get(posurl)
        self.assertIn('not currently accepting feedback', unicontent(response))
        test_data = {'comment_text': 'junk',
                     'position_name': pos.name,
                     'nominee_name': pos.nominee_set.first().email.person.name,
                     'nominee_email': pos.nominee_set.first().email.address,
                     'confirmation': False,
                     'nominator_email': self.plain_person.email().address,
                     'nominator_name': self.plain_person.plain_name(),
                     }
        response = self.client.post(posurl, test_data)
        self.assertIn('not currently accepting feedback', unicontent(response))
        # Direct GET/POST against the closed topic must be refused too.
        topicurl = url+ "?topic=%d" % (topic.pk, )
        response = self.client.get(topicurl)
        self.assertIn('not currently accepting feedback', unicontent(response))
        test_data = {'comment_text': 'junk',
                     'confirmation': False,
                     }
        response = self.client.post(topicurl, test_data)
        self.assertIn('not currently accepting feedback', unicontent(response))
    def test_private_accepting_feedback(self):
        """The private feedback page ignores accepting_feedback=False."""
        url = reverse('ietf.nomcom.views.private_feedback',kwargs={'year':self.nc.year()})
        login_testing_unauthorized(self,self.member.user.username,url)
        response = self.client.get(url)
        q=PyQuery(response.content)
        self.assertEqual( len(q('.badge')) , 6 )
        pos = self.nc.position_set.first()
        pos.accepting_feedback=False
        pos.save()
        response = self.client.get(url)
        q=PyQuery(response.content)
        # Unchanged: nomcom members can always leave feedback.
        self.assertEqual( len(q('.badge')) , 6 )
class ShowNomineeTests(TestCase):
    """The public nominate page shows or hides accepted nominees."""

    def setUp(self):
        setup_test_public_keys_dir(self)
        self.nc = NomComFactory(**nomcom_kwargs_for_year())
        self.plain_person = PersonFactory.create()

    def tearDown(self):
        teardown_test_public_keys_dir(self)

    def test_feedback_pictures(self):
        """Toggling show_accepted_nominees controls the nominee headings."""
        target = reverse('ietf.nomcom.views.public_nominate', kwargs={'year': self.nc.year()})
        login_testing_unauthorized(self, self.plain_person.user.username, target)
        r = self.client.get(target)
        # With show_accepted_nominees on, nominee <h3> headings appear.
        self.assertTrue(PyQuery(r.content)('h3'))
        self.nc.show_accepted_nominees = False
        self.nc.save()
        r = self.client.get(target)
        self.assertFalse(PyQuery(r.content)('h3'))
class TopicTests(TestCase):
    """CRUD, classification, feedback, and audience control for topics."""
    def setUp(self):
        setup_test_public_keys_dir(self)
        # Start with no topics so the tests create exactly what they need.
        self.nc = NomComFactory(**nomcom_kwargs_for_year(populate_topics=False))
        self.plain_person = PersonFactory.create()
        self.chair = self.nc.group.role_set.filter(name='chair').first().person
    def tearDown(self):
        teardown_test_public_keys_dir(self)
    def testAddEditListRemoveTopic(self):
        """Full lifecycle: the chair adds, edits, lists, and removes a topic."""
        self.assertFalse(self.nc.topic_set.exists())
        # Add.
        url = reverse('ietf.nomcom.views.edit_topic', kwargs={'year':self.nc.year()})
        login_testing_unauthorized(self,self.chair.user.username,url)
        response = self.client.post(url,{'subject':'Test Topic', 'accepting_feedback':True, 'audience':'general'})
        self.assertEqual(response.status_code,302)
        self.assertEqual(self.nc.topic_set.first().subject,'Test Topic')
        self.assertEqual(self.nc.topic_set.first().accepting_feedback, True)
        self.assertEqual(self.nc.topic_set.first().audience.slug,'general')
        # Edit: form is pre-filled, and the POST changes all three fields.
        url = reverse('ietf.nomcom.views.edit_topic', kwargs={'year':self.nc.year(),'topic_id':self.nc.topic_set.first().pk})
        response = self.client.get(url)
        self.assertEqual(response.status_code,200)
        q = PyQuery(response.content)
        self.assertEqual(q('#id_subject').attr['value'],'Test Topic')
        response = self.client.post(url,{'subject':'Test Topic Modified', 'accepting_feedback':False, 'audience':'nominees'})
        self.assertEqual(response.status_code,302)
        self.assertEqual(self.nc.topic_set.first().subject,'Test Topic Modified')
        self.assertEqual(self.nc.topic_set.first().accepting_feedback, False)
        self.assertEqual(self.nc.topic_set.first().audience.slug,'nominees')
        self.client.logout()
        # List.
        url = reverse('ietf.nomcom.views.list_topics',kwargs={'year':self.nc.year()})
        login_testing_unauthorized(self,self.chair.user.username,url)
        response=self.client.get(url)
        self.assertEqual(response.status_code,200)
        self.assertIn('Test Topic Modified', unicontent(response))
        self.client.logout()
        # Remove: confirmation page, then POST deletes the topic.
        url = reverse('ietf.nomcom.views.remove_topic', kwargs={'year':self.nc.year(),'topic_id':self.nc.topic_set.first().pk})
        login_testing_unauthorized(self,self.chair.user.username,url)
        response=self.client.get(url)
        self.assertEqual(response.status_code,200)
        self.assertIn('Test Topic Modified', unicontent(response))
        response=self.client.post(url,{'remove':1})
        self.assertEqual(response.status_code,302)
        self.assertFalse(self.nc.topic_set.exists())
    def testClassifyTopicFeedback(self):
        """Pending feedback can be classified as a comment on a topic.

        Classifying as 'comment' without a nominee or topic is rejected;
        adding the topic makes it succeed and attaches the feedback.
        """
        topic = TopicFactory(nomcom=self.nc)
        feedback = FeedbackFactory(nomcom=self.nc,type_id=None)
        url = reverse('ietf.nomcom.views.view_feedback_pending',kwargs={'year':self.nc.year() })
        login_testing_unauthorized(self, self.chair.user.username, url)
        provide_private_key_to_test_client(self)
        # No nominee and no topic: the form must complain.
        response = self.client.post(url, {'form-TOTAL_FORMS':1,
                                          'form-INITIAL_FORMS':1,
                                          'end':'Save feedback',
                                          'form-0-id': feedback.id,
                                          'form-0-type': 'comment',
                                         })
        self.assertIn('You must choose at least one Nominee or Topic', unicontent(response))
        # With a topic supplied, classification succeeds (redirect).
        response = self.client.post(url, {'form-TOTAL_FORMS':1,
                                          'form-INITIAL_FORMS':1,
                                          'end':'Save feedback',
                                          'form-0-id': feedback.id,
                                          'form-0-type': 'comment',
                                          'form-0-topic': '%s'%(topic.id,),
                                         })
        self.assertEqual(response.status_code,302)
        feedback = Feedback.objects.get(id=feedback.id)
        self.assertEqual(feedback.type_id,'comment')
        self.assertEqual(topic.feedback_set.count(),1)
    def testTopicFeedback(self):
        """A logged-in user can submit public feedback on a topic."""
        topic = TopicFactory(nomcom=self.nc)
        url = reverse('ietf.nomcom.views.public_feedback',kwargs={'year':self.nc.year() })
        url += '?topic=%d'%topic.pk
        login_testing_unauthorized(self, self.plain_person.user.username, url)
        response=self.client.post(url, {'comment_text':'junk', 'confirmation':False})
        self.assertEqual(response.status_code, 200)
        # Success message replaces the form, and the feedback is stored.
        self.assertContains(response, "alert-success")
        self.assertNotContains(response, "feedbackform")
        self.assertEqual(topic.feedback_set.count(),1)
    def testAudience(self):
        """Audience-restricted topics are hidden from unqualified users.

        For each restricted audience ('nomcom', 'nominees'): a plain user
        neither sees the topic nor may GET/POST its feedback URL (404),
        while a member of the audience can see it and leave feedback.
        """
        for audience in ['nomcom','nominees']:
            topic = TopicFactory(nomcom=self.nc,audience_id=audience)
            feedback_url = reverse('ietf.nomcom.views.public_feedback',kwargs={'year':self.nc.year() })
            login_testing_unauthorized(self, self.plain_person.user.username, feedback_url)
            r = self.client.get(feedback_url)
            self.assertNotContains(r, topic.subject)
            topic_url = feedback_url + '?topic=%d'%topic.pk
            r = self.client.get(topic_url)
            self.assertEqual(r.status_code,404)
            r = self.client.post(topic_url, {'comment_text':'junk', 'confirmation':False})
            self.assertEqual(r.status_code,404)
            self.client.logout()
            # Pick a user who belongs to the topic's audience.
            if audience == 'nomcom':
                valid_user = self.nc.group.role_set.filter(name='member').first().person
            else:
                valid_user = self.nc.nominee_set.first().person
            self.client.login(username=valid_user.user.username,password=valid_user.user.username+"+password")
            r = self.client.get(feedback_url)
            self.assertContains(r, topic.subject)
            r = self.client.get(topic_url)
            self.assertEqual(r.status_code,200)
            r = self.client.post(topic_url, {'comment_text':'junk', 'confirmation':False})
            self.assertEqual(r.status_code,200)
            self.assertEqual(topic.feedback_set.count(),1)
            self.client.logout()
class EligibilityUnitTests(TestCase):
    """Unit tests for get_eligibility_date."""
    def test_get_eligibility_date(self):
        """Check the precedence rules for the eligibility date.

        An explicit ``date`` argument always wins; otherwise the relevant
        nomcom's first_call_for_volunteers is used; otherwise May 1 of
        the year implied by the nomcom (or the next nomcom to be seated).
        """
        # No Nomcoms exist:
        self.assertEqual(get_eligibility_date(), datetime.date(datetime.date.today().year,5,1))
        # a provided date trumps anything in the database
        self.assertEqual(get_eligibility_date(date=datetime.date(2001,2,3)), datetime.date(2001,2,3))
        n = NomComFactory(group__acronym='nomcom2015',populate_personnel=False)
        self.assertEqual(get_eligibility_date(date=datetime.date(2001,2,3)), datetime.date(2001,2,3))
        self.assertEqual(get_eligibility_date(nomcom=n, date=datetime.date(2001,2,3)), datetime.date(2001,2,3))
        # Now there's a nomcom in the database
        self.assertEqual(get_eligibility_date(nomcom=n), datetime.date(2015,5,1))
        n.first_call_for_volunteers = datetime.date(2015,5,17)
        n.save()
        self.assertEqual(get_eligibility_date(nomcom=n), datetime.date(2015,5,17))
        # No nomcoms in the database with seated members
        self.assertEqual(get_eligibility_date(), datetime.date(datetime.date.today().year,5,1))
        # Seating a member on nomcom2015 makes 2016 the next relevant year.
        RoleFactory(group=n.group,name_id='member')
        self.assertEqual(get_eligibility_date(),datetime.date(2016,5,1))
        # A 2016 nomcom with a first call overrides the May 1 default.
        NomComFactory(group__acronym='nomcom2016', populate_personnel=False, first_call_for_volunteers=datetime.date(2016,5,4))
        self.assertEqual(get_eligibility_date(),datetime.date(2016,5,4))
        # The current year's nomcom takes precedence when present.
        this_year = datetime.date.today().year
        NomComFactory(group__acronym=f'nomcom{this_year}', first_call_for_volunteers=datetime.date(this_year,5,6))
        self.assertEqual(get_eligibility_date(),datetime.date(this_year,5,6))
class rfc8713EligibilityTests(TestCase):
    """Eligibility for nomcom2019: attendance at 3 of the last 5 meetings.

    setUp builds one person for every subset of the five meetings before
    the first call for volunteers; those who attended three or more are
    expected to be eligible.
    """
    def setUp(self):
        self.nomcom = NomComFactory(group__acronym='nomcom2019', populate_personnel=False, first_call_for_volunteers=datetime.date(2019,5,1))
        # The five IETF meetings preceding the 2019 first call.
        meetings = [ MeetingFactory(date=date,type_id='ietf') for date in (
            datetime.date(2019,3,1),
            datetime.date(2018,11,1),
            datetime.date(2018,7,1),
            datetime.date(2018,3,1),
            datetime.date(2017,11,1),
        )]
        self.eligible_people = list()
        self.ineligible_people = list()
        # One person per possible attendance combination (sizes 0..5);
        # fewer than three meetings attended means ineligible.
        for combo_len in range(0,6):
            for combo in combinations(meetings,combo_len):
                p = PersonFactory()
                for m in combo:
                    MeetingRegistrationFactory(person=p, meeting=m)
                if combo_len<3:
                    self.ineligible_people.append(p)
                else:
                    self.eligible_people.append(p)
        # No-one is eligible for the other_nomcom
        self.other_nomcom = NomComFactory(group__acronym='nomcom2018',first_call_for_volunteers=datetime.date(2018,5,1))
        # Someone is eligible at this other_date
        self.other_date = datetime.date(2009,5,1)
        self.other_people = PersonFactory.create_batch(1)
        for date in (datetime.date(2009,3,1), datetime.date(2008,11,1), datetime.date(2008,7,1)):
            MeetingRegistrationFactory(person=self.other_people[0],meeting__date=date, meeting__type_id='ietf')
    def test_is_person_eligible(self):
        """is_eligible honours the nomcom and date arguments."""
        for person in self.eligible_people:
            self.assertTrue(is_eligible(person,self.nomcom))
            self.assertTrue(is_eligible(person))
            # Eligibility does not carry over to a different nomcom/date.
            self.assertFalse(is_eligible(person,nomcom=self.other_nomcom))
            self.assertFalse(is_eligible(person,date=self.other_date))
        for person in self.ineligible_people:
            self.assertFalse(is_eligible(person,self.nomcom))
        for person in self.other_people:
            self.assertTrue(is_eligible(person,date=self.other_date))
    def test_list_eligible(self):
        """list_eligible returns exactly the expected population."""
        self.assertEqual(set(list_eligible()), set(self.eligible_people))
        self.assertEqual(set(list_eligible(self.nomcom)), set(self.eligible_people))
        self.assertEqual(set(list_eligible(self.other_nomcom)),set(self.other_people))
        self.assertEqual(set(list_eligible(date=self.other_date)),set(self.other_people))
class rfc8788EligibilityTests(TestCase):
    """Eligibility for nomcom2020 based on a fixed meeting list (102-106).

    As with the RFC 8713 tests, one person is created per attendance
    subset; three or more of the listed meetings means eligible.
    """
    def setUp(self):
        self.nomcom = NomComFactory(group__acronym='nomcom2020', populate_personnel=False, first_call_for_volunteers=datetime.date(2020,5,1))
        meetings = [MeetingFactory(number=number, date=date, type_id='ietf') for number,date in [
            ('106', datetime.date(2019, 11, 16)),
            ('105', datetime.date(2019, 7, 20)),
            ('104', datetime.date(2019, 3, 23)),
            ('103', datetime.date(2018, 11, 3)),
            ('102', datetime.date(2018, 7, 14)),
        ]]
        self.eligible_people = list()
        self.ineligible_people = list()
        # One person per attendance combination; fewer than three
        # meetings attended means ineligible.
        for combo_len in range(0,6):
            for combo in combinations(meetings,combo_len):
                p = PersonFactory()
                for m in combo:
                    MeetingRegistrationFactory(person=p, meeting=m)
                if combo_len<3:
                    self.ineligible_people.append(p)
                else:
                    self.eligible_people.append(p)
    def test_is_person_eligible(self):
        """is_eligible matches the expected per-person classification."""
        for person in self.eligible_people:
            self.assertTrue(is_eligible(person,self.nomcom))
        for person in self.ineligible_people:
            self.assertFalse(is_eligible(person,self.nomcom))
    def test_list_eligible(self):
        """list_eligible returns exactly the eligible population."""
        self.assertEqual(set(list_eligible(self.nomcom)), set(self.eligible_people))
class rfc8989EligibilityTests(TestCase):
    """Eligibility for nomcom2021, which has several qualifying paths:
    meeting attendance, office-holding, and document authorship."""
    def setUp(self):
        self.nomcom = NomComFactory(group__acronym='nomcom2021', populate_personnel=False, first_call_for_volunteers=datetime.date(2021,5,15))
        # make_immutable_test_data makes things this test does not want
        Role.objects.filter(name_id__in=('chair','secr')).delete()
    def test_elig_by_meetings(self):
        """Path 1: attendance at 3 of the 5 meetings before the call."""
        meetings = [MeetingFactory(number=number, date=date, type_id='ietf') for number,date in [
            ('110', datetime.date(2021, 3, 6)),
            ('109', datetime.date(2020, 11, 14)),
            ('108', datetime.date(2020, 7, 25)),
            ('107', datetime.date(2020, 3, 21)),
            ('106', datetime.date(2019, 11, 16)),
        ]]
        eligible_people = list()
        ineligible_people = list()
        # One person per attendance combination; fewer than three
        # meetings attended means ineligible.
        for combo_len in range(0,6):
            for combo in combinations(meetings,combo_len):
                p = PersonFactory()
                for m in combo:
                    MeetingRegistrationFactory(person=p, meeting=m)
                if combo_len<3:
                    ineligible_people.append(p)
                else:
                    eligible_people.append(p)
        self.assertEqual(set(eligible_people),set(list_eligible(self.nomcom)))
        for person in eligible_people:
            self.assertTrue(is_eligible(person,self.nomcom))
        for person in ineligible_people:
            self.assertFalse(is_eligible(person,self.nomcom))
    def test_elig_by_office_active_groups(self):
        """Path 2: chair/secr of a group active before the eligibility date."""
        before_elig_date = self.nomcom.first_call_for_volunteers - datetime.timedelta(days=5)
        chair = RoleFactory(name_id='chair',group__time=before_elig_date).person
        secr = RoleFactory(name_id='secr',group__time=before_elig_date).person
        nobody=PersonFactory()
        self.assertTrue(is_eligible(person=chair,nomcom=self.nomcom))
        self.assertTrue(is_eligible(person=secr,nomcom=self.nomcom))
        self.assertFalse(is_eligible(person=nobody,nomcom=self.nomcom))
        self.assertEqual(set([chair,secr]), set(list_eligible(nomcom=self.nomcom)))
    def test_elig_by_office_edge(self):
        """A chair whose group only existed after the eligibility date
        must not qualify via the office path."""
        elig_date=get_eligibility_date(self.nomcom)
        day_after = elig_date + datetime.timedelta(days=1)
        two_days_after = elig_date + datetime.timedelta(days=2)
        group = GroupFactory(time=two_days_after)
        GroupHistoryFactory(group=group,time=day_after)
        after_chair = RoleFactory(name_id='chair',group=group).person
        self.assertFalse(is_eligible(person=after_chair,nomcom=self.nomcom))
    def test_elig_by_office_closed_groups(self):
        """Office in a since-concluded group still counts, but only if the
        group closed within three years of the eligibility date."""
        elig_date=get_eligibility_date(self.nomcom)
        day_before = elig_date-datetime.timedelta(days=1)
        year_before = datetime.date(elig_date.year-1,elig_date.month,elig_date.day)
        three_years_before = datetime.date(elig_date.year-3,elig_date.month,elig_date.day)
        just_after_three_years_before = three_years_before + datetime.timedelta(days=1)
        just_before_three_years_before = three_years_before - datetime.timedelta(days=1)
        eligible = list()
        ineligible = list()
        # Groups closed inside the three-year window -> office counts.
        p1 = RoleHistoryFactory(
            name_id='chair',
            group__time=day_before,
            group__group__state_id='conclude',
        ).person
        eligible.append(p1)
        p2 = RoleHistoryFactory(
            name_id='secr',
            group__time=year_before,
            group__group__state_id='conclude',
        ).person
        eligible.append(p2)
        p3 = RoleHistoryFactory(
            name_id='secr',
            group__time=just_after_three_years_before,
            group__group__state_id='conclude',
        ).person
        eligible.append(p3)
        # Exactly three years before is still inside the window...
        p4 = RoleHistoryFactory(
            name_id='chair',
            group__time=three_years_before,
            group__group__state_id='conclude',
        ).person
        eligible.append(p4)
        # ...but one day beyond it is not.
        p5 = RoleHistoryFactory(
            name_id='chair',
            group__time=just_before_three_years_before,
            group__group__state_id='conclude',
        ).person
        ineligible.append(p5)
        for person in eligible:
            self.assertTrue(is_eligible(person,self.nomcom))
        for person in ineligible:
            self.assertFalse(is_eligible(person,self.nomcom))
        self.assertEqual(set(list_eligible(nomcom=self.nomcom)),set(eligible))
    def test_elig_by_author(self):
        """Path 3: document authorship within the five-year window.

        The cases below show two qualifying document events inside
        [first_date, last_date] make a person eligible, while zero or
        one in-window events (the other falling just outside) do not.
        """
        elig_date = get_eligibility_date(self.nomcom)
        last_date = elig_date
        first_date = datetime.date(last_date.year-5,last_date.month,last_date.day)
        day_after_last_date = last_date+datetime.timedelta(days=1)
        day_before_first_date = first_date-datetime.timedelta(days=1)
        middle_date = datetime.date(last_date.year-3,last_date.month,last_date.day)
        eligible = set()
        ineligible = set()
        # No documents at all.
        p = PersonFactory()
        ineligible.add(p)
        # Only one in-window document: not enough.
        p = PersonFactory()
        da = WgDocumentAuthorFactory(person=p)
        DocEventFactory(type='published_rfc',doc=da.document,time=middle_date)
        ineligible.add(p)
        # Two documents, both events on the window boundaries (inclusive).
        p = PersonFactory()
        da = WgDocumentAuthorFactory(person=p)
        DocEventFactory(type='iesg_approved',doc=da.document,time=last_date)
        da = WgDocumentAuthorFactory(person=p)
        DocEventFactory(type='published_rfc',doc=da.document,time=first_date)
        eligible.add(p)
        # Second document's event one day before the window opens.
        p = PersonFactory()
        da = WgDocumentAuthorFactory(person=p)
        DocEventFactory(type='iesg_approved',doc=da.document,time=middle_date)
        da = WgDocumentAuthorFactory(person=p)
        DocEventFactory(type='published_rfc',doc=da.document,time=day_before_first_date)
        ineligible.add(p)
        # Second document's event one day after the window closes.
        p = PersonFactory()
        da = WgDocumentAuthorFactory(person=p)
        DocEventFactory(type='iesg_approved',doc=da.document,time=day_after_last_date)
        da = WgDocumentAuthorFactory(person=p)
        DocEventFactory(type='published_rfc',doc=da.document,time=middle_date)
        ineligible.add(p)
        for person in eligible:
            self.assertTrue(is_eligible(person,self.nomcom))
        for person in ineligible:
            self.assertFalse(is_eligible(person,self.nomcom))
        self.assertEqual(set(list_eligible(nomcom=self.nomcom)),set(eligible))
class VolunteerTests(TestCase):
def test_volunteer(self):
url = reverse('ietf.nomcom.views.volunteer')
person = PersonFactory()
login_testing_unauthorized(self, person.user.username, url)
r = self.client.get(url)
self.assertContains(r, 'NomCom is not accepting volunteers at this time', status_code=200)
year = datetime.date.today().year
nomcom = NomComFactory(group__acronym=f'nomcom{year}', is_accepting_volunteers=False)
r = self.client.get(url)
self.assertContains(r, 'NomCom is not accepting volunteers at this time', status_code=200)
nomcom.is_accepting_volunteers = True
nomcom.save()
MeetingRegistrationFactory(person=person, affiliation='mtg_affiliation')
r = self.client.get(url)
self.assertContains(r, 'Volunteer for NomCom', status_code=200)
self.assertContains(r, 'mtg_affiliation')
r=self.client.post(url, dict(nomcoms=[nomcom.pk], affiliation=''))
self.assertEqual(r.status_code, 200)
q = PyQuery(r.content)
self.assertTrue(q('form div.has-error #id_affiliation'))
r=self.client.post(url, dict(nomcoms=[], affiliation='something'))
q = PyQuery(r.content)
self.assertTrue(q('form div.has-error #id_nomcoms'))
r=self.client.post(url, dict(nomcoms=[nomcom.pk], affiliation='something'))
self.assertRedirects(r, reverse('ietf.ietfauth.views.profile'))
self.assertEqual(person.volunteer_set.get(nomcom=nomcom).affiliation, 'something')
r=self.client.get(url)
self.assertContains(r, 'already volunteered', status_code=200)
person.volunteer_set.all().delete()
nomcom2 = NomComFactory(group__acronym=f'nomcom{year-1}', is_accepting_volunteers=True)
r = self.client.get(url)
self.assertEqual(r.status_code, 200)
q = PyQuery(r.content)
self.assertEqual(len(q('#id_nomcoms div.checkbox')), 2)
r = self.client.post(url, dict(nomcoms=[nomcom.pk, nomcom2.pk], affiliation='something'))
self.assertRedirects(r, reverse('ietf.ietfauth.views.profile'))
self.assertEqual(person.volunteer_set.count(), 2)
r = self.client.get(url)
self.assertEqual(r.status_code, 200)
q = PyQuery(r.content)
self.assertFalse(q('form div#id_nomcoms'))
self.assertIn(f'{nomcom.year()}/', q('#already-volunteered').text())
self.assertIn(f'{nomcom2.year()}/', q('#already-volunteered').text())
person.volunteer_set.all().delete()
r=self.client.post(url, dict(nomcoms=[nomcom2.pk], affiliation='something'))
self.assertRedirects(r, reverse('ietf.ietfauth.views.profile'))
self.assertEqual(person.volunteer_set.count(), 1)
self.assertEqual(person.volunteer_set.first().nomcom, nomcom2)
r = self.client.get(url)
self.assertEqual(r.status_code, 200)
q = PyQuery(r.content)
self.assertEqual(len(q('#id_nomcoms div.checkbox')), 1)
self.assertNotIn(f'{nomcom.year()}/', q('#already-volunteered').text())
self.assertIn(f'{nomcom2.year()}/', q('#already-volunteered').text())
    def test_suggest_affiliation(self):
        """Exercise suggest_affiliation() as affiliation sources accumulate.

        Each added source (document authorship, nomcom volunteering, meeting
        registration) changes the suggestion, so later-added sources appear
        to take precedence here.
        NOTE(review): whether precedence is by source type or by recency
        cannot be told from this test alone -- confirm against the
        suggest_affiliation() implementation.
        """
        person = PersonFactory()
        # A person with no history yields an empty suggestion.
        self.assertEqual(suggest_affiliation(person), '')
        # Authorship of a document with a revision event supplies an affiliation.
        da = DocumentAuthorFactory(person=person,affiliation='auth_affil')
        NewRevisionDocEventFactory(doc=da.document)
        self.assertEqual(suggest_affiliation(person), 'auth_affil')
        # A nomcom volunteer record overrides the authorship-derived value.
        nc = NomComFactory()
        nc.volunteer_set.create(person=person,affiliation='volunteer_affil')
        self.assertEqual(suggest_affiliation(person), 'volunteer_affil')
        # A meeting registration overrides the volunteer record.
        MeetingRegistrationFactory(person=person, affiliation='meeting_affil')
        self.assertEqual(suggest_affiliation(person), 'meeting_affil')
| 47.894945 | 193 | 0.633306 |
acf604270bd6cc95bde3e62bf80f80f693e06c31 | 5,630 | py | Python | pype32/datadirs.py | crackinglandia/pype32 | 192fd14dfc0dd36d953739a81c17fbaf5e3d6076 | [
"BSD-3-Clause"
] | 72 | 2015-04-07T13:23:03.000Z | 2021-12-14T05:58:53.000Z | pype32/datadirs.py | larsborn/pype32 | 192fd14dfc0dd36d953739a81c17fbaf5e3d6076 | [
"BSD-3-Clause"
] | 17 | 2015-04-27T22:26:56.000Z | 2019-04-04T07:38:06.000Z | pype32/datadirs.py | crackinglandia/pype32 | 192fd14dfc0dd36d953739a81c17fbaf5e3d6076 | [
"BSD-3-Clause"
] | 25 | 2015-02-27T14:22:27.000Z | 2021-09-03T06:45:09.000Z | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2013, Nahuel Riva
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice,this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
Data directory classes.
"""
__revision__ = "$Id$"
__all__ = [
"Directory",
"DataDirectory",
]
import consts
import excep
import datatypes
from struct import pack
# Human-readable names for the 16 data-directory slots, in the exact order
# they appear in IMAGE_OPTIONAL_HEADER.DataDirectory (matching the winnt.h
# IMAGE_DIRECTORY_ENTRY_* indices). Index 8 and 15 are reserved slots.
dirs = ["EXPORT_DIRECTORY","IMPORT_DIRECTORY","RESOURCE_DIRECTORY","EXCEPTION_DIRECTORY","SECURITY_DIRECTORY",\
"RELOCATION_DIRECTORY","DEBUG_DIRECTORY","ARCHITECTURE_DIRECTORY","RESERVED_DIRECTORY","TLS_DIRECTORY",\
"CONFIGURATION_DIRECTORY","BOUND_IMPORT_DIRECTORY","IAT_DIRECTORY","DELAY_IMPORT_DIRECTORY","NET_METADATA_DIRECTORY",\
"RESERVED_DIRECTORY"]
class Directory(object):
    """A single data-directory entry: an rva/size pair plus bookkeeping."""

    def __init__(self, shouldPack = True):
        """
        Class representation of the C{IMAGE_DATA_DIRECTORY} structure.

        @see: U{http://msdn.microsoft.com/es-es/library/windows/desktop/ms680305%28v=vs.85%29.aspx}

        @type shouldPack: bool
        @param shouldPack: If set to C{True} the L{Directory} object will be packed. If set to C{False} the object won't be packed.
        """
        self.shouldPack = shouldPack
        self.name = datatypes.String("")
        self.rva = datatypes.DWORD(0) #: L{DWORD} rva.
        self.size = datatypes.DWORD(0) #: L{DWORD} size.
        self.info = None #: Holds the parsed information of the directory, if any.

    def __str__(self):
        # Only rva and size belong to the on-disk layout; name and info are
        # bookkeeping attributes and are deliberately not serialized.
        return "".join([str(self.rva), str(self.size)])

    def __len__(self):
        return len(self.__str__())

    def __dir__(self):
        return sorted(self.__dict__)

    @staticmethod
    def parse(readDataInstance):
        """
        Returns a L{Directory}-like object built from a stream.

        @type readDataInstance: L{ReadData}
        @param readDataInstance: L{ReadData} object to read from.

        @rtype: L{Directory}
        @return: L{Directory} object with rva and size filled in.
        """
        newDirectory = Directory()
        newDirectory.rva.value = readDataInstance.readDword()
        newDirectory.size.value = readDataInstance.readDword()
        return newDirectory

    def getType(self):
        """Returns a value that identifies the L{Directory} object."""
        return consts.IMAGE_DATA_DIRECTORY
class DataDirectory(list):
    """Fixed-size array of L{Directory} entries from the PE optional header."""

    def __init__(self, shouldPack = True):
        """
        Array of L{Directory} objects.

        @type shouldPack: bool
        @param shouldPack: If set to C{True} the L{DataDirectory} object will be packed. If set to C{False} the object won't packed.
        """
        self.shouldPack = shouldPack
        for i in range(consts.IMAGE_NUMBEROF_DIRECTORY_ENTRIES):
            # Named 'directory' (not 'dir') so the builtin dir() is not shadowed.
            directory = Directory()
            directory.name.value = dirs[i]
            self.append(directory)

    def __str__(self):
        # join() assembles the packed rva/size stream in one pass instead of
        # the quadratic '+=' string accumulation.
        return "".join(str(directory) for directory in self)

    @staticmethod
    def parse(readDataInstance):
        """Returns a L{DataDirectory}-like object.

        @type readDataInstance: L{ReadData}
        @param readDataInstance: L{ReadData} object to read from.

        @rtype: L{DataDirectory}
        @return: The L{DataDirectory} object containing L{consts.IMAGE_NUMBEROF_DIRECTORY_ENTRIES} L{Directory} objects.

        @raise DirectoryEntriesLengthException: The L{ReadData} instance has an incorrect number of L{Directory} objects.
        """
        # Every entry is two DWORDs (rva + size) = 8 bytes on disk.
        if len(readDataInstance) == consts.IMAGE_NUMBEROF_DIRECTORY_ENTRIES * 8:
            newDataDirectory = DataDirectory()
            for i in range(consts.IMAGE_NUMBEROF_DIRECTORY_ENTRIES):
                newDataDirectory[i].name.value = dirs[i]
                newDataDirectory[i].rva.value = readDataInstance.readDword()
                newDataDirectory[i].size.value = readDataInstance.readDword()
        else:
            raise excep.DirectoryEntriesLengthException("The IMAGE_NUMBEROF_DIRECTORY_ENTRIES does not match with the length of the passed argument.")
        return newDataDirectory
| 39.647887 | 150 | 0.670515 |
acf604653c9c2175f504adb67440d018004fb92e | 100,450 | py | Python | xarray/tests/test_plot.py | aijams/xarray | 4434f034a36886609ac0492d3307954163ecbea6 | [
"CC-BY-4.0",
"PSF-2.0",
"BSD-2-Clause",
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | xarray/tests/test_plot.py | aijams/xarray | 4434f034a36886609ac0492d3307954163ecbea6 | [
"CC-BY-4.0",
"PSF-2.0",
"BSD-2-Clause",
"Apache-2.0",
"BSD-3-Clause"
] | 5 | 2021-07-26T23:07:44.000Z | 2022-02-14T23:07:25.000Z | xarray/tests/test_plot.py | aijams/xarray | 4434f034a36886609ac0492d3307954163ecbea6 | [
"CC-BY-4.0",
"PSF-2.0",
"BSD-2-Clause",
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | import contextlib
import inspect
from copy import copy
from datetime import datetime
from typing import Any, Dict, Union
import numpy as np
import pandas as pd
import pytest
import xarray as xr
import xarray.plot as xplt
from xarray import DataArray, Dataset
from xarray.plot.dataset_plot import _infer_meta_data
from xarray.plot.plot import _infer_interval_breaks
from xarray.plot.utils import (
_build_discrete_cmap,
_color_palette,
_determine_cmap_params,
_maybe_gca,
get_axis,
label_from_attrs,
)
from . import (
assert_array_equal,
assert_equal,
has_nc_time_axis,
requires_cartopy,
requires_cftime,
requires_matplotlib,
requires_matplotlib_3_3_0,
requires_nc_time_axis,
requires_seaborn,
)
# import mpl and change the backend before other mpl imports
try:
import matplotlib as mpl
import matplotlib.pyplot as plt
import mpl_toolkits # type: ignore
except ImportError:
pass
try:
import cartopy
except ImportError:
pass
@contextlib.contextmanager
def figure_context(*args, **kwargs):
    """Context manager that closes all open figures on exit, even when the
    wrapped body raised (so a failed test cannot leak figures)."""
    try:
        yield
    finally:
        plt.close("all")
@pytest.fixture(scope="function", autouse=True)
def test_all_figures_closed():
    """meta-test to ensure all figures are closed at the end of a test

    NOTE(review): the decorator uses scope="function" while the note below
    says the scope is kept to module -- confirm which one is intended.

    Notes: Scope is kept to module (only invoke this function once per test
    module) else tests cannot be run in parallel (locally). Disadvantage: only
    catches one open figure per run. May still give a false positive if tests
    are run in parallel.
    """
    # Run the test body first, then inspect pyplot's figure registry.
    yield None

    # Any figure still registered means a test forgot to call plt.close().
    open_figs = len(plt.get_fignums())

    if open_figs:
        raise RuntimeError(
            f"tests did not close all figures ({open_figs} figures open)"
        )
# NOTE(review): pytest marks on a helper whose name lacks the test_ prefix are
# never applied by collection -- confirm whether they are leftovers.
@pytest.mark.flaky
@pytest.mark.skip(reason="maybe flaky")
def text_in_fig():
    """
    Return the set of all text in the figure
    """
    # findobj() walks every artist of the current figure; building a set
    # collapses duplicate strings.
    return {t.get_text() for t in plt.gcf().findobj(mpl.text.Text)}
def find_possible_colorbars():
    """Collect QuadMesh artists from the current figure.

    nb. this also matches meshes produced by pcolormesh, not only colorbars.
    """
    current_figure = plt.gcf()
    return current_figure.findobj(mpl.collections.QuadMesh)
def substring_in_axes(substring, ax):
    """
    Return True if a substring is found anywhere in an axes
    """
    alltxt = {t.get_text() for t in ax.findobj(mpl.text.Text)}
    # any() with a generator replaces the manual loop + early return and
    # short-circuits on the first matching text, exactly like the original.
    return any(substring in txt for txt in alltxt)
def substring_not_in_axes(substring, ax):
    """
    Return True if a substring is not found anywhere in an axes
    """
    alltxt = {t.get_text() for t in ax.findobj(mpl.text.Text)}
    # Generator form of the old list-of-bools + all(): no text artist may
    # contain the substring; avoids materializing the intermediate list.
    return all(substring not in txt for txt in alltxt)
def easy_array(shape, start=0, stop=1):
    """Return an array of the given shape filled with evenly spaced values.

    The flattened values run linearly from ``start`` to ``stop`` inclusive;
    ``shape`` is a tuple like ``(2, 3)``.
    """
    total = np.prod(shape)
    flat = np.linspace(start, stop, num=total)
    return flat.reshape(shape)
def get_colorbar_label(colorbar):
    """Return the colorbar's label: vertical bars label their y axis,
    horizontal bars their x axis."""
    cb_axes = colorbar.ax
    is_vertical = colorbar.orientation == "vertical"
    return cb_axes.get_ylabel() if is_vertical else cb_axes.get_xlabel()
@requires_matplotlib
class PlotTestCase:
    """Shared base class for the plotting test suites below."""

    @pytest.fixture(autouse=True)
    def setup(self):
        # Run the test body first, then clean up its figures.
        yield
        # Remove all matplotlib figures
        plt.close("all")

    def pass_in_axis(self, plotmethod, subplot_kw=None):
        # Helper: verify that a plot method draws into an explicitly
        # passed-in Axes rather than the current one.
        fig, axes = plt.subplots(ncols=2, subplot_kw=subplot_kw)
        plotmethod(ax=axes[0])
        assert axes[0].has_data()

    @pytest.mark.slow
    def imshow_called(self, plotmethod):
        # True when calling `plotmethod` left at least one AxesImage on the
        # current axes, i.e. imshow was used under the hood.
        plotmethod()
        images = plt.gca().findobj(mpl.image.AxesImage)
        return len(images) > 0

    def contourf_called(self, plotmethod):
        # True when calling `plotmethod` left at least one PathCollection on
        # the current axes (contourf draws filled paths).
        plotmethod()
        paths = plt.gca().findobj(mpl.collections.PathCollection)
        return len(paths) > 0
class TestPlot(PlotTestCase):
@pytest.fixture(autouse=True)
def setup_array(self):
self.darray = DataArray(easy_array((2, 3, 4)))
def test_accessor(self):
from ..plot.plot import _PlotMethods
assert DataArray.plot is _PlotMethods
assert isinstance(self.darray.plot, _PlotMethods)
def test_label_from_attrs(self):
da = self.darray.copy()
assert "" == label_from_attrs(da)
da.name = "a"
da.attrs["units"] = "a_units"
da.attrs["long_name"] = "a_long_name"
da.attrs["standard_name"] = "a_standard_name"
assert "a_long_name [a_units]" == label_from_attrs(da)
da.attrs.pop("long_name")
assert "a_standard_name [a_units]" == label_from_attrs(da)
da.attrs.pop("units")
assert "a_standard_name" == label_from_attrs(da)
da.attrs["units"] = "a_units"
da.attrs.pop("standard_name")
assert "a [a_units]" == label_from_attrs(da)
da.attrs.pop("units")
assert "a" == label_from_attrs(da)
def test1d(self):
self.darray[:, 0, 0].plot()
with pytest.raises(ValueError, match=r"x must be one of None, 'dim_0'"):
self.darray[:, 0, 0].plot(x="dim_1")
with pytest.raises(TypeError, match=r"complex128"):
(self.darray[:, 0, 0] + 1j).plot()
def test_1d_bool(self):
xr.ones_like(self.darray[:, 0, 0], dtype=bool).plot()
def test_1d_x_y_kw(self):
z = np.arange(10)
da = DataArray(np.cos(z), dims=["z"], coords=[z], name="f")
xy = [[None, None], [None, "z"], ["z", None]]
f, ax = plt.subplots(3, 1)
for aa, (x, y) in enumerate(xy):
da.plot(x=x, y=y, ax=ax.flat[aa])
with pytest.raises(ValueError, match=r"Cannot specify both"):
da.plot(x="z", y="z")
error_msg = "must be one of None, 'z'"
with pytest.raises(ValueError, match=rf"x {error_msg}"):
da.plot(x="f")
with pytest.raises(ValueError, match=rf"y {error_msg}"):
da.plot(y="f")
def test_multiindex_level_as_coord(self):
da = xr.DataArray(
np.arange(5),
dims="x",
coords=dict(a=("x", np.arange(5)), b=("x", np.arange(5, 10))),
)
da = da.set_index(x=["a", "b"])
for x in ["a", "b"]:
h = da.plot(x=x)[0]
assert_array_equal(h.get_xdata(), da[x].values)
for y in ["a", "b"]:
h = da.plot(y=y)[0]
assert_array_equal(h.get_ydata(), da[y].values)
# Test for bug in GH issue #2725
def test_infer_line_data(self):
current = DataArray(
name="I",
data=np.array([5, 8]),
dims=["t"],
coords={
"t": (["t"], np.array([0.1, 0.2])),
"V": (["t"], np.array([100, 200])),
},
)
# Plot current against voltage
line = current.plot.line(x="V")[0]
assert_array_equal(line.get_xdata(), current.coords["V"].values)
# Plot current against time
line = current.plot.line()[0]
assert_array_equal(line.get_xdata(), current.coords["t"].values)
def test_line_plot_along_1d_coord(self):
# Test for bug in GH #3334
x_coord = xr.DataArray(data=[0.1, 0.2], dims=["x"])
t_coord = xr.DataArray(data=[10, 20], dims=["t"])
da = xr.DataArray(
data=np.array([[0, 1], [5, 9]]),
dims=["x", "t"],
coords={"x": x_coord, "time": t_coord},
)
line = da.plot(x="time", hue="x")[0]
assert_array_equal(line.get_xdata(), da.coords["time"].values)
line = da.plot(y="time", hue="x")[0]
assert_array_equal(line.get_ydata(), da.coords["time"].values)
def test_line_plot_wrong_hue(self):
da = xr.DataArray(
data=np.array([[0, 1], [5, 9]]),
dims=["x", "t"],
)
with pytest.raises(ValueError, match="hue must be one of"):
da.plot(x="t", hue="wrong_coord")
def test_2d_line(self):
with pytest.raises(ValueError, match=r"hue"):
self.darray[:, :, 0].plot.line()
self.darray[:, :, 0].plot.line(hue="dim_1")
self.darray[:, :, 0].plot.line(x="dim_1")
self.darray[:, :, 0].plot.line(y="dim_1")
self.darray[:, :, 0].plot.line(x="dim_0", hue="dim_1")
self.darray[:, :, 0].plot.line(y="dim_0", hue="dim_1")
with pytest.raises(ValueError, match=r"Cannot"):
self.darray[:, :, 0].plot.line(x="dim_1", y="dim_0", hue="dim_1")
def test_2d_line_accepts_legend_kw(self):
self.darray[:, :, 0].plot.line(x="dim_0", add_legend=False)
assert not plt.gca().get_legend()
plt.cla()
self.darray[:, :, 0].plot.line(x="dim_0", add_legend=True)
assert plt.gca().get_legend()
# check whether legend title is set
assert plt.gca().get_legend().get_title().get_text() == "dim_1"
def test_2d_line_accepts_x_kw(self):
self.darray[:, :, 0].plot.line(x="dim_0")
assert plt.gca().get_xlabel() == "dim_0"
plt.cla()
self.darray[:, :, 0].plot.line(x="dim_1")
assert plt.gca().get_xlabel() == "dim_1"
def test_2d_line_accepts_hue_kw(self):
self.darray[:, :, 0].plot.line(hue="dim_0")
assert plt.gca().get_legend().get_title().get_text() == "dim_0"
plt.cla()
self.darray[:, :, 0].plot.line(hue="dim_1")
assert plt.gca().get_legend().get_title().get_text() == "dim_1"
def test_2d_coords_line_plot(self):
lon, lat = np.meshgrid(np.linspace(-20, 20, 5), np.linspace(0, 30, 4))
lon += lat / 10
lat += lon / 10
da = xr.DataArray(
np.arange(20).reshape(4, 5),
dims=["y", "x"],
coords={"lat": (("y", "x"), lat), "lon": (("y", "x"), lon)},
)
with figure_context():
hdl = da.plot.line(x="lon", hue="x")
assert len(hdl) == 5
with figure_context():
hdl = da.plot.line(x="lon", hue="y")
assert len(hdl) == 4
with pytest.raises(ValueError, match="For 2D inputs, hue must be a dimension"):
da.plot.line(x="lon", hue="lat")
def test_2d_coord_line_plot_coords_transpose_invariant(self):
# checks for bug reported in GH #3933
x = np.arange(10)
y = np.arange(20)
ds = xr.Dataset(coords={"x": x, "y": y})
for z in [ds.y + ds.x, ds.x + ds.y]:
ds = ds.assign_coords(z=z)
ds["v"] = ds.x + ds.y
ds["v"].plot.line(y="z", hue="x")
def test_2d_before_squeeze(self):
a = DataArray(easy_array((1, 5)))
a.plot()
def test2d_uniform_calls_imshow(self):
assert self.imshow_called(self.darray[:, :, 0].plot.imshow)
@pytest.mark.slow
def test2d_nonuniform_calls_contourf(self):
a = self.darray[:, :, 0]
a.coords["dim_1"] = [2, 1, 89]
assert self.contourf_called(a.plot.contourf)
def test2d_1d_2d_coordinates_contourf(self):
sz = (20, 10)
depth = easy_array(sz)
a = DataArray(
easy_array(sz),
dims=["z", "time"],
coords={"depth": (["z", "time"], depth), "time": np.linspace(0, 1, sz[1])},
)
a.plot.contourf(x="time", y="depth")
a.plot.contourf(x="depth", y="time")
def test2d_1d_2d_coordinates_pcolormesh(self):
# Test with equal coordinates to catch bug from #5097
sz = 10
y2d, x2d = np.meshgrid(np.arange(sz), np.arange(sz))
a = DataArray(
easy_array((sz, sz)),
dims=["x", "y"],
coords={"x2d": (["x", "y"], x2d), "y2d": (["x", "y"], y2d)},
)
for x, y in [
("x", "y"),
("y", "x"),
("x2d", "y"),
("y", "x2d"),
("x", "y2d"),
("y2d", "x"),
("x2d", "y2d"),
("y2d", "x2d"),
]:
p = a.plot.pcolormesh(x=x, y=y)
v = p.get_paths()[0].vertices
# Check all vertices are different, except last vertex which should be the
# same as the first
_, unique_counts = np.unique(v[:-1], axis=0, return_counts=True)
assert np.all(unique_counts == 1)
def test_contourf_cmap_set(self):
a = DataArray(easy_array((4, 4)), dims=["z", "time"])
cmap = mpl.cm.viridis
# use copy to ensure cmap is not changed by contourf()
# Set vmin and vmax so that _build_discrete_colormap is called with
# extend='both'. extend is passed to
# mpl.colors.from_levels_and_colors(), which returns a result with
# sensible under and over values if extend='both', but not if
# extend='neither' (but if extend='neither' the under and over values
# would not be used because the data would all be within the plotted
# range)
pl = a.plot.contourf(cmap=copy(cmap), vmin=0.1, vmax=0.9)
# check the set_bad color
assert_array_equal(
pl.cmap(np.ma.masked_invalid([np.nan]))[0],
cmap(np.ma.masked_invalid([np.nan]))[0],
)
# check the set_under color
assert pl.cmap(-np.inf) == cmap(-np.inf)
# check the set_over color
assert pl.cmap(np.inf) == cmap(np.inf)
def test_contourf_cmap_set_with_bad_under_over(self):
a = DataArray(easy_array((4, 4)), dims=["z", "time"])
# make a copy here because we want a local cmap that we will modify.
cmap = copy(mpl.cm.viridis)
cmap.set_bad("w")
# check we actually changed the set_bad color
assert np.all(
cmap(np.ma.masked_invalid([np.nan]))[0]
!= mpl.cm.viridis(np.ma.masked_invalid([np.nan]))[0]
)
cmap.set_under("r")
# check we actually changed the set_under color
assert cmap(-np.inf) != mpl.cm.viridis(-np.inf)
cmap.set_over("g")
# check we actually changed the set_over color
assert cmap(np.inf) != mpl.cm.viridis(-np.inf)
# copy to ensure cmap is not changed by contourf()
pl = a.plot.contourf(cmap=copy(cmap))
# check the set_bad color has been kept
assert_array_equal(
pl.cmap(np.ma.masked_invalid([np.nan]))[0],
cmap(np.ma.masked_invalid([np.nan]))[0],
)
# check the set_under color has been kept
assert pl.cmap(-np.inf) == cmap(-np.inf)
# check the set_over color has been kept
assert pl.cmap(np.inf) == cmap(np.inf)
def test3d(self):
self.darray.plot()
def test_can_pass_in_axis(self):
self.pass_in_axis(self.darray.plot)
def test__infer_interval_breaks(self):
assert_array_equal([-0.5, 0.5, 1.5], _infer_interval_breaks([0, 1]))
assert_array_equal(
[-0.5, 0.5, 5.0, 9.5, 10.5], _infer_interval_breaks([0, 1, 9, 10])
)
assert_array_equal(
pd.date_range("20000101", periods=4) - np.timedelta64(12, "h"),
_infer_interval_breaks(pd.date_range("20000101", periods=3)),
)
# make a bounded 2D array that we will center and re-infer
xref, yref = np.meshgrid(np.arange(6), np.arange(5))
cx = (xref[1:, 1:] + xref[:-1, :-1]) / 2
cy = (yref[1:, 1:] + yref[:-1, :-1]) / 2
x = _infer_interval_breaks(cx, axis=1)
x = _infer_interval_breaks(x, axis=0)
y = _infer_interval_breaks(cy, axis=1)
y = _infer_interval_breaks(y, axis=0)
np.testing.assert_allclose(xref, x)
np.testing.assert_allclose(yref, y)
# test that ValueError is raised for non-monotonic 1D inputs
with pytest.raises(ValueError):
_infer_interval_breaks(np.array([0, 2, 1]), check_monotonic=True)
def test_geo_data(self):
# Regression test for gh2250
# Realistic coordinates taken from the example dataset
lat = np.array(
[
[16.28, 18.48, 19.58, 19.54, 18.35],
[28.07, 30.52, 31.73, 31.68, 30.37],
[39.65, 42.27, 43.56, 43.51, 42.11],
[50.52, 53.22, 54.55, 54.50, 53.06],
]
)
lon = np.array(
[
[-126.13, -113.69, -100.92, -88.04, -75.29],
[-129.27, -115.62, -101.54, -87.32, -73.26],
[-133.10, -118.00, -102.31, -86.42, -70.76],
[-137.85, -120.99, -103.28, -85.28, -67.62],
]
)
data = np.sqrt(lon ** 2 + lat ** 2)
da = DataArray(
data,
dims=("y", "x"),
coords={"lon": (("y", "x"), lon), "lat": (("y", "x"), lat)},
)
da.plot(x="lon", y="lat")
ax = plt.gca()
assert ax.has_data()
da.plot(x="lat", y="lon")
ax = plt.gca()
assert ax.has_data()
def test_datetime_dimension(self):
nrow = 3
ncol = 4
time = pd.date_range("2000-01-01", periods=nrow)
a = DataArray(
easy_array((nrow, ncol)), coords=[("time", time), ("y", range(ncol))]
)
a.plot()
ax = plt.gca()
assert ax.has_data()
@pytest.mark.slow
@pytest.mark.filterwarnings("ignore:tight_layout cannot")
def test_convenient_facetgrid(self):
a = easy_array((10, 15, 4))
d = DataArray(a, dims=["y", "x", "z"])
d.coords["z"] = list("abcd")
g = d.plot(x="x", y="y", col="z", col_wrap=2, cmap="cool")
assert_array_equal(g.axes.shape, [2, 2])
for ax in g.axes.flat:
assert ax.has_data()
with pytest.raises(ValueError, match=r"[Ff]acet"):
d.plot(x="x", y="y", col="z", ax=plt.gca())
with pytest.raises(ValueError, match=r"[Ff]acet"):
d[0].plot(x="x", y="y", col="z", ax=plt.gca())
@pytest.mark.slow
def test_subplot_kws(self):
a = easy_array((10, 15, 4))
d = DataArray(a, dims=["y", "x", "z"])
d.coords["z"] = list("abcd")
g = d.plot(
x="x",
y="y",
col="z",
col_wrap=2,
cmap="cool",
subplot_kws=dict(facecolor="r"),
)
for ax in g.axes.flat:
# mpl V2
assert ax.get_facecolor()[0:3] == mpl.colors.to_rgb("r")
@pytest.mark.slow
def test_plot_size(self):
self.darray[:, 0, 0].plot(figsize=(13, 5))
assert tuple(plt.gcf().get_size_inches()) == (13, 5)
self.darray.plot(figsize=(13, 5))
assert tuple(plt.gcf().get_size_inches()) == (13, 5)
self.darray.plot(size=5)
assert plt.gcf().get_size_inches()[1] == 5
self.darray.plot(size=5, aspect=2)
assert tuple(plt.gcf().get_size_inches()) == (10, 5)
with pytest.raises(ValueError, match=r"cannot provide both"):
self.darray.plot(ax=plt.gca(), figsize=(3, 4))
with pytest.raises(ValueError, match=r"cannot provide both"):
self.darray.plot(size=5, figsize=(3, 4))
with pytest.raises(ValueError, match=r"cannot provide both"):
self.darray.plot(size=5, ax=plt.gca())
with pytest.raises(ValueError, match=r"cannot provide `aspect`"):
self.darray.plot(aspect=1)
@pytest.mark.slow
@pytest.mark.filterwarnings("ignore:tight_layout cannot")
def test_convenient_facetgrid_4d(self):
a = easy_array((10, 15, 2, 3))
d = DataArray(a, dims=["y", "x", "columns", "rows"])
g = d.plot(x="x", y="y", col="columns", row="rows")
assert_array_equal(g.axes.shape, [3, 2])
for ax in g.axes.flat:
assert ax.has_data()
with pytest.raises(ValueError, match=r"[Ff]acet"):
d.plot(x="x", y="y", col="columns", ax=plt.gca())
def test_coord_with_interval(self):
"""Test line plot with intervals."""
bins = [-1, 0, 1, 2]
self.darray.groupby_bins("dim_0", bins).mean(...).plot()
def test_coord_with_interval_x(self):
"""Test line plot with intervals explicitly on x axis."""
bins = [-1, 0, 1, 2]
self.darray.groupby_bins("dim_0", bins).mean(...).plot(x="dim_0_bins")
def test_coord_with_interval_y(self):
"""Test line plot with intervals explicitly on y axis."""
bins = [-1, 0, 1, 2]
self.darray.groupby_bins("dim_0", bins).mean(...).plot(y="dim_0_bins")
def test_coord_with_interval_xy(self):
"""Test line plot with intervals on both x and y axes."""
bins = [-1, 0, 1, 2]
self.darray.groupby_bins("dim_0", bins).mean(...).dim_0_bins.plot()
@pytest.mark.parametrize("dim", ("x", "y"))
def test_labels_with_units_with_interval(self, dim):
"""Test line plot with intervals and a units attribute."""
bins = [-1, 0, 1, 2]
arr = self.darray.groupby_bins("dim_0", bins).mean(...)
arr.dim_0_bins.attrs["units"] = "m"
(mappable,) = arr.plot(**{dim: "dim_0_bins"})
ax = mappable.figure.gca()
actual = getattr(ax, f"get_{dim}label")()
expected = "dim_0_bins_center [m]"
assert actual == expected
class TestPlot1D(PlotTestCase):
    """Tests for 1D line plots: axis labelling, kwargs and edge cases."""

    @pytest.fixture(autouse=True)
    def setUp(self):
        # A small 1D array with a coordinate carrying a units attribute,
        # so label tests can check "name [units]" formatting.
        d = [0, 1.1, 0, 2]
        self.darray = DataArray(d, coords={"period": range(len(d))}, dims="period")
        self.darray.period.attrs["units"] = "s"

    def test_xlabel_is_index_name(self):
        self.darray.plot()
        assert "period [s]" == plt.gca().get_xlabel()

    def test_no_label_name_on_x_axis(self):
        # With the coord on y, the unnamed data leaves x unlabeled.
        self.darray.plot(y="period")
        assert "" == plt.gca().get_xlabel()

    def test_no_label_name_on_y_axis(self):
        self.darray.plot()
        assert "" == plt.gca().get_ylabel()

    def test_ylabel_is_data_name(self):
        self.darray.name = "temperature"
        self.darray.attrs["units"] = "degrees_Celsius"
        self.darray.plot()
        assert "temperature [degrees_Celsius]" == plt.gca().get_ylabel()

    def test_xlabel_is_data_name(self):
        self.darray.name = "temperature"
        self.darray.attrs["units"] = "degrees_Celsius"
        self.darray.plot(y="period")
        assert "temperature [degrees_Celsius]" == plt.gca().get_xlabel()

    def test_format_string(self):
        # Matplotlib-style format strings are forwarded.
        self.darray.plot.line("ro")

    def test_can_pass_in_axis(self):
        self.pass_in_axis(self.darray.plot.line)

    def test_nonnumeric_index_raises_typeerror(self):
        a = DataArray([1, 2, 3], {"letter": ["a", "b", "c"]}, dims="letter")
        with pytest.raises(TypeError, match=r"[Pp]lot"):
            a.plot.line()

    def test_primitive_returned(self):
        p = self.darray.plot.line()
        assert isinstance(p[0], mpl.lines.Line2D)

    @pytest.mark.slow
    def test_plot_nans(self):
        # NaNs in the data must not break line plotting.
        self.darray[1] = np.nan
        self.darray.plot.line()

    def test_x_ticks_are_rotated_for_time(self):
        time = pd.date_range("2000-01-01", "2000-01-10")
        a = DataArray(np.arange(len(time)), [("t", time)])
        a.plot.line()
        rotation = plt.gca().get_xticklabels()[0].get_rotation()
        assert rotation != 0

    def test_xyincrease_false_changes_axes(self):
        # Both axes should be inverted (limits decreasing).
        self.darray.plot.line(xincrease=False, yincrease=False)
        xlim = plt.gca().get_xlim()
        ylim = plt.gca().get_ylim()
        diffs = xlim[1] - xlim[0], ylim[1] - ylim[0]
        assert all(x < 0 for x in diffs)

    def test_slice_in_title(self):
        # Scalar (non-dimension) coordinates show up in the title.
        self.darray.coords["d"] = 10
        self.darray.plot.line()
        title = plt.gca().get_title()
        assert "d = 10" == title
class TestPlotStep(PlotTestCase):
    """Tests for step plots, including interval-valued coordinates."""

    @pytest.fixture(autouse=True)
    def setUp(self):
        self.darray = DataArray(easy_array((2, 3, 4)))

    def test_step(self):
        hdl = self.darray[0, 0].plot.step()
        assert "steps" in hdl[0].get_drawstyle()

    @pytest.mark.parametrize("where", ["pre", "post", "mid"])
    def test_step_with_where(self, where):
        # The `where` kwarg maps onto matplotlib's "steps-<where>" drawstyle.
        hdl = self.darray[0, 0].plot.step(where=where)
        assert hdl[0].get_drawstyle() == f"steps-{where}"

    def test_coord_with_interval_step(self):
        """Test step plot with intervals."""
        bins = [-1, 0, 1, 2]
        self.darray.groupby_bins("dim_0", bins).mean(...).plot.step()
        # Each interval contributes two x points (its two edges).
        assert len(plt.gca().lines[0].get_xdata()) == ((len(bins) - 1) * 2)

    def test_coord_with_interval_step_x(self):
        """Test step plot with intervals explicitly on x axis."""
        bins = [-1, 0, 1, 2]
        self.darray.groupby_bins("dim_0", bins).mean(...).plot.step(x="dim_0_bins")
        assert len(plt.gca().lines[0].get_xdata()) == ((len(bins) - 1) * 2)

    def test_coord_with_interval_step_y(self):
        """Test step plot with intervals explicitly on y axis."""
        bins = [-1, 0, 1, 2]
        self.darray.groupby_bins("dim_0", bins).mean(...).plot.step(y="dim_0_bins")
        assert len(plt.gca().lines[0].get_xdata()) == ((len(bins) - 1) * 2)
class TestPlotHistogram(PlotTestCase):
    """Tests for DataArray.plot.hist()."""

    @pytest.fixture(autouse=True)
    def setUp(self):
        self.darray = DataArray(easy_array((2, 3, 4)))

    def test_3d_array(self):
        # hist() flattens, so n-dimensional input is fine.
        self.darray.plot.hist()

    def test_xlabel_uses_name(self):
        self.darray.name = "testpoints"
        self.darray.attrs["units"] = "testunits"
        self.darray.plot.hist()
        assert "testpoints [testunits]" == plt.gca().get_xlabel()

    def test_title_is_histogram(self):
        self.darray.plot.hist()
        assert "Histogram" == plt.gca().get_title()

    def test_can_pass_in_kwargs(self):
        # kwargs (here `bins`) are forwarded to matplotlib's hist().
        nbins = 5
        self.darray.plot.hist(bins=nbins)
        assert nbins == len(plt.gca().patches)

    def test_can_pass_in_axis(self):
        self.pass_in_axis(self.darray.plot.hist)

    def test_primitive_returned(self):
        # hist() returns (counts, edges, patches); check the patch container.
        h = self.darray.plot.hist()
        assert isinstance(h[-1][0], mpl.patches.Rectangle)

    @pytest.mark.slow
    def test_plot_nans(self):
        self.darray[0, 0, 0] = np.nan
        self.darray.plot.hist()

    def test_hist_coord_with_interval(self):
        # Interval-valued coordinates (from groupby_bins) must plot too.
        (
            self.darray.groupby_bins("dim_0", [-1, 0, 1, 2])
            .mean(...)
            .plot.hist(range=(-1, 2))
        )
@requires_matplotlib
class TestDetermineCmapParams:
@pytest.fixture(autouse=True)
def setUp(self):
self.data = np.linspace(0, 1, num=100)
def test_robust(self):
cmap_params = _determine_cmap_params(self.data, robust=True)
assert cmap_params["vmin"] == np.percentile(self.data, 2)
assert cmap_params["vmax"] == np.percentile(self.data, 98)
assert cmap_params["cmap"] == "viridis"
assert cmap_params["extend"] == "both"
assert cmap_params["levels"] is None
assert cmap_params["norm"] is None
def test_center(self):
cmap_params = _determine_cmap_params(self.data, center=0.5)
assert cmap_params["vmax"] - 0.5 == 0.5 - cmap_params["vmin"]
assert cmap_params["cmap"] == "RdBu_r"
assert cmap_params["extend"] == "neither"
assert cmap_params["levels"] is None
assert cmap_params["norm"] is None
def test_cmap_sequential_option(self):
with xr.set_options(cmap_sequential="magma"):
cmap_params = _determine_cmap_params(self.data)
assert cmap_params["cmap"] == "magma"
def test_cmap_sequential_explicit_option(self):
with xr.set_options(cmap_sequential=mpl.cm.magma):
cmap_params = _determine_cmap_params(self.data)
assert cmap_params["cmap"] == mpl.cm.magma
def test_cmap_divergent_option(self):
with xr.set_options(cmap_divergent="magma"):
cmap_params = _determine_cmap_params(self.data, center=0.5)
assert cmap_params["cmap"] == "magma"
def test_nan_inf_are_ignored(self):
cmap_params1 = _determine_cmap_params(self.data)
data = self.data
data[50:55] = np.nan
data[56:60] = np.inf
cmap_params2 = _determine_cmap_params(data)
assert cmap_params1["vmin"] == cmap_params2["vmin"]
assert cmap_params1["vmax"] == cmap_params2["vmax"]
@pytest.mark.slow
def test_integer_levels(self):
data = self.data + 1
# default is to cover full data range but with no guarantee on Nlevels
for level in np.arange(2, 10, dtype=int):
cmap_params = _determine_cmap_params(data, levels=level)
assert cmap_params["vmin"] is None
assert cmap_params["vmax"] is None
assert cmap_params["norm"].vmin == cmap_params["levels"][0]
assert cmap_params["norm"].vmax == cmap_params["levels"][-1]
assert cmap_params["extend"] == "neither"
# with min max we are more strict
cmap_params = _determine_cmap_params(
data, levels=5, vmin=0, vmax=5, cmap="Blues"
)
assert cmap_params["vmin"] is None
assert cmap_params["vmax"] is None
assert cmap_params["norm"].vmin == 0
assert cmap_params["norm"].vmax == 5
assert cmap_params["norm"].vmin == cmap_params["levels"][0]
assert cmap_params["norm"].vmax == cmap_params["levels"][-1]
assert cmap_params["cmap"].name == "Blues"
assert cmap_params["extend"] == "neither"
assert cmap_params["cmap"].N == 4
assert cmap_params["norm"].N == 5
cmap_params = _determine_cmap_params(data, levels=5, vmin=0.5, vmax=1.5)
assert cmap_params["cmap"].name == "viridis"
assert cmap_params["extend"] == "max"
cmap_params = _determine_cmap_params(data, levels=5, vmin=1.5)
assert cmap_params["cmap"].name == "viridis"
assert cmap_params["extend"] == "min"
cmap_params = _determine_cmap_params(data, levels=5, vmin=1.3, vmax=1.5)
assert cmap_params["cmap"].name == "viridis"
assert cmap_params["extend"] == "both"
def test_list_levels(self):
data = self.data + 1
orig_levels = [0, 1, 2, 3, 4, 5]
# vmin and vmax should be ignored if levels are explicitly provided
cmap_params = _determine_cmap_params(data, levels=orig_levels, vmin=0, vmax=3)
assert cmap_params["vmin"] is None
assert cmap_params["vmax"] is None
assert cmap_params["norm"].vmin == 0
assert cmap_params["norm"].vmax == 5
assert cmap_params["cmap"].N == 5
assert cmap_params["norm"].N == 6
for wrap_levels in [list, np.array, pd.Index, DataArray]:
cmap_params = _determine_cmap_params(data, levels=wrap_levels(orig_levels))
assert_array_equal(cmap_params["levels"], orig_levels)
    def test_divergentcontrol(self):
        """Exercise the heuristic choosing a diverging (RdBu_r) vs a
        sequential (viridis) default colormap from the data sign, the
        ``center`` argument, and one-sided vmin/vmax."""
        neg = self.data - 0.1
        pos = self.data
        # Default with positive data will be a normal cmap
        cmap_params = _determine_cmap_params(pos)
        assert cmap_params["vmin"] == 0
        assert cmap_params["vmax"] == 1
        assert cmap_params["cmap"] == "viridis"
        # Default with negative data will be a divergent cmap
        cmap_params = _determine_cmap_params(neg)
        assert cmap_params["vmin"] == -0.9
        assert cmap_params["vmax"] == 0.9
        assert cmap_params["cmap"] == "RdBu_r"
        # Setting vmin or vmax should prevent this only if center is false
        cmap_params = _determine_cmap_params(neg, vmin=-0.1, center=False)
        assert cmap_params["vmin"] == -0.1
        assert cmap_params["vmax"] == 0.9
        assert cmap_params["cmap"] == "viridis"
        cmap_params = _determine_cmap_params(neg, vmax=0.5, center=False)
        assert cmap_params["vmin"] == -0.1
        assert cmap_params["vmax"] == 0.5
        assert cmap_params["cmap"] == "viridis"
        # Setting center=False too
        cmap_params = _determine_cmap_params(neg, center=False)
        assert cmap_params["vmin"] == -0.1
        assert cmap_params["vmax"] == 0.9
        assert cmap_params["cmap"] == "viridis"
        # However, I should still be able to set center and have a div cmap
        cmap_params = _determine_cmap_params(neg, center=0)
        assert cmap_params["vmin"] == -0.9
        assert cmap_params["vmax"] == 0.9
        assert cmap_params["cmap"] == "RdBu_r"
        # Setting vmin or vmax alone will force symmetric bounds around center
        cmap_params = _determine_cmap_params(neg, vmin=-0.1)
        assert cmap_params["vmin"] == -0.1
        assert cmap_params["vmax"] == 0.1
        assert cmap_params["cmap"] == "RdBu_r"
        cmap_params = _determine_cmap_params(neg, vmax=0.5)
        assert cmap_params["vmin"] == -0.5
        assert cmap_params["vmax"] == 0.5
        assert cmap_params["cmap"] == "RdBu_r"
        # a non-zero center shifts the point of symmetry
        cmap_params = _determine_cmap_params(neg, vmax=0.6, center=0.1)
        assert cmap_params["vmin"] == -0.4
        assert cmap_params["vmax"] == 0.6
        assert cmap_params["cmap"] == "RdBu_r"
        # But this is only true if vmin or vmax are negative
        cmap_params = _determine_cmap_params(pos, vmin=-0.1)
        assert cmap_params["vmin"] == -0.1
        assert cmap_params["vmax"] == 0.1
        assert cmap_params["cmap"] == "RdBu_r"
        cmap_params = _determine_cmap_params(pos, vmin=0.1)
        assert cmap_params["vmin"] == 0.1
        assert cmap_params["vmax"] == 1
        assert cmap_params["cmap"] == "viridis"
        cmap_params = _determine_cmap_params(pos, vmax=0.5)
        assert cmap_params["vmin"] == 0
        assert cmap_params["vmax"] == 0.5
        assert cmap_params["cmap"] == "viridis"
        # If both vmin and vmax are provided, output is non-divergent
        cmap_params = _determine_cmap_params(neg, vmin=-0.2, vmax=0.6)
        assert cmap_params["vmin"] == -0.2
        assert cmap_params["vmax"] == 0.6
        assert cmap_params["cmap"] == "viridis"
        # regression test for GH3524
        # infer diverging colormap from divergent levels
        cmap_params = _determine_cmap_params(pos, levels=[-0.1, 0, 1])
        # specifying levels makes cmap a Colormap object
        assert cmap_params["cmap"].name == "RdBu_r"
def test_norm_sets_vmin_vmax(self):
vmin = self.data.min()
vmax = self.data.max()
for norm, extend, levels in zip(
[
mpl.colors.Normalize(),
mpl.colors.Normalize(),
mpl.colors.Normalize(vmin + 0.1, vmax - 0.1),
mpl.colors.Normalize(None, vmax - 0.1),
mpl.colors.Normalize(vmin + 0.1, None),
],
["neither", "neither", "both", "max", "min"],
[7, None, None, None, None],
):
test_min = vmin if norm.vmin is None else norm.vmin
test_max = vmax if norm.vmax is None else norm.vmax
cmap_params = _determine_cmap_params(self.data, norm=norm, levels=levels)
assert cmap_params["vmin"] is None
assert cmap_params["vmax"] is None
assert cmap_params["norm"].vmin == test_min
assert cmap_params["norm"].vmax == test_max
assert cmap_params["extend"] == extend
assert cmap_params["norm"] == norm
@requires_matplotlib
class TestDiscreteColorMap:
    """Tests for discrete colormap / BoundaryNorm construction from levels."""

    @pytest.fixture(autouse=True)
    def setUp(self):
        # Build a small 2d "distance from origin" field with known extrema.
        x = np.arange(start=0, stop=10, step=2)
        y = np.arange(start=9, stop=-7, step=-3)
        xy = np.dstack(np.meshgrid(x, y))
        distance = np.linalg.norm(xy, axis=2)
        self.darray = DataArray(distance, list(zip(("y", "x"), (y, x))))
        self.data_min = distance.min()
        self.data_max = distance.max()
        yield
        # Remove all matplotlib figures
        plt.close("all")

    @pytest.mark.slow
    def test_recover_from_seaborn_jet_exception(self):
        pal = _color_palette("jet", 4)
        # FIX: use isinstance instead of `type(...) == ...` (E721 type-comparison)
        assert isinstance(pal, np.ndarray)
        assert len(pal) == 4

    @pytest.mark.slow
    def test_build_discrete_cmap(self):
        for (cmap, levels, extend, filled) in [
            ("jet", [0, 1], "both", False),
            ("hot", [-4, 4], "max", True),
        ]:
            ncmap, cnorm = _build_discrete_cmap(cmap, levels, extend, filled)
            # N boundaries yield N-1 color bins
            assert ncmap.N == len(levels) - 1
            assert len(ncmap.colors) == len(levels) - 1
            assert cnorm.N == len(levels)
            assert_array_equal(cnorm.boundaries, levels)
            assert max(levels) == cnorm.vmax
            assert min(levels) == cnorm.vmin
            if filled:
                assert ncmap.colorbar_extend == extend
            else:
                # unfilled plots always get "max" extension
                assert ncmap.colorbar_extend == "max"

    @pytest.mark.slow
    def test_discrete_colormap_list_of_levels(self):
        for extend, levels in [
            ("max", [-1, 2, 4, 8, 10]),
            ("both", [2, 5, 10, 11]),
            ("neither", [0, 5, 10, 15]),
            ("min", [2, 5, 10, 15]),
        ]:
            for kind in ["imshow", "pcolormesh", "contourf", "contour"]:
                primitive = getattr(self.darray.plot, kind)(levels=levels)
                assert_array_equal(levels, primitive.norm.boundaries)
                assert max(levels) == primitive.norm.vmax
                assert min(levels) == primitive.norm.vmin
                if kind != "contour":
                    assert extend == primitive.cmap.colorbar_extend
                else:
                    # contour (unfilled) always extends "max"
                    assert "max" == primitive.cmap.colorbar_extend
                assert len(levels) - 1 == len(primitive.cmap.colors)

    @pytest.mark.slow
    def test_discrete_colormap_int_levels(self):
        for extend, levels, vmin, vmax, cmap in [
            ("neither", 7, None, None, None),
            ("neither", 7, None, 20, mpl.cm.RdBu),
            ("both", 7, 4, 8, None),
            ("min", 10, 4, 15, None),
        ]:
            for kind in ["imshow", "pcolormesh", "contourf", "contour"]:
                primitive = getattr(self.darray.plot, kind)(
                    levels=levels, vmin=vmin, vmax=vmax, cmap=cmap
                )
                # an int `levels` is an upper bound on the number of intervals
                assert levels >= len(primitive.norm.boundaries) - 1
                if vmax is None:
                    assert primitive.norm.vmax >= self.data_max
                else:
                    assert primitive.norm.vmax >= vmax
                if vmin is None:
                    assert primitive.norm.vmin <= self.data_min
                else:
                    assert primitive.norm.vmin <= vmin
                if kind != "contour":
                    assert extend == primitive.cmap.colorbar_extend
                else:
                    assert "max" == primitive.cmap.colorbar_extend
                assert levels >= len(primitive.cmap.colors)

    def test_discrete_colormap_list_levels_and_vmin_or_vmax(self):
        # explicit level lists take precedence over vmin/vmax
        levels = [0, 5, 10, 15]
        primitive = self.darray.plot(levels=levels, vmin=-3, vmax=20)
        assert primitive.norm.vmax == max(levels)
        assert primitive.norm.vmin == min(levels)

    def test_discrete_colormap_provided_boundary_norm(self):
        norm = mpl.colors.BoundaryNorm([0, 5, 10, 15], 4)
        primitive = self.darray.plot.contourf(norm=norm)
        np.testing.assert_allclose(primitive.levels, norm.boundaries)
class Common2dMixin:
    """
    Common tests for 2d plotting go here.
    These tests assume that a staticmethod for `self.plotfunc` exists.
    Should have the same name as the method.
    """

    # Needs to be overridden in TestSurface for facet grid plots
    subplot_kws: Union[Dict[Any, Any], None] = None

    @pytest.fixture(autouse=True)
    def setUp(self):
        # 10x15 array with 1d dim coords plus redundant 2d coords (x2d/y2d)
        da = DataArray(
            easy_array((10, 15), start=-1),
            dims=["y", "x"],
            coords={"y": np.arange(10), "x": np.arange(15)},
        )
        # add 2d coords
        ds = da.to_dataset(name="testvar")
        x, y = np.meshgrid(da.x.values, da.y.values)
        ds["x2d"] = DataArray(x, dims=["y", "x"])
        ds["y2d"] = DataArray(y, dims=["y", "x"])
        ds = ds.set_coords(["x2d", "y2d"])
        # set darray and plot method
        self.darray = ds.testvar
        # Add CF-compliant metadata
        self.darray.attrs["long_name"] = "a_long_name"
        self.darray.attrs["units"] = "a_units"
        self.darray.x.attrs["long_name"] = "x_long_name"
        self.darray.x.attrs["units"] = "x_units"
        self.darray.y.attrs["long_name"] = "y_long_name"
        self.darray.y.attrs["units"] = "y_units"
        # bound method on self.darray matching the subclass's plotfunc
        self.plotmethod = getattr(self.darray.plot, self.plotfunc.__name__)

    def test_label_names(self):
        self.plotmethod()
        assert "x_long_name [x_units]" == plt.gca().get_xlabel()
        assert "y_long_name [y_units]" == plt.gca().get_ylabel()

    def test_1d_raises_valueerror(self):
        with pytest.raises(ValueError, match=r"DataArray must be 2d"):
            self.plotfunc(self.darray[0, :])

    def test_bool(self):
        xr.ones_like(self.darray, dtype=bool).plot()

    def test_complex_raises_typeerror(self):
        with pytest.raises(TypeError, match=r"complex128"):
            (self.darray + 1j).plot()

    def test_3d_raises_valueerror(self):
        a = DataArray(easy_array((2, 3, 4)))
        if self.plotfunc.__name__ == "imshow":
            # imshow accepts 3d (RGB) arrays, so this check does not apply
            pytest.skip()
        with pytest.raises(ValueError, match=r"DataArray must be 2d"):
            self.plotfunc(a)

    def test_nonnumeric_index_raises_typeerror(self):
        a = DataArray(easy_array((3, 2)), coords=[["a", "b", "c"], ["d", "e"]])
        with pytest.raises(TypeError, match=r"[Pp]lot"):
            self.plotfunc(a)

    def test_multiindex_raises_typeerror(self):
        a = DataArray(
            easy_array((3, 2)),
            dims=("x", "y"),
            coords=dict(x=("x", [0, 1, 2]), a=("y", [0, 1]), b=("y", [2, 3])),
        )
        a = a.set_index(y=("a", "b"))
        with pytest.raises(TypeError, match=r"[Pp]lot"):
            self.plotfunc(a)

    def test_can_pass_in_axis(self):
        self.pass_in_axis(self.plotmethod)

    def test_xyincrease_defaults(self):
        # With default settings the axis must be ordered regardless
        # of the coords order.
        self.plotfunc(DataArray(easy_array((3, 2)), coords=[[1, 2, 3], [1, 2]]))
        bounds = plt.gca().get_ylim()
        assert bounds[0] < bounds[1]
        bounds = plt.gca().get_xlim()
        assert bounds[0] < bounds[1]
        # Inverted coords
        self.plotfunc(DataArray(easy_array((3, 2)), coords=[[3, 2, 1], [2, 1]]))
        bounds = plt.gca().get_ylim()
        assert bounds[0] < bounds[1]
        bounds = plt.gca().get_xlim()
        assert bounds[0] < bounds[1]

    def test_xyincrease_false_changes_axes(self):
        self.plotmethod(xincrease=False, yincrease=False)
        xlim = plt.gca().get_xlim()
        ylim = plt.gca().get_ylim()
        # axes should now run 14->0 (x) and 9->0 (y); allow <1 slack for padding
        diffs = xlim[0] - 14, xlim[1] - 0, ylim[0] - 9, ylim[1] - 0
        assert all(abs(x) < 1 for x in diffs)

    def test_xyincrease_true_changes_axes(self):
        self.plotmethod(xincrease=True, yincrease=True)
        xlim = plt.gca().get_xlim()
        ylim = plt.gca().get_ylim()
        diffs = xlim[0] - 0, xlim[1] - 14, ylim[0] - 0, ylim[1] - 9
        assert all(abs(x) < 1 for x in diffs)

    def test_x_ticks_are_rotated_for_time(self):
        time = pd.date_range("2000-01-01", "2000-01-10")
        a = DataArray(np.random.randn(2, len(time)), [("xx", [1, 2]), ("t", time)])
        a.plot(x="t")
        rotation = plt.gca().get_xticklabels()[0].get_rotation()
        assert rotation != 0

    def test_plot_nans(self):
        # color limits must ignore NaNs
        x1 = self.darray[:5]
        x2 = self.darray.copy()
        x2[5:] = np.nan
        clim1 = self.plotfunc(x1).get_clim()
        clim2 = self.plotfunc(x2).get_clim()
        assert clim1 == clim2

    @pytest.mark.filterwarnings("ignore::UserWarning")
    @pytest.mark.filterwarnings("ignore:invalid value encountered")
    def test_can_plot_all_nans(self):
        # regression test for issue #1780
        self.plotfunc(DataArray(np.full((2, 2), np.nan)))

    @pytest.mark.filterwarnings("ignore: Attempting to set")
    def test_can_plot_axis_size_one(self):
        if self.plotfunc.__name__ not in ("contour", "contourf"):
            self.plotfunc(DataArray(np.ones((1, 1))))

    def test_disallows_rgb_arg(self):
        with pytest.raises(ValueError):
            # Always invalid for most plots. Invalid for imshow with 2D data.
            self.plotfunc(DataArray(np.ones((2, 2))), rgb="not None")

    def test_viridis_cmap(self):
        cmap_name = self.plotmethod(cmap="viridis").get_cmap().name
        assert "viridis" == cmap_name

    def test_default_cmap(self):
        # darray spans negative values -> diverging default
        cmap_name = self.plotmethod().get_cmap().name
        assert "RdBu_r" == cmap_name
        # all-positive data -> sequential default
        cmap_name = self.plotfunc(abs(self.darray)).get_cmap().name
        assert "viridis" == cmap_name

    @requires_seaborn
    def test_seaborn_palette_as_cmap(self):
        cmap_name = self.plotmethod(levels=2, cmap="husl").get_cmap().name
        assert "husl" == cmap_name

    def test_can_change_default_cmap(self):
        cmap_name = self.plotmethod(cmap="Blues").get_cmap().name
        assert "Blues" == cmap_name

    def test_diverging_color_limits(self):
        artist = self.plotmethod()
        vmin, vmax = artist.get_clim()
        # default limits must be symmetric around zero
        assert round(abs(-vmin - vmax), 7) == 0

    def test_xy_strings(self):
        self.plotmethod("y", "x")
        ax = plt.gca()
        assert "y_long_name [y_units]" == ax.get_xlabel()
        assert "x_long_name [x_units]" == ax.get_ylabel()

    def test_positional_coord_string(self):
        self.plotmethod(y="x")
        ax = plt.gca()
        assert "x_long_name [x_units]" == ax.get_ylabel()
        assert "y_long_name [y_units]" == ax.get_xlabel()
        self.plotmethod(x="x")
        ax = plt.gca()
        assert "x_long_name [x_units]" == ax.get_xlabel()
        assert "y_long_name [y_units]" == ax.get_ylabel()

    def test_bad_x_string_exception(self):
        with pytest.raises(ValueError, match=r"x and y cannot be equal."):
            self.plotmethod(x="y", y="y")
        error_msg = "must be one of None, 'x', 'x2d', 'y', 'y2d'"
        with pytest.raises(ValueError, match=rf"x {error_msg}"):
            self.plotmethod("not_a_real_dim", "y")
        with pytest.raises(ValueError, match=rf"x {error_msg}"):
            self.plotmethod(x="not_a_real_dim")
        with pytest.raises(ValueError, match=rf"y {error_msg}"):
            self.plotmethod(y="not_a_real_dim")
        # NOTE(review): trailing statement after the last assertion -- looks
        # like a leftover with no effect on this test; confirm before removing.
        self.darray.coords["z"] = 100

    def test_coord_strings(self):
        # 1d coords (same as dims)
        assert {"x", "y"} == set(self.darray.dims)
        self.plotmethod(y="y", x="x")

    def test_non_linked_coords(self):
        # plot with coordinate names that are not dimensions
        self.darray.coords["newy"] = self.darray.y + 150
        # Normal case, without transpose
        self.plotfunc(self.darray, x="x", y="newy")
        ax = plt.gca()
        assert "x_long_name [x_units]" == ax.get_xlabel()
        assert "newy" == ax.get_ylabel()
        # ax limits might change between plotfuncs
        # simply ensure that these high coords were passed over
        assert np.min(ax.get_ylim()) > 100.0

    def test_non_linked_coords_transpose(self):
        # plot with coordinate names that are not dimensions,
        # and with transposed y and x axes
        # This used to raise an error with pcolormesh and contour
        # https://github.com/pydata/xarray/issues/788
        self.darray.coords["newy"] = self.darray.y + 150
        self.plotfunc(self.darray, x="newy", y="x")
        ax = plt.gca()
        assert "newy" == ax.get_xlabel()
        assert "x_long_name [x_units]" == ax.get_ylabel()
        # ax limits might change between plotfuncs
        # simply ensure that these high coords were passed over
        assert np.min(ax.get_xlim()) > 100.0

    def test_multiindex_level_as_coord(self):
        da = DataArray(
            easy_array((3, 2)),
            dims=("x", "y"),
            coords=dict(x=("x", [0, 1, 2]), a=("y", [0, 1]), b=("y", [2, 3])),
        )
        da = da.set_index(y=["a", "b"])
        # individual MultiIndex levels are valid x/y choices
        for x, y in (("a", "x"), ("b", "x"), ("x", "a"), ("x", "b")):
            self.plotfunc(da, x=x, y=y)
            ax = plt.gca()
            assert x == ax.get_xlabel()
            assert y == ax.get_ylabel()
        # but two levels of the same MultiIndex are not
        with pytest.raises(ValueError, match=r"levels of the same MultiIndex"):
            self.plotfunc(da, x="a", y="b")
        with pytest.raises(ValueError, match=r"y must be one of None, 'a', 'b', 'x'"):
            self.plotfunc(da, x="a", y="y")

    def test_default_title(self):
        a = DataArray(easy_array((4, 3, 2)), dims=["a", "b", "c"])
        a.coords["c"] = [0, 1]
        a.coords["d"] = "foo"
        self.plotfunc(a.isel(c=1))
        title = plt.gca().get_title()
        # dict/coord ordering is not guaranteed, accept either order
        assert "c = 1, d = foo" == title or "d = foo, c = 1" == title

    def test_colorbar_default_label(self):
        self.plotmethod(add_colorbar=True)
        assert "a_long_name [a_units]" in text_in_fig()

    def test_no_labels(self):
        self.darray.name = "testvar"
        self.darray.attrs["units"] = "test_units"
        self.plotmethod(add_labels=False)
        alltxt = text_in_fig()
        for string in [
            "x_long_name [x_units]",
            "y_long_name [y_units]",
            "testvar [test_units]",
        ]:
            assert string not in alltxt

    def test_colorbar_kwargs(self):
        # replace label
        self.darray.attrs.pop("long_name")
        self.darray.attrs["units"] = "test_units"
        # check default colorbar label
        self.plotmethod(add_colorbar=True)
        alltxt = text_in_fig()
        assert "testvar [test_units]" in alltxt
        self.darray.attrs.pop("units")
        self.darray.name = "testvar"
        self.plotmethod(add_colorbar=True, cbar_kwargs={"label": "MyLabel"})
        alltxt = text_in_fig()
        assert "MyLabel" in alltxt
        assert "testvar" not in alltxt
        # you can use anything accepted by the dict constructor as well
        self.plotmethod(add_colorbar=True, cbar_kwargs=(("label", "MyLabel"),))
        alltxt = text_in_fig()
        assert "MyLabel" in alltxt
        assert "testvar" not in alltxt
        # change cbar ax
        fig, (ax, cax) = plt.subplots(1, 2)
        self.plotmethod(
            ax=ax, cbar_ax=cax, add_colorbar=True, cbar_kwargs={"label": "MyBar"}
        )
        assert ax.has_data()
        assert cax.has_data()
        alltxt = text_in_fig()
        assert "MyBar" in alltxt
        assert "testvar" not in alltxt
        # note that there are two ways to achieve this
        fig, (ax, cax) = plt.subplots(1, 2)
        self.plotmethod(
            ax=ax, add_colorbar=True, cbar_kwargs={"label": "MyBar", "cax": cax}
        )
        assert ax.has_data()
        assert cax.has_data()
        alltxt = text_in_fig()
        assert "MyBar" in alltxt
        assert "testvar" not in alltxt
        # see that no colorbar is respected
        self.plotmethod(add_colorbar=False)
        assert "testvar" not in text_in_fig()
        # check that error is raised
        pytest.raises(
            ValueError,
            self.plotmethod,
            add_colorbar=False,
            cbar_kwargs={"label": "label"},
        )

    def test_verbose_facetgrid(self):
        a = easy_array((10, 15, 3))
        d = DataArray(a, dims=["y", "x", "z"])
        g = xplt.FacetGrid(d, col="z", subplot_kws=self.subplot_kws)
        g.map_dataarray(self.plotfunc, "x", "y")
        for ax in g.axes.flat:
            assert ax.has_data()

    def test_2d_function_and_method_signature_same(self):
        # the free function and the accessor method must accept the same args
        func_sig = inspect.getcallargs(self.plotfunc, self.darray)
        method_sig = inspect.getcallargs(self.plotmethod)
        del method_sig["_PlotMethods_obj"]
        del func_sig["darray"]
        assert func_sig == method_sig

    @pytest.mark.filterwarnings("ignore:tight_layout cannot")
    def test_convenient_facetgrid(self):
        a = easy_array((10, 15, 4))
        d = DataArray(a, dims=["y", "x", "z"])
        g = self.plotfunc(d, x="x", y="y", col="z", col_wrap=2)
        assert_array_equal(g.axes.shape, [2, 2])
        # only the left column / bottom row carry axis labels
        for (y, x), ax in np.ndenumerate(g.axes):
            assert ax.has_data()
            if x == 0:
                assert "y" == ax.get_ylabel()
            else:
                assert "" == ax.get_ylabel()
            if y == 1:
                assert "x" == ax.get_xlabel()
            else:
                assert "" == ax.get_xlabel()
        # Infering labels
        g = self.plotfunc(d, col="z", col_wrap=2)
        assert_array_equal(g.axes.shape, [2, 2])
        for (y, x), ax in np.ndenumerate(g.axes):
            assert ax.has_data()
            if x == 0:
                assert "y" == ax.get_ylabel()
            else:
                assert "" == ax.get_ylabel()
            if y == 1:
                assert "x" == ax.get_xlabel()
            else:
                assert "" == ax.get_xlabel()

    @pytest.mark.filterwarnings("ignore:tight_layout cannot")
    def test_convenient_facetgrid_4d(self):
        a = easy_array((10, 15, 2, 3))
        d = DataArray(a, dims=["y", "x", "columns", "rows"])
        g = self.plotfunc(d, x="x", y="y", col="columns", row="rows")
        assert_array_equal(g.axes.shape, [3, 2])
        for ax in g.axes.flat:
            assert ax.has_data()

    @pytest.mark.filterwarnings("ignore:This figure includes")
    def test_facetgrid_map_only_appends_mappables(self):
        a = easy_array((10, 15, 2, 3))
        d = DataArray(a, dims=["y", "x", "columns", "rows"])
        g = self.plotfunc(d, x="x", y="y", col="columns", row="rows")
        expected = g._mappables
        g.map(lambda: plt.plot(1, 1))
        actual = g._mappables
        assert expected == actual

    def test_facetgrid_cmap(self):
        # Regression test for GH592
        data = np.random.random(size=(20, 25, 12)) + np.linspace(-3, 3, 12)
        d = DataArray(data, dims=["x", "y", "time"])
        fg = d.plot.pcolormesh(col="time")
        # check that all color limits are the same
        assert len({m.get_clim() for m in fg._mappables}) == 1
        # check that all colormaps are the same
        assert len({m.get_cmap().name for m in fg._mappables}) == 1

    def test_facetgrid_cbar_kwargs(self):
        a = easy_array((10, 15, 2, 3))
        d = DataArray(a, dims=["y", "x", "columns", "rows"])
        g = self.plotfunc(
            d,
            x="x",
            y="y",
            col="columns",
            row="rows",
            cbar_kwargs={"label": "test_label"},
        )
        # catch contour case
        if g.cbar is not None:
            assert get_colorbar_label(g.cbar) == "test_label"

    def test_facetgrid_no_cbar_ax(self):
        a = easy_array((10, 15, 2, 3))
        d = DataArray(a, dims=["y", "x", "columns", "rows"])
        with pytest.raises(ValueError):
            self.plotfunc(d, x="x", y="y", col="columns", row="rows", cbar_ax=1)

    def test_cmap_and_color_both(self):
        with pytest.raises(ValueError):
            self.plotmethod(colors="k", cmap="RdBu")

    def test_2d_coord_with_interval(self):
        # groupby_bins produces IntervalIndex coords; plotting must handle them
        for dim in self.darray.dims:
            gp = self.darray.groupby_bins(dim, range(15), restore_coord_dims=True).mean(
                dim
            )
            for kind in ["imshow", "pcolormesh", "contourf", "contour"]:
                getattr(gp.plot, kind)()

    def test_colormap_error_norm_and_vmin_vmax(self):
        # norm with explicit bounds conflicts with vmin/vmax
        norm = mpl.colors.LogNorm(0.1, 1e1)
        with pytest.raises(ValueError):
            self.darray.plot(norm=norm, vmin=2)
        with pytest.raises(ValueError):
            self.darray.plot(norm=norm, vmax=2)
@pytest.mark.slow
class TestContourf(Common2dMixin, PlotTestCase):
    """Run the common 2d tests against ``xarray.plot.contourf``."""

    plotfunc = staticmethod(xplt.contourf)

    @pytest.mark.slow
    def test_contourf_called(self):
        # Having both statements ensures the test works properly
        assert not self.contourf_called(self.darray.plot.imshow)
        assert self.contourf_called(self.darray.plot.contourf)

    def test_primitive_artist_returned(self):
        artist = self.plotmethod()
        assert isinstance(artist, mpl.contour.QuadContourSet)

    @pytest.mark.slow
    def test_extend(self):
        """Extend arrows should follow which side of vmin/vmax clips data."""
        artist = self.plotmethod()
        assert artist.extend == "neither"
        # outliers + robust=True clip on both ends
        self.darray[0, 0] = -100
        self.darray[-1, -1] = 100
        artist = self.plotmethod(robust=True)
        assert artist.extend == "both"
        self.darray[0, 0] = 0
        self.darray[-1, -1] = 0
        artist = self.plotmethod(vmin=-0, vmax=10)
        assert artist.extend == "min"
        artist = self.plotmethod(vmin=-10, vmax=0)
        assert artist.extend == "max"

    @pytest.mark.slow
    def test_2d_coord_names(self):
        self.plotmethod(x="x2d", y="y2d")
        # make sure labels came out ok
        ax = plt.gca()
        assert "x2d" == ax.get_xlabel()
        assert "y2d" == ax.get_ylabel()

    @pytest.mark.slow
    def test_levels(self):
        # explicit levels inside the data range force extension on both sides
        artist = self.plotmethod(levels=[-0.5, -0.4, 0.1])
        assert artist.extend == "both"
        artist = self.plotmethod(levels=3)
        assert artist.extend == "neither"
@pytest.mark.slow
class TestContour(Common2dMixin, PlotTestCase):
    """Run the common 2d tests against ``xarray.plot.contour``."""

    plotfunc = staticmethod(xplt.contour)

    # matplotlib cmap.colors gives an rgbA ndarray
    # when seaborn is used, instead we get an rgb tuple
    @staticmethod
    def _color_as_tuple(c):
        # drop the alpha channel (if present) so both backends compare equal
        return tuple(c[:3])

    def test_colors(self):
        # with single color, we don't want rgb array
        artist = self.plotmethod(colors="k")
        assert artist.cmap.colors[0] == "k"

        artist = self.plotmethod(colors=["k", "b"])
        assert self._color_as_tuple(artist.cmap.colors[1]) == (0.0, 0.0, 1.0)

        artist = self.darray.plot.contour(
            levels=[-0.5, 0.0, 0.5, 1.0], colors=["k", "r", "w", "b"]
        )
        assert self._color_as_tuple(artist.cmap.colors[1]) == (1.0, 0.0, 0.0)
        assert self._color_as_tuple(artist.cmap.colors[2]) == (1.0, 1.0, 1.0)
        # the last color is now under "over"
        assert self._color_as_tuple(artist.cmap._rgba_over) == (0.0, 0.0, 1.0)

    def test_colors_np_levels(self):
        # https://github.com/pydata/xarray/issues/3284
        levels = np.array([-0.5, 0.0, 0.5, 1.0])
        artist = self.darray.plot.contour(levels=levels, colors=["k", "r", "w", "b"])
        assert self._color_as_tuple(artist.cmap.colors[1]) == (1.0, 0.0, 0.0)
        assert self._color_as_tuple(artist.cmap.colors[2]) == (1.0, 1.0, 1.0)
        # the last color is now under "over"
        assert self._color_as_tuple(artist.cmap._rgba_over) == (0.0, 0.0, 1.0)

    def test_cmap_and_color_both(self):
        with pytest.raises(ValueError):
            self.plotmethod(colors="k", cmap="RdBu")

    def test_list_of_colors_in_cmap_raises_error(self):
        # FIX: was named ``list_of_colors_in_cmap_raises_error`` -- without the
        # ``test_`` prefix pytest never collected or ran this test.
        with pytest.raises(ValueError, match=r"list of colors"):
            self.plotmethod(cmap=["k", "b"])

    @pytest.mark.slow
    def test_2d_coord_names(self):
        self.plotmethod(x="x2d", y="y2d")
        # make sure labels came out ok
        ax = plt.gca()
        assert "x2d" == ax.get_xlabel()
        assert "y2d" == ax.get_ylabel()

    def test_single_level(self):
        # this used to raise an error, but not anymore since
        # add_colorbar defaults to false
        self.plotmethod(levels=[0.1])
        self.plotmethod(levels=1)
class TestPcolormesh(Common2dMixin, PlotTestCase):
    """Run the common 2d tests against ``xarray.plot.pcolormesh``."""

    plotfunc = staticmethod(xplt.pcolormesh)

    def test_primitive_artist_returned(self):
        artist = self.plotmethod()
        assert isinstance(artist, mpl.collections.QuadMesh)

    def test_everything_plotted(self):
        # the mesh must contain one face per data point
        artist = self.plotmethod()
        assert artist.get_array().size == self.darray.size

    @pytest.mark.slow
    def test_2d_coord_names(self):
        self.plotmethod(x="x2d", y="y2d")
        # make sure labels came out ok
        ax = plt.gca()
        assert "x2d" == ax.get_xlabel()
        assert "y2d" == ax.get_ylabel()

    def test_dont_infer_interval_breaks_for_cartopy(self):
        # Regression for GH 781
        ax = plt.gca()
        # Simulate a Cartopy Axis
        setattr(ax, "projection", True)
        artist = self.plotmethod(x="x2d", y="y2d", ax=ax)
        assert isinstance(artist, mpl.collections.QuadMesh)
        # Let cartopy handle the axis limits and artist size
        assert artist.get_array().size <= self.darray.size
@pytest.mark.slow
class TestImshow(Common2dMixin, PlotTestCase):
    """Run the common 2d tests against ``xarray.plot.imshow``,
    plus imshow-specific RGB(A) handling tests."""

    plotfunc = staticmethod(xplt.imshow)

    @pytest.mark.slow
    def test_imshow_called(self):
        # Having both statements ensures the test works properly
        assert not self.imshow_called(self.darray.plot.contourf)
        assert self.imshow_called(self.darray.plot.imshow)

    def test_xy_pixel_centered(self):
        # pixel edges extend half a cell beyond the integer coords
        self.darray.plot.imshow(yincrease=False)
        assert np.allclose([-0.5, 14.5], plt.gca().get_xlim())
        assert np.allclose([9.5, -0.5], plt.gca().get_ylim())

    def test_default_aspect_is_auto(self):
        self.darray.plot.imshow()
        assert "auto" == plt.gca().get_aspect()

    @pytest.mark.slow
    def test_cannot_change_mpl_aspect(self):
        with pytest.raises(ValueError, match=r"not available in xarray"):
            self.darray.plot.imshow(aspect="equal")
        # with numbers we fall back to fig control
        self.darray.plot.imshow(size=5, aspect=2)
        assert "auto" == plt.gca().get_aspect()
        assert tuple(plt.gcf().get_size_inches()) == (10, 5)

    @pytest.mark.slow
    def test_primitive_artist_returned(self):
        artist = self.plotmethod()
        assert isinstance(artist, mpl.image.AxesImage)

    @pytest.mark.slow
    @requires_seaborn
    def test_seaborn_palette_needs_levels(self):
        with pytest.raises(ValueError):
            self.plotmethod(cmap="husl")

    def test_2d_coord_names(self):
        # imshow cannot use 2d coordinate arrays
        with pytest.raises(ValueError, match=r"requires 1D coordinates"):
            self.plotmethod(x="x2d", y="y2d")

    def test_plot_rgb_image(self):
        DataArray(
            easy_array((10, 15, 3), start=0), dims=["y", "x", "band"]
        ).plot.imshow()
        # RGB images get no colorbar
        assert 0 == len(find_possible_colorbars())

    def test_plot_rgb_image_explicit(self):
        DataArray(
            easy_array((10, 15, 3), start=0), dims=["y", "x", "band"]
        ).plot.imshow(y="y", x="x", rgb="band")
        assert 0 == len(find_possible_colorbars())

    def test_plot_rgb_faceted(self):
        DataArray(
            easy_array((2, 2, 10, 15, 3), start=0), dims=["a", "b", "y", "x", "band"]
        ).plot.imshow(row="a", col="b")
        assert 0 == len(find_possible_colorbars())

    def test_plot_rgba_image_transposed(self):
        # We can handle the color axis being in any position
        DataArray(
            easy_array((4, 10, 15), start=0), dims=["band", "y", "x"]
        ).plot.imshow()

    def test_warns_ambigious_dim(self):
        # all three dims have size 3, so the RGB axis cannot be inferred
        arr = DataArray(easy_array((3, 3, 3)), dims=["y", "x", "band"])
        with pytest.warns(UserWarning):
            arr.plot.imshow()
        # but doesn't warn if dimensions specified
        arr.plot.imshow(rgb="band")
        arr.plot.imshow(x="x", y="y")

    def test_rgb_errors_too_many_dims(self):
        arr = DataArray(easy_array((3, 3, 3, 3)), dims=["y", "x", "z", "band"])
        with pytest.raises(ValueError):
            arr.plot.imshow(rgb="band")

    def test_rgb_errors_bad_dim_sizes(self):
        # rgb dim must have size 3 or 4
        arr = DataArray(easy_array((5, 5, 5)), dims=["y", "x", "band"])
        with pytest.raises(ValueError):
            arr.plot.imshow(rgb="band")

    def test_normalize_rgb_imshow(self):
        for kwargs in (
            dict(vmin=-1),
            dict(vmax=2),
            dict(vmin=-1, vmax=1),
            dict(vmin=0, vmax=0),
            dict(vmin=0, robust=True),
            dict(vmax=-1, robust=True),
        ):
            da = DataArray(easy_array((5, 5, 3), start=-0.6, stop=1.4))
            arr = da.plot.imshow(**kwargs).get_array()
            # normalized RGB values must land in [0, 1]
            assert 0 <= arr.min() <= arr.max() <= 1, kwargs

    def test_normalize_rgb_one_arg_error(self):
        da = DataArray(easy_array((5, 5, 3), start=-0.6, stop=1.4))
        # If passed one bound that implies all out of range, error:
        for kwargs in [dict(vmax=-1), dict(vmin=2)]:
            with pytest.raises(ValueError):
                da.plot.imshow(**kwargs)
        # If passed two that's just moving the range, *not* an error:
        for kwargs in [dict(vmax=-1, vmin=-1.2), dict(vmin=2, vmax=2.1)]:
            da.plot.imshow(**kwargs)

    def test_imshow_rgb_values_in_valid_range(self):
        da = DataArray(np.arange(75, dtype="uint8").reshape((5, 5, 3)))
        _, ax = plt.subplots()
        out = da.plot.imshow(ax=ax).get_array()
        assert out.dtype == np.uint8
        assert (out[..., :3] == da.values).all()  # Compare without added alpha

    @pytest.mark.filterwarnings("ignore:Several dimensions of this array")
    def test_regression_rgb_imshow_dim_size_one(self):
        # Regression: https://github.com/pydata/xarray/issues/1966
        da = DataArray(easy_array((1, 3, 3), start=0.0, stop=1.0))
        da.plot.imshow()

    def test_origin_overrides_xyincrease(self):
        da = DataArray(easy_array((3, 2)), coords=[[-2, 0, 2], [-1, 1]])
        with figure_context():
            da.plot.imshow(origin="upper")
            assert plt.xlim()[0] < 0
            assert plt.ylim()[1] < 0
        with figure_context():
            da.plot.imshow(origin="lower")
            assert plt.xlim()[0] < 0
            assert plt.ylim()[0] < 0
class TestSurface(Common2dMixin, PlotTestCase):
    """Run the common 2d tests against ``xarray.plot.surface`` (3d axes),
    skipping or adapting the cases that do not apply to surface plots."""

    plotfunc = staticmethod(xplt.surface)
    # surface plots need 3d projection axes in facet grids
    subplot_kws = {"projection": "3d"}

    def test_primitive_artist_returned(self):
        artist = self.plotmethod()
        assert isinstance(artist, mpl_toolkits.mplot3d.art3d.Poly3DCollection)

    @pytest.mark.slow
    def test_2d_coord_names(self):
        self.plotmethod(x="x2d", y="y2d")
        # make sure labels came out ok
        ax = plt.gca()
        assert "x2d" == ax.get_xlabel()
        assert "y2d" == ax.get_ylabel()
        # surface plots also label the z axis with the data variable
        assert f"{self.darray.long_name} [{self.darray.units}]" == ax.get_zlabel()

    def test_xyincrease_false_changes_axes(self):
        # Does not make sense for surface plots
        pytest.skip("does not make sense for surface plots")

    def test_xyincrease_true_changes_axes(self):
        # Does not make sense for surface plots
        pytest.skip("does not make sense for surface plots")

    def test_can_pass_in_axis(self):
        self.pass_in_axis(self.plotmethod, subplot_kw={"projection": "3d"})

    def test_default_cmap(self):
        # Does not make sense for surface plots with default arguments
        pytest.skip("does not make sense for surface plots")

    def test_diverging_color_limits(self):
        # Does not make sense for surface plots with default arguments
        pytest.skip("does not make sense for surface plots")

    def test_colorbar_kwargs(self):
        # Does not make sense for surface plots with default arguments
        pytest.skip("does not make sense for surface plots")

    def test_cmap_and_color_both(self):
        # Does not make sense for surface plots with default arguments
        pytest.skip("does not make sense for surface plots")

    def test_seaborn_palette_as_cmap(self):
        # seaborn does not work with mpl_toolkits.mplot3d
        with pytest.raises(ValueError):
            super().test_seaborn_palette_as_cmap()

    # Need to modify this test for surface(), because all subplots should have labels,
    # not just left and bottom
    @pytest.mark.filterwarnings("ignore:tight_layout cannot")
    def test_convenient_facetgrid(self):
        a = easy_array((10, 15, 4))
        d = DataArray(a, dims=["y", "x", "z"])
        g = self.plotfunc(d, x="x", y="y", col="z", col_wrap=2)
        assert_array_equal(g.axes.shape, [2, 2])
        for (y, x), ax in np.ndenumerate(g.axes):
            assert ax.has_data()
            assert "y" == ax.get_ylabel()
            assert "x" == ax.get_xlabel()
        # Infering labels
        g = self.plotfunc(d, col="z", col_wrap=2)
        assert_array_equal(g.axes.shape, [2, 2])
        for (y, x), ax in np.ndenumerate(g.axes):
            assert ax.has_data()
            assert "y" == ax.get_ylabel()
            assert "x" == ax.get_xlabel()

    # The following overrides re-run inherited tests but only on matplotlib
    # versions where surface plots support the needed behavior.
    @requires_matplotlib_3_3_0
    def test_viridis_cmap(self):
        return super().test_viridis_cmap()

    @requires_matplotlib_3_3_0
    def test_can_change_default_cmap(self):
        return super().test_can_change_default_cmap()

    @requires_matplotlib_3_3_0
    def test_colorbar_default_label(self):
        return super().test_colorbar_default_label()

    @requires_matplotlib_3_3_0
    def test_facetgrid_map_only_appends_mappables(self):
        return super().test_facetgrid_map_only_appends_mappables()
class TestFacetGrid(PlotTestCase):
    """FacetGrid behaviour for a 3-d DataArray faceted along ``z``.

    Each test builds on the shared ``self.g`` grid created in ``setUp`` and
    inspects the resulting matplotlib figure state (labels, colorbars, limits).
    """
    @pytest.fixture(autouse=True)
    def setUp(self):
        # 3-d array with a string-labelled facet dimension "z".
        d = easy_array((10, 15, 3))
        self.darray = DataArray(d, dims=["y", "x", "z"], coords={"z": ["a", "b", "c"]})
        self.g = xplt.FacetGrid(self.darray, col="z")
    @pytest.mark.slow
    def test_no_args(self):
        self.g.map_dataarray(xplt.contourf, "x", "y")
        # Don't want colorbar labeled with 'None'
        alltxt = text_in_fig()
        assert "None" not in alltxt
        for ax in self.g.axes.flat:
            assert ax.has_data()
    @pytest.mark.slow
    def test_names_appear_somewhere(self):
        self.darray.name = "testvar"
        self.g.map_dataarray(xplt.contourf, "x", "y")
        for k, ax in zip("abc", self.g.axes.flat):
            assert f"z = {k}" == ax.get_title()
        alltxt = text_in_fig()
        assert self.darray.name in alltxt
        for label in ["x", "y"]:
            assert label in alltxt
    @pytest.mark.slow
    def test_text_not_super_long(self):
        # Very long facet values must be truncated with an ellipsis in titles.
        self.darray.coords["z"] = [100 * letter for letter in "abc"]
        g = xplt.FacetGrid(self.darray, col="z")
        g.map_dataarray(xplt.contour, "x", "y")
        alltxt = text_in_fig()
        maxlen = max(len(txt) for txt in alltxt)
        assert maxlen < 50
        t0 = g.axes[0, 0].get_title()
        assert t0.endswith("...")
    @pytest.mark.slow
    def test_colorbar(self):
        # All facets share one colorbar spanning the global data range.
        vmin = self.darray.values.min()
        vmax = self.darray.values.max()
        expected = np.array((vmin, vmax))
        self.g.map_dataarray(xplt.imshow, "x", "y")
        for image in plt.gcf().findobj(mpl.image.AxesImage):
            clim = np.array(image.get_clim())
            assert np.allclose(expected, clim)
        assert 1 == len(find_possible_colorbars())
    @pytest.mark.slow
    def test_empty_cell(self):
        # With col_wrap=2 and 3 facets the last grid cell stays empty & hidden.
        g = xplt.FacetGrid(self.darray, col="z", col_wrap=2)
        g.map_dataarray(xplt.imshow, "x", "y")
        bottomright = g.axes[-1, -1]
        assert not bottomright.has_data()
        assert not bottomright.get_visible()
    @pytest.mark.slow
    def test_norow_nocol_error(self):
        with pytest.raises(ValueError, match=r"[Rr]ow"):
            xplt.FacetGrid(self.darray)
    @pytest.mark.slow
    def test_groups(self):
        # The name dict of the upper-left cell must select the z=0 slice.
        self.g.map_dataarray(xplt.imshow, "x", "y")
        upperleft_dict = self.g.name_dicts[0, 0]
        upperleft_array = self.darray.loc[upperleft_dict]
        z0 = self.darray.isel(z=0)
        assert_equal(upperleft_array, z0)
    @pytest.mark.slow
    def test_float_index(self):
        self.darray.coords["z"] = [0.1, 0.2, 0.4]
        g = xplt.FacetGrid(self.darray, col="z")
        g.map_dataarray(xplt.imshow, "x", "y")
    @pytest.mark.slow
    def test_nonunique_index_error(self):
        self.darray.coords["z"] = [0.1, 0.2, 0.2]
        with pytest.raises(ValueError, match=r"[Uu]nique"):
            xplt.FacetGrid(self.darray, col="z")
    @pytest.mark.slow
    def test_robust(self):
        # With robust=True the outliers (+-1000) must not drive the color
        # limits or the colorbar tick labels.
        z = np.zeros((20, 20, 2))
        darray = DataArray(z, dims=["y", "x", "z"])
        darray[:, :, 1] = 1
        darray[2, 0, 0] = -1000
        darray[3, 0, 0] = 1000
        g = xplt.FacetGrid(darray, col="z")
        g.map_dataarray(xplt.imshow, "x", "y", robust=True)
        # Color limits should be 0, 1
        # The largest number displayed in the figure should be less than 21
        numbers = set()
        alltxt = text_in_fig()
        for txt in alltxt:
            try:
                numbers.add(float(txt))
            except ValueError:
                pass
        largest = max(abs(x) for x in numbers)
        assert largest < 21
    @pytest.mark.slow
    def test_can_set_vmin_vmax(self):
        vmin, vmax = 50.0, 1000.0
        expected = np.array((vmin, vmax))
        self.g.map_dataarray(xplt.imshow, "x", "y", vmin=vmin, vmax=vmax)
        for image in plt.gcf().findobj(mpl.image.AxesImage):
            clim = np.array(image.get_clim())
            assert np.allclose(expected, clim)
    @pytest.mark.slow
    def test_vmin_vmax_equal(self):
        # regression test for GH3734
        fg = self.g.map_dataarray(xplt.imshow, "x", "y", vmin=50, vmax=50)
        for mappable in fg._mappables:
            assert mappable.norm.vmin != mappable.norm.vmax
    @pytest.mark.slow
    @pytest.mark.filterwarnings("ignore")
    def test_can_set_norm(self):
        norm = mpl.colors.SymLogNorm(0.1)
        self.g.map_dataarray(xplt.imshow, "x", "y", norm=norm)
        for image in plt.gcf().findobj(mpl.image.AxesImage):
            assert image.norm is norm
    @pytest.mark.slow
    def test_figure_size(self):
        # size/aspect/figsize interactions; providing conflicting options raises.
        assert_array_equal(self.g.fig.get_size_inches(), (10, 3))
        g = xplt.FacetGrid(self.darray, col="z", size=6)
        assert_array_equal(g.fig.get_size_inches(), (19, 6))
        g = self.darray.plot.imshow(col="z", size=6)
        assert_array_equal(g.fig.get_size_inches(), (19, 6))
        g = xplt.FacetGrid(self.darray, col="z", size=4, aspect=0.5)
        assert_array_equal(g.fig.get_size_inches(), (7, 4))
        g = xplt.FacetGrid(self.darray, col="z", figsize=(9, 4))
        assert_array_equal(g.fig.get_size_inches(), (9, 4))
        with pytest.raises(ValueError, match=r"cannot provide both"):
            g = xplt.plot(self.darray, row=2, col="z", figsize=(6, 4), size=6)
        with pytest.raises(ValueError, match=r"Can't use"):
            g = xplt.plot(self.darray, row=2, col="z", ax=plt.gca(), size=6)
    @pytest.mark.slow
    def test_num_ticks(self):
        nticks = 99
        maxticks = nticks + 1
        self.g.map_dataarray(xplt.imshow, "x", "y")
        self.g.set_ticks(max_xticks=nticks, max_yticks=nticks)
        for ax in self.g.axes.flat:
            xticks = len(ax.get_xticks())
            yticks = len(ax.get_yticks())
            assert xticks <= maxticks
            assert yticks <= maxticks
            assert xticks >= nticks / 2.0
            assert yticks >= nticks / 2.0
    @pytest.mark.slow
    def test_map(self):
        assert self.g._finalized is False
        self.g.map(plt.contourf, "x", "y", Ellipsis)
        assert self.g._finalized is True
        self.g.map(lambda: None)
    @pytest.mark.slow
    def test_map_dataset(self):
        g = xplt.FacetGrid(self.darray.to_dataset(name="foo"), col="z")
        g.map(plt.contourf, "x", "y", "foo")
        alltxt = text_in_fig()
        for label in ["x", "y"]:
            assert label in alltxt
        # everything has a label
        assert "None" not in alltxt
        # colorbar can't be inferred automatically
        assert "foo" not in alltxt
        assert 0 == len(find_possible_colorbars())
        g.add_colorbar(label="colors!")
        assert "colors!" in text_in_fig()
        assert 1 == len(find_possible_colorbars())
    @pytest.mark.slow
    def test_set_axis_labels(self):
        g = self.g.map_dataarray(xplt.contourf, "x", "y")
        g.set_axis_labels("longitude", "latitude")
        alltxt = text_in_fig()
        for label in ["longitude", "latitude"]:
            assert label in alltxt
    @pytest.mark.slow
    def test_facetgrid_colorbar(self):
        a = easy_array((10, 15, 4))
        d = DataArray(a, dims=["y", "x", "z"], name="foo")
        d.plot.imshow(x="x", y="y", col="z")
        assert 1 == len(find_possible_colorbars())
        d.plot.imshow(x="x", y="y", col="z", add_colorbar=True)
        assert 1 == len(find_possible_colorbars())
        d.plot.imshow(x="x", y="y", col="z", add_colorbar=False)
        assert 0 == len(find_possible_colorbars())
    @pytest.mark.slow
    def test_facetgrid_polar(self):
        # test if polar projection in FacetGrid does not raise an exception
        self.darray.plot.pcolormesh(
            col="z", subplot_kws=dict(projection="polar"), sharex=False, sharey=False
        )
@pytest.mark.filterwarnings("ignore:tight_layout cannot")
class TestFacetGrid4d(PlotTestCase):
    """FacetGrid over both ``row`` and ``col`` from a 4-d DataArray."""
    @pytest.fixture(autouse=True)
    def setUp(self):
        a = easy_array((10, 15, 3, 2))
        darray = DataArray(a, dims=["y", "x", "col", "row"])
        # Use string facet labels ("col0", "row0", ...) so titles are textual.
        darray.coords["col"] = np.array(
            ["col" + str(x) for x in darray.coords["col"].values]
        )
        darray.coords["row"] = np.array(
            ["row" + str(x) for x in darray.coords["row"].values]
        )
        self.darray = darray
    @pytest.mark.slow
    def test_default_labels(self):
        g = xplt.FacetGrid(self.darray, col="col", row="row")
        assert (2, 3) == g.axes.shape
        g.map_dataarray(xplt.imshow, "x", "y")
        # Rightmost column should be labeled
        for label, ax in zip(self.darray.coords["row"].values, g.axes[:, -1]):
            assert substring_in_axes(label, ax)
        # Top row should be labeled
        for label, ax in zip(self.darray.coords["col"].values, g.axes[0, :]):
            assert substring_in_axes(label, ax)
        # ensure that row & col labels can be changed
        g.set_titles("abc={value}")
        for label, ax in zip(self.darray.coords["row"].values, g.axes[:, -1]):
            assert substring_in_axes(f"abc={label}", ax)
            # previous labels were "row=row0" etc.
            assert substring_not_in_axes("row=", ax)
        for label, ax in zip(self.darray.coords["col"].values, g.axes[0, :]):
            assert substring_in_axes(f"abc={label}", ax)
            # previous labels were "col=col0" etc.
            assert substring_not_in_axes("col=", ax)
@pytest.mark.filterwarnings("ignore:tight_layout cannot")
class TestFacetedLinePlotsLegend(PlotTestCase):
    """Legend content of faceted line plots (uses the tutorial dataset)."""
    @pytest.fixture(autouse=True)
    def setUp(self):
        # NOTE(review): tutorial datasets may be downloaded on first use.
        self.darray = xr.tutorial.scatter_example_dataset()
    def test_legend_labels(self):
        fg = self.darray.A.plot.line(col="x", row="w", hue="z")
        all_legend_labels = [t.get_text() for t in fg.figlegend.texts]
        # labels in legend should be ['0', '1', '2', '3']
        assert sorted(all_legend_labels) == ["0", "1", "2", "3"]
@pytest.mark.filterwarnings("ignore:tight_layout cannot")
class TestFacetedLinePlots(PlotTestCase):
    """Faceted line plots from a 4-d DataArray with hue/row/col dimensions."""
    @pytest.fixture(autouse=True)
    def setUp(self):
        self.darray = DataArray(
            np.random.randn(10, 6, 3, 4),
            dims=["hue", "x", "col", "row"],
            coords=[range(10), range(6), range(3), ["A", "B", "C", "C++"]],
            name="Cornelius Ortega the 1st",
        )
        # Attach units so labels include "[...]" annotations.
        self.darray.hue.name = "huename"
        self.darray.hue.attrs["units"] = "hunits"
        self.darray.x.attrs["units"] = "xunits"
        self.darray.col.attrs["units"] = "colunits"
        self.darray.row.attrs["units"] = "rowunits"
    def test_facetgrid_shape(self):
        g = self.darray.plot(row="row", col="col", hue="hue")
        assert g.axes.shape == (len(self.darray.row), len(self.darray.col))
        g = self.darray.plot(row="col", col="row", hue="hue")
        assert g.axes.shape == (len(self.darray.col), len(self.darray.row))
    def test_unnamed_args(self):
        g = self.darray.plot.line("o--", row="row", col="col", hue="hue")
        lines = [
            q for q in g.axes.flat[0].get_children() if isinstance(q, mpl.lines.Line2D)
        ]
        # passing 'o--' as argument should set marker and linestyle
        assert lines[0].get_marker() == "o"
        assert lines[0].get_linestyle() == "--"
    def test_default_labels(self):
        g = self.darray.plot(row="row", col="col", hue="hue")
        # Rightmost column should be labeled
        for label, ax in zip(self.darray.coords["row"].values, g.axes[:, -1]):
            assert substring_in_axes(label, ax)
        # Top row should be labeled
        for label, ax in zip(self.darray.coords["col"].values, g.axes[0, :]):
            assert substring_in_axes(str(label), ax)
        # Leftmost column should have array name
        for ax in g.axes[:, 0]:
            assert substring_in_axes(self.darray.name, ax)
    def test_test_empty_cell(self):
        g = (
            self.darray.isel(row=1)
            .drop_vars("row")
            .plot(col="col", hue="hue", col_wrap=2)
        )
        bottomright = g.axes[-1, -1]
        assert not bottomright.has_data()
        assert not bottomright.get_visible()
    def test_set_axis_labels(self):
        g = self.darray.plot(row="row", col="col", hue="hue")
        g.set_axis_labels("longitude", "latitude")
        alltxt = text_in_fig()
        assert "longitude" in alltxt
        assert "latitude" in alltxt
    def test_axes_in_faceted_plot(self):
        # A user-supplied axes object is incompatible with faceting.
        with pytest.raises(ValueError):
            self.darray.plot.line(row="row", col="col", x="x", ax=plt.axes())
    def test_figsize_and_size(self):
        with pytest.raises(ValueError):
            self.darray.plot.line(row="row", col="col", x="x", size=3, figsize=4)
    def test_wrong_num_of_dimensions(self):
        with pytest.raises(ValueError):
            self.darray.plot(row="row", hue="hue")
            self.darray.plot.line(row="row", hue="hue")
@requires_matplotlib
class TestDatasetQuiverPlots(PlotTestCase):
    """Dataset.plot.quiver: argument validation and faceted behaviour."""
    @pytest.fixture(autouse=True)
    def setUp(self):
        # Two random 4-d variables u/v plus a derived magnitude for hue.
        das = [
            DataArray(
                np.random.randn(3, 3, 4, 4),
                dims=["x", "y", "row", "col"],
                coords=[range(k) for k in [3, 3, 4, 4]],
            )
            for _ in [1, 2]
        ]
        ds = Dataset({"u": das[0], "v": das[1]})
        ds.x.attrs["units"] = "xunits"
        ds.y.attrs["units"] = "yunits"
        ds.col.attrs["units"] = "colunits"
        ds.row.attrs["units"] = "rowunits"
        ds.u.attrs["units"] = "uunits"
        ds.v.attrs["units"] = "vunits"
        ds["mag"] = np.hypot(ds.u, ds.v)
        self.ds = ds
    def test_quiver(self):
        with figure_context():
            hdl = self.ds.isel(row=0, col=0).plot.quiver(x="x", y="y", u="u", v="v")
            assert isinstance(hdl, mpl.quiver.Quiver)
        with pytest.raises(ValueError, match=r"specify x, y, u, v"):
            self.ds.isel(row=0, col=0).plot.quiver(x="x", y="y", u="u")
        with pytest.raises(ValueError, match=r"hue_style"):
            self.ds.isel(row=0, col=0).plot.quiver(
                x="x", y="y", u="u", v="v", hue="mag", hue_style="discrete"
            )
    def test_facetgrid(self):
        with figure_context():
            fg = self.ds.plot.quiver(
                x="x", y="y", u="u", v="v", row="row", col="col", scale=1, hue="mag"
            )
            for handle in fg._mappables:
                assert isinstance(handle, mpl.quiver.Quiver)
            # The quiver key label must carry the u-variable's units.
            assert "uunits" in fg.quiverkey.text.get_text()
        with figure_context():
            fg = self.ds.plot.quiver(
                x="x",
                y="y",
                u="u",
                v="v",
                row="row",
                col="col",
                scale=1,
                hue="mag",
                add_guide=False,
            )
            assert fg.quiverkey is None
        with pytest.raises(ValueError, match=r"Please provide scale"):
            self.ds.plot.quiver(x="x", y="y", u="u", v="v", row="row", col="col")
@requires_matplotlib
class TestDatasetStreamplotPlots(PlotTestCase):
    """Dataset.plot.streamplot: argument validation and faceted behaviour."""
    @pytest.fixture(autouse=True)
    def setUp(self):
        # Same fixture shape as the quiver tests, but with 2x2 facets.
        das = [
            DataArray(
                np.random.randn(3, 3, 2, 2),
                dims=["x", "y", "row", "col"],
                coords=[range(k) for k in [3, 3, 2, 2]],
            )
            for _ in [1, 2]
        ]
        ds = Dataset({"u": das[0], "v": das[1]})
        ds.x.attrs["units"] = "xunits"
        ds.y.attrs["units"] = "yunits"
        ds.col.attrs["units"] = "colunits"
        ds.row.attrs["units"] = "rowunits"
        ds.u.attrs["units"] = "uunits"
        ds.v.attrs["units"] = "vunits"
        ds["mag"] = np.hypot(ds.u, ds.v)
        self.ds = ds
    def test_streamline(self):
        with figure_context():
            hdl = self.ds.isel(row=0, col=0).plot.streamplot(x="x", y="y", u="u", v="v")
            assert isinstance(hdl, mpl.collections.LineCollection)
        with pytest.raises(ValueError, match=r"specify x, y, u, v"):
            self.ds.isel(row=0, col=0).plot.streamplot(x="x", y="y", u="u")
        with pytest.raises(ValueError, match=r"hue_style"):
            self.ds.isel(row=0, col=0).plot.streamplot(
                x="x", y="y", u="u", v="v", hue="mag", hue_style="discrete"
            )
    def test_facetgrid(self):
        with figure_context():
            fg = self.ds.plot.streamplot(
                x="x", y="y", u="u", v="v", row="row", col="col", hue="mag"
            )
            for handle in fg._mappables:
                assert isinstance(handle, mpl.collections.LineCollection)
        with figure_context():
            fg = self.ds.plot.streamplot(
                x="x",
                y="y",
                u="u",
                v="v",
                row="row",
                col="col",
                hue="mag",
                add_guide=False,
            )
@requires_matplotlib
class TestDatasetScatterPlots(PlotTestCase):
    """Dataset.plot.scatter: guides, faceting, hue styles and labels."""
    @pytest.fixture(autouse=True)
    def setUp(self):
        das = [
            DataArray(
                np.random.randn(3, 3, 4, 4),
                dims=["x", "row", "col", "hue"],
                coords=[range(k) for k in [3, 3, 4, 4]],
            )
            for _ in [1, 2]
        ]
        ds = Dataset({"A": das[0], "B": das[1]})
        ds.hue.name = "huename"
        ds.hue.attrs["units"] = "hunits"
        ds.x.attrs["units"] = "xunits"
        ds.col.attrs["units"] = "colunits"
        ds.row.attrs["units"] = "rowunits"
        ds.A.attrs["units"] = "Aunits"
        ds.B.attrs["units"] = "Bunits"
        self.ds = ds
    def test_accessor(self):
        from ..plot.dataset_plot import _Dataset_PlotMethods
        assert Dataset.plot is _Dataset_PlotMethods
        assert isinstance(self.ds.plot, _Dataset_PlotMethods)
    @pytest.mark.parametrize(
        "add_guide, hue_style, legend, colorbar",
        [
            (None, None, False, True),
            (False, None, False, False),
            (True, None, False, True),
            (True, "continuous", False, True),
            (False, "discrete", False, False),
            (True, "discrete", True, False),
        ],
    )
    def test_add_guide(self, add_guide, hue_style, legend, colorbar):
        # _infer_meta_data decides whether a legend or a colorbar is drawn.
        meta_data = _infer_meta_data(
            self.ds,
            x="A",
            y="B",
            hue="hue",
            hue_style=hue_style,
            add_guide=add_guide,
            funcname="scatter",
        )
        assert meta_data["add_legend"] is legend
        assert meta_data["add_colorbar"] is colorbar
    def test_facetgrid_shape(self):
        g = self.ds.plot.scatter(x="A", y="B", row="row", col="col")
        assert g.axes.shape == (len(self.ds.row), len(self.ds.col))
        g = self.ds.plot.scatter(x="A", y="B", row="col", col="row")
        assert g.axes.shape == (len(self.ds.col), len(self.ds.row))
    def test_default_labels(self):
        g = self.ds.plot.scatter("A", "B", row="row", col="col", hue="hue")
        # Top row should be labeled
        for label, ax in zip(self.ds.coords["col"].values, g.axes[0, :]):
            assert substring_in_axes(str(label), ax)
        # Bottom row should have name of x array name and units
        for ax in g.axes[-1, :]:
            assert ax.get_xlabel() == "A [Aunits]"
        # Leftmost column should have name of y array name and units
        for ax in g.axes[:, 0]:
            assert ax.get_ylabel() == "B [Bunits]"
    def test_axes_in_faceted_plot(self):
        with pytest.raises(ValueError):
            self.ds.plot.scatter(x="A", y="B", row="row", ax=plt.axes())
    def test_figsize_and_size(self):
        with pytest.raises(ValueError):
            self.ds.plot.scatter(x="A", y="B", row="row", size=3, figsize=4)
    @pytest.mark.parametrize(
        "x, y, hue_style, add_guide",
        [
            ("A", "B", "something", True),
            ("A", "B", "discrete", True),
            ("A", "B", None, True),
            ("A", "The Spanish Inquisition", None, None),
            ("The Spanish Inquisition", "B", None, True),
        ],
    )
    def test_bad_args(self, x, y, hue_style, add_guide):
        # Unknown variables / invalid hue styles must raise.
        with pytest.raises(ValueError):
            self.ds.plot.scatter(x, y, hue_style=hue_style, add_guide=add_guide)
    @pytest.mark.xfail(reason="datetime,timedelta hue variable not supported.")
    @pytest.mark.parametrize("hue_style", ["discrete", "continuous"])
    def test_datetime_hue(self, hue_style):
        ds2 = self.ds.copy()
        ds2["hue"] = pd.date_range("2000-1-1", periods=4)
        ds2.plot.scatter(x="A", y="B", hue="hue", hue_style=hue_style)
        ds2["hue"] = pd.timedelta_range("-1D", periods=4, freq="D")
        ds2.plot.scatter(x="A", y="B", hue="hue", hue_style=hue_style)
    def test_facetgrid_hue_style(self):
        # Can't move this to pytest.mark.parametrize because py37-bare-minimum
        # doesn't have matplotlib.
        for hue_style, map_type in (
            ("discrete", list),
            ("continuous", mpl.collections.PathCollection),
        ):
            g = self.ds.plot.scatter(
                x="A", y="B", row="row", col="col", hue="hue", hue_style=hue_style
            )
            # for 'discrete' a list is appended to _mappables
            # for 'continuous', should be single PathCollection
            assert isinstance(g._mappables[-1], map_type)
    @pytest.mark.parametrize(
        "x, y, hue, markersize", [("A", "B", "x", "col"), ("x", "row", "A", "B")]
    )
    def test_scatter(self, x, y, hue, markersize):
        self.ds.plot.scatter(x, y, hue=hue, markersize=markersize)
        with pytest.raises(ValueError, match=r"u, v"):
            self.ds.plot.scatter(x, y, u="col", v="row")
    def test_non_numeric_legend(self):
        ds2 = self.ds.copy()
        ds2["hue"] = ["a", "b", "c", "d"]
        lines = ds2.plot.scatter(x="A", y="B", hue="hue")
        # should make a discrete legend
        assert lines[0].axes.legend_ is not None
        # and raise an error if explicitly not allowed to do so
        with pytest.raises(ValueError):
            ds2.plot.scatter(x="A", y="B", hue="hue", hue_style="continuous")
    def test_legend_labels(self):
        # regression test for #4126: incorrect legend labels
        ds2 = self.ds.copy()
        ds2["hue"] = ["a", "a", "b", "b"]
        lines = ds2.plot.scatter(x="A", y="B", hue="hue")
        assert [t.get_text() for t in lines[0].axes.get_legend().texts] == ["a", "b"]
    def test_legend_labels_facetgrid(self):
        ds2 = self.ds.copy()
        ds2["hue"] = ["d", "a", "c", "b"]
        g = ds2.plot.scatter(x="A", y="B", hue="hue", col="col")
        legend_labels = tuple(t.get_text() for t in g.figlegend.texts)
        attached_labels = [
            tuple(m.get_label() for m in mappables_per_ax)
            for mappables_per_ax in g._mappables
        ]
        # Every facet must attach the same label set shown in the figure legend.
        assert list(set(attached_labels)) == [legend_labels]
    def test_add_legend_by_default(self):
        sc = self.ds.plot.scatter(x="A", y="B", hue="hue")
        # Two axes: the scatter axes plus the colorbar axes added for hue.
        assert len(sc.figure.axes) == 2
class TestDatetimePlot(PlotTestCase):
    """Line plotting with a stdlib ``datetime`` time axis."""
    @pytest.fixture(autouse=True)
    def setUp(self):
        """
        Create a DataArray with a time-axis that contains datetime objects.
        """
        month = np.arange(1, 13, 1)
        data = np.sin(2 * np.pi * month / 12.0)
        darray = DataArray(data, dims=["time"])
        darray.coords["time"] = np.array([datetime(2017, m, 1) for m in month])
        self.darray = darray
    def test_datetime_line_plot(self):
        # test if line plot raises no Exception
        self.darray.plot.line()
@pytest.mark.xfail(reason="recent versions of nc-time-axis and cftime are incompatible")
@pytest.mark.filterwarnings("ignore:setting an array element with a sequence")
@requires_nc_time_axis
@requires_cftime
class TestCFDatetimePlot(PlotTestCase):
    """Smoke tests: plotting with a ``cftime.datetime`` time axis must not raise."""
    @pytest.fixture(autouse=True)
    def setUp(self):
        """
        Create a DataArray with a time-axis that contains cftime.datetime
        objects.
        """
        # case for 1d array
        data = np.random.rand(4, 12)
        time = xr.cftime_range(start="2017", periods=12, freq="1M", calendar="noleap")
        darray = DataArray(data, dims=["x", "time"])
        darray.coords["time"] = time
        self.darray = darray
    def test_cfdatetime_line_plot(self):
        self.darray.isel(x=0).plot.line()
    def test_cfdatetime_pcolormesh_plot(self):
        self.darray.plot.pcolormesh()
    def test_cfdatetime_contour_plot(self):
        self.darray.plot.contour()
@requires_cftime
@pytest.mark.skipif(has_nc_time_axis, reason="nc_time_axis is installed")
class TestNcAxisNotInstalled(PlotTestCase):
    """Plotting cftime data without nc-time-axis must raise a helpful ImportError."""
    @pytest.fixture(autouse=True)
    def setUp(self):
        """
        Create a DataArray with a time-axis that contains cftime.datetime
        objects.
        """
        month = np.arange(1, 13, 1)
        data = np.sin(2 * np.pi * month / 12.0)
        darray = DataArray(data, dims=["time"])
        darray.coords["time"] = xr.cftime_range(
            start="2017", periods=12, freq="1M", calendar="noleap"
        )
        self.darray = darray
    def test_ncaxis_notinstalled_line_plot(self):
        with pytest.raises(ImportError, match=r"optional `nc-time-axis`"):
            self.darray.plot.line()
# 1-d, 2-d and 3-d sample arrays shared by the axes-kwarg parametrized tests.
test_da_list = [
    DataArray(easy_array((10,))),
    DataArray(easy_array((10, 3))),
    DataArray(easy_array((10, 3, 2))),
]
@requires_matplotlib
class TestAxesKwargs:
    """Axis-related plot kwargs (increase/scale/lim/ticks) applied to the
    current axes, parametrized over arrays of several dimensionalities."""
    @pytest.mark.parametrize("da", test_da_list)
    @pytest.mark.parametrize("xincrease", [True, False])
    def test_xincrease_kwarg(self, da, xincrease):
        with figure_context():
            da.plot(xincrease=xincrease)
            assert plt.gca().xaxis_inverted() == (not xincrease)
    @pytest.mark.parametrize("da", test_da_list)
    @pytest.mark.parametrize("yincrease", [True, False])
    def test_yincrease_kwarg(self, da, yincrease):
        with figure_context():
            da.plot(yincrease=yincrease)
            assert plt.gca().yaxis_inverted() == (not yincrease)
    @pytest.mark.parametrize("da", test_da_list)
    @pytest.mark.parametrize("xscale", ["linear", "log", "logit", "symlog"])
    def test_xscale_kwarg(self, da, xscale):
        with figure_context():
            da.plot(xscale=xscale)
            assert plt.gca().get_xscale() == xscale
    @pytest.mark.parametrize(
        "da", [DataArray(easy_array((10,))), DataArray(easy_array((10, 3)))]
    )
    @pytest.mark.parametrize("yscale", ["linear", "log", "logit", "symlog"])
    def test_yscale_kwarg(self, da, yscale):
        with figure_context():
            da.plot(yscale=yscale)
            assert plt.gca().get_yscale() == yscale
    @pytest.mark.parametrize("da", test_da_list)
    def test_xlim_kwarg(self, da):
        with figure_context():
            expected = (0.0, 1000.0)
            da.plot(xlim=[0, 1000])
            assert plt.gca().get_xlim() == expected
    @pytest.mark.parametrize("da", test_da_list)
    def test_ylim_kwarg(self, da):
        with figure_context():
            da.plot(ylim=[0, 1000])
            expected = (0.0, 1000.0)
            assert plt.gca().get_ylim() == expected
    @pytest.mark.parametrize("da", test_da_list)
    def test_xticks_kwarg(self, da):
        with figure_context():
            da.plot(xticks=np.arange(5))
            expected = np.arange(5).tolist()
            assert_array_equal(plt.gca().get_xticks(), expected)
    @pytest.mark.parametrize("da", test_da_list)
    def test_yticks_kwarg(self, da):
        with figure_context():
            da.plot(yticks=np.arange(5))
            expected = np.arange(5)
            assert_array_equal(plt.gca().get_yticks(), expected)
@requires_matplotlib
@pytest.mark.parametrize("plotfunc", ["pcolormesh", "contourf", "contour"])
def test_plot_transposed_nondim_coord(plotfunc):
    """2-d plot functions must accept a non-dimension coord whose dims are
    transposed relative to the data (smoke test: no exception)."""
    x = np.linspace(0, 10, 101)
    h = np.linspace(3, 7, 101)
    s = np.linspace(0, 1, 51)
    z = s[:, np.newaxis] * h[np.newaxis, :]
    da = xr.DataArray(
        np.sin(x) * np.cos(z),
        dims=["s", "x"],
        coords={"x": x, "s": s, "z": (("s", "x"), z), "zt": (("x", "s"), z.T)},
    )
    with figure_context():
        getattr(da.plot, plotfunc)(x="x", y="zt")
    with figure_context():
        getattr(da.plot, plotfunc)(x="zt", y="x")
@requires_matplotlib
@pytest.mark.parametrize("plotfunc", ["pcolormesh", "imshow"])
def test_plot_transposes_properly(plotfunc):
    # test that we aren't mistakenly transposing when the 2 dimensions have equal sizes.
    da = xr.DataArray([np.sin(2 * np.pi / 10 * np.arange(10))] * 10, dims=("y", "x"))
    with figure_context():
        hdl = getattr(da.plot, plotfunc)(x="x", y="y")
        # get_array doesn't work for contour, contourf. It returns the colormap intervals.
        # pcolormesh returns 1D array but imshow returns a 2D array so it is necessary
        # to ravel() on the LHS
        assert_array_equal(hdl.get_array().ravel(), da.to_masked_array().ravel())
@requires_matplotlib
def test_facetgrid_single_contour():
    # regression test for GH3569: a single contour level per facet must not fail
    x, y = np.meshgrid(np.arange(12), np.arange(12))
    z = xr.DataArray(np.sqrt(x ** 2 + y ** 2))
    z2 = xr.DataArray(np.sqrt(x ** 2 + y ** 2) + 1)
    ds = xr.concat([z, z2], dim="time")
    ds["time"] = [0, 1]
    with figure_context():
        ds.plot.contour(col="time", levels=[4], colors=["k"])
@requires_matplotlib
def test_get_axis():
    # test get_axis works with different args combinations
    # and return the right type
    # cannot provide both ax and figsize
    with pytest.raises(ValueError, match="both `figsize` and `ax`"):
        get_axis(figsize=[4, 4], size=None, aspect=None, ax="something")
    # cannot provide both ax and size
    with pytest.raises(ValueError, match="both `size` and `ax`"):
        get_axis(figsize=None, size=200, aspect=4 / 3, ax="something")
    # cannot provide both size and figsize
    with pytest.raises(ValueError, match="both `figsize` and `size`"):
        get_axis(figsize=[4, 4], size=200, aspect=None, ax=None)
    # cannot provide aspect and size
    with pytest.raises(ValueError, match="`aspect` argument without `size`"):
        get_axis(figsize=None, size=None, aspect=4 / 3, ax=None)
    # no arguments at all: falls back to a plain matplotlib Axes
    with figure_context():
        ax = get_axis()
        assert isinstance(ax, mpl.axes.Axes)
@requires_cartopy
def test_get_axis_cartopy():
    # A cartopy projection kwarg must yield a GeoAxesSubplot instead of Axes.
    kwargs = {"projection": cartopy.crs.PlateCarree()}
    with figure_context():
        ax = get_axis(**kwargs)
        assert isinstance(ax, cartopy.mpl.geoaxes.GeoAxesSubplot)
@requires_matplotlib
def test_maybe_gca():
    """_maybe_gca creates new axes when none exist, but reuses (and does not
    reconfigure) axes that already exist on the current figure."""
    with figure_context():
        ax = _maybe_gca(aspect=1)
        assert isinstance(ax, mpl.axes.Axes)
        assert ax.get_aspect() == 1
    with figure_context():
        # create figure without axes
        plt.figure()
        ax = _maybe_gca(aspect=1)
        assert isinstance(ax, mpl.axes.Axes)
        assert ax.get_aspect() == 1
    with figure_context():
        existing_axes = plt.axes()
        ax = _maybe_gca(aspect=1)
        # re-uses the existing axes
        assert existing_axes == ax
        # kwargs are ignored when reusing axes
        assert ax.get_aspect() == "auto"
| 35.760057 | 90 | 0.583793 |
acf604f8433899a4747af93a59588be426be8e70 | 1,299 | py | Python | kuka_kr210_arm/setup.py | noshluk2/noshluk2-ROS2-Ultimate-guide-for-Custom-Robotic-Arms-and-Kuka-Kr210 | 7327ed7f237d81a1d77d9102a2a668c46f90bf41 | [
"MIT"
] | 2 | 2022-02-02T20:17:44.000Z | 2022-03-21T09:47:46.000Z | kuka_kr210_arm/setup.py | noshluk2/noshluk2-ROS2-Ultimate-guide-for-Custom-Robotic-Arms-and-Kuka-Kr210 | 7327ed7f237d81a1d77d9102a2a668c46f90bf41 | [
"MIT"
] | null | null | null | kuka_kr210_arm/setup.py | noshluk2/noshluk2-ROS2-Ultimate-guide-for-Custom-Robotic-Arms-and-Kuka-Kr210 | 7327ed7f237d81a1d77d9102a2a668c46f90bf41 | [
"MIT"
] | 3 | 2021-11-02T05:50:52.000Z | 2022-03-30T17:24:55.000Z | from setuptools import setup
import os
from glob import glob
package_name = 'kuka_kr210_arm'
# Resource directories whose entire contents are installed under
# share/<package_name>/<dir>, in this order.
_resource_dirs = [
    'launch',
    'urdf',
    'config',
    'meshes/collision',
    'meshes/visual',
]
setup(
    name=package_name,
    version='0.0.0',
    packages=[package_name],
    data_files=[
        ('share/ament_index/resource_index/packages',
            ['resource/' + package_name]),
        ('share/' + package_name, ['package.xml']),
    ] + [
        (os.path.join('share', package_name, directory), glob(directory + '/*'))
        for directory in _resource_dirs
    ],
    install_requires=['setuptools'],
    zip_safe=True,
    maintainer='luqman',
    maintainer_email='noshluk2@gmail.com',
    description='TODO: Package description',
    license='TODO: License declaration',
    tests_require=['pytest'],
    entry_points={
        'console_scripts': [
            'trajectory_exec = kuka_kr210_arm.1_controller_test:main',
            'inverse_kinematics = kuka_kr210_arm.2_inverse_kinematics_solution:main',
            'sqaure_actionClient = kuka_kr210_arm.3_action_client_interface:main',
        ],
    },
)
| 34.184211 | 93 | 0.642802 |
acf6054647135cf27d2f9f0376ebffa681a16a0d | 3,953 | py | Python | main.py | corso/codenation_data-science-0 | 598476dd61ad1697c585ce8549dd850177d6c528 | [
"MIT"
] | null | null | null | main.py | corso/codenation_data-science-0 | 598476dd61ad1697c585ce8549dd850177d6c528 | [
"MIT"
] | null | null | null | main.py | corso/codenation_data-science-0 | 598476dd61ad1697c585ce8549dd850177d6c528 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# coding: utf-8
# # Desafio 1
#
# Para esse desafio, vamos trabalhar com o data set [Black Friday](https://www.kaggle.com/mehdidag/black-friday), que reúne dados sobre transações de compras em uma loja de varejo.
#
# Vamos utilizá-lo para praticar a exploração de data sets utilizando pandas. Você pode fazer toda análise neste mesmo notebook, mas as resposta devem estar nos locais indicados.
#
# > Obs.: Por favor, não modifique o nome das funções de resposta.
# ## _Set up_ da análise
# In[1]:
import pandas as pd
import numpy as np
# In[2]:
# Load the Black Friday transactions dataset (one row per purchase record);
# shared module-level DataFrame used by all the q* answer functions below.
black_friday = pd.read_csv("black_friday.csv")
# ## Inicie sua análise a partir daqui
# In[3]:
black_friday.head()
# ## Questão 1
#
# Quantas observações e quantas colunas há no dataset? Responda no formato de uma tuple `(n_observacoes, n_colunas)`.
# In[1]:
def q1():
    """Return the dataset dimensions as a ``(n_rows, n_cols)`` tuple."""
    n_rows, n_cols = black_friday.shape
    return (n_rows, n_cols)
# ## Questão 2
#
# Há quantas mulheres com idade entre 26 e 35 anos no dataset? Responda como um único escalar.
# In[2]:
def q2():
    """Return how many women aged 26-35 appear in the dataset."""
    # Multi-index Series: outer level is Age, inner level is Gender.
    counts_by_age_gender = black_friday.groupby('Age')['Gender'].value_counts()
    return int(counts_by_age_gender.loc[('26-35', 'F')])
# ## Questão 3
#
# Quantos usuários únicos há no dataset? Responda como um único escalar.
# In[3]:
def q3():
    """Return the number of distinct users (unique ``User_ID`` values)."""
    unique_users = black_friday['User_ID'].nunique()
    return int(unique_users)
# ## Questão 4
#
# Quantos tipos de dados diferentes existem no dataset? Responda como um único escalar.
# In[4]:
def q4():
    """Return how many distinct dtypes appear among the columns."""
    return int(black_friday.dtypes.nunique())
# ## Questão 5
#
# Qual porcentagem dos registros possui ao menos um valor null (`None`, `NaN` etc)? Responda como um único escalar entre 0 e 1.
# In[5]:
def q5():
    """Return the fraction (0-1) of rows containing at least one null value.

    Bug fix: the original denominator was ``black_friday.any(axis=1).sum()``,
    which counts rows having *any truthy value* rather than the total number
    of rows, silently undercounting if a row were all-zero/NaN. The mean of
    the boolean "row has a null" mask uses the true row count.
    """
    has_null = black_friday.isnull().any(axis=1)
    return float(has_null.mean())
# ## Questão 6
#
# Quantos valores null existem na variável (coluna) com o maior número de null? Responda como um único escalar.
# In[6]:
def q6():
    """Return the null count of the column with the most null values.

    Replaces the original hand-written Python loop over columns with the
    vectorized pandas idiom (per-column null counts, then the maximum).
    """
    return int(black_friday.isnull().sum().max())
# ## Questão 7
#
# Qual o valor mais frequente (sem contar nulls) em `Product_Category_3`? Responda como um único escalar.
# In[7]:
def q7():
    """Return the most frequent non-null value in ``Product_Category_3``."""
    # value_counts drops nulls; idxmax picks the value with the top count
    # (same element the original read via first_valid_index()).
    frequency = black_friday['Product_Category_3'].value_counts()
    return frequency.idxmax()
# ## Questão 8
#
# Qual a nova média da variável (coluna) `Purchase` após sua normalização? Responda como um único escalar.
# In[8]:
def q8():
    """Return the mean of ``Purchase`` after min-max normalization to [0, 1]."""
    purchase = black_friday['Purchase']
    lo, hi = purchase.min(), purchase.max()
    scaled = (purchase - lo) / (hi - lo)
    return scaled.mean()
# ## Questão 9
#
# Quantas ocorrências entre -1 e 1 inclusive existem da variável `Purchase` após sua padronização? Responda como um único escalar.
# In[9]:
def q9():
    """Return how many standardized ``Purchase`` values lie in [-1, 1].

    Improvement: counts directly on the standardized Series instead of
    building a sorted intermediate DataFrame and filtering it with a string
    query. ``Series.between`` is inclusive on both ends, matching the
    original ``>= -1 and <= 1`` bounds.
    """
    purchase = black_friday['Purchase']
    standardized = (purchase - purchase.mean()) / purchase.std()
    return int(standardized.between(-1, 1).sum())
# ## Questão 10
#
# Podemos afirmar que se uma observação é null em `Product_Category_2` ela também o é em `Product_Category_3`? Responda com um bool (`True`, `False`).
# In[10]:
def q10():
    """Return True if every row null in ``Product_Category_2`` is also null
    in ``Product_Category_3``.

    Improvement: checks the implication directly on boolean masks instead of
    building an intermediate DataFrame and comparing two string-query counts.
    """
    pc2_null = black_friday['Product_Category_2'].isnull()
    pc3_null = black_friday['Product_Category_3'].isnull()
    # Wherever PC2 is null, PC3 must be null as well.
    return bool(pc3_null[pc2_null].all())
| 24.103659 | 181 | 0.657981 |
acf607f883e83c43ab3ac35099311e0263d67226 | 10,274 | py | Python | zeus/networks/pytorch/necks/ffm.py | shaido987/vega | 14d5d49fb8bdf96bd1f3fcfac201ce6b6712c3b6 | [
"MIT"
] | 240 | 2020-08-15T15:11:49.000Z | 2022-03-28T07:26:23.000Z | zeus/networks/pytorch/necks/ffm.py | WholeG/vega | d1ccf1c3ce68a118bdb6775594ceed0f895911e7 | [
"MIT"
] | 20 | 2020-08-29T06:18:21.000Z | 2022-03-21T04:35:57.000Z | zeus/networks/pytorch/necks/ffm.py | WholeG/vega | d1ccf1c3ce68a118bdb6775594ceed0f895911e7 | [
"MIT"
] | 69 | 2020-08-15T15:41:53.000Z | 2022-03-16T08:27:47.000Z | # -*- coding: utf-8 -*-
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the MIT License.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# MIT License for more details.
"""CurveLane neck for detection."""
import torch
import torch.nn as nn
from ..blocks.layer_creator import LayerCreator
from zeus.common import ClassType, ClassFactory
class ConvPack(nn.Module):
    """Conv layer bundled with optional normalization and activation.

    The concrete conv/norm layer types are built through ``LayerCreator``
    from the given config dicts, so they are configurable per model.

    :param in_channels: input feature map channel num
    :type in_channels: int
    :param out_channels: output feature map channel num
    :type out_channels: int
    :param kernel_size: convolution kernel size
    :type kernel_size: int
    :param stride: stride
    :type stride: int
    :param padding: padding
    :type padding: int
    :param dilation: dilation
    :type dilation: int
    :param groups: group num
    :type groups: int
    :param bias: conv bias flag; 'auto' disables the bias when a norm layer follows
    :type bias: bool or str
    :param conv_cfg: conv config
    :type conv_cfg: dict
    :param norm_cfg: norm config (None means no norm layer)
    :type norm_cfg: dict
    :param activation: activation name, or None for no activation
    :type activation: str
    :param inplace: whether the activation runs in place
    :type inplace: bool
    :return: Conv pack layer
    :rtype: nn.Module
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 padding=0,
                 dilation=1,
                 groups=1,
                 bias='auto',
                 conv_cfg=None,
                 norm_cfg=None,
                 activation='relu',
                 inplace=True):
        super().__init__()
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.activation = activation
        self.inplace = inplace
        self.with_norm = norm_cfg is not None
        self.with_activatation = activation is not None
        # 'auto': a following norm layer makes the conv bias redundant.
        if bias == 'auto':
            bias = False if self.with_norm else True
        self.with_bias = bias
        conv_creator = LayerCreator(**conv_cfg)
        self.conv = conv_creator.create_layer(
            in_channels,
            out_channels,
            kernel_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
            groups=groups,
            bias=bias)
        if self.with_norm:
            norm_channels = out_channels
            norm_creator = LayerCreator(**norm_cfg)
            norm = norm_creator.create_layer(num_features=norm_channels)
            # Register under the creator-provided name so state_dict keys
            # reflect the configured norm type.
            self.norm_name = norm_creator.get_name()
            self.add_module(self.norm_name, norm)
        if self.with_activatation:
            # NOTE(review): the activation type is hard-coded to ReLU here;
            # the `activation` argument only acts as an on/off flag — confirm
            # this is intended.
            act_cfg = {'type': 'ReLU'}
            act_creator = LayerCreator(**act_cfg)
            self.activate = act_creator.create_layer(inplace=inplace)

    def norm(self, x):
        """Apply the registered norm layer to ``x`` (looked up by name)."""
        x = getattr(self, self.norm_name)(x)
        return x

    def forward(self, x, activate=True, norm=True):
        """Forward compute: conv, then optional norm, then optional activation.

        :param x: input feature map
        :type x: tensor
        :param activate: whether to apply the activation (if configured)
        :type activate: bool
        :param norm: whether to apply the norm layer (if configured)
        :type norm: bool
        :return: output feature map
        :rtype: tensor
        """
        x = self.conv(x)
        if norm and self.with_norm:
            x = self.norm(x)
        if activate and self.with_activatation:
            x = self.activate(x)
        return x
class FeatureFusionNetwork(nn.Module):
    """The Core of FeatureFusionNetwork.

    Fuses pairs of backbone feature maps according to an architecture
    string such as ``"01-12"`` — pairs of input level indices separated
    by ``-`` (see :meth:`decoder_ffm_arch`).

    :param out_channels: out_channels
    :type out_channels: int
    :param num_outs: num_outs
    :type num_outs: int
    :param start_level: start_level
    :type start_level: int
    :param end_level: end_level
    :type end_level: int
    :param in_channels: in_channels
    :type in_channels: list
    :param add_extra_convs: add_extra_convs
    :type add_extra_convs: bool
    :param extra_convs_on_inputs: extra_convs_on_inputs
    :type extra_convs_on_inputs: bool
    :param relu_before_extra_convs: relu_before_extra_convs
    :type relu_before_extra_convs: bool
    :param conv_cfg: conv_cfg
    :type conv_cfg: dict
    :param norm_cfg: norm_cfg
    :type norm_cfg: dict
    :param activation: activation
    :type activation: dict
    :param feature_fusion_arch_str: feature_fusion_arch_str
    :type feature_fusion_arch_str: str
    """

    def __init__(self,
                 out_channels=128,
                 num_outs=4,
                 start_level=0,
                 end_level=-1,
                 in_channels=None,
                 add_extra_convs=False,
                 extra_convs_on_inputs=True,
                 relu_before_extra_convs=False,
                 conv_cfg=None,
                 norm_cfg=None,
                 activation=None,
                 feature_fusion_arch_str=None):
        super(FeatureFusionNetwork, self).__init__()
        if conv_cfg is None:
            conv_cfg = {'type': 'Conv'}
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.num_ins = len(in_channels)
        self.num_outs = num_outs
        self.activation = activation
        self.relu_before_extra_convs = relu_before_extra_convs
        if end_level == -1:
            self.backbone_end_level = self.num_ins
        else:
            self.backbone_end_level = end_level
        self.start_level = start_level
        self.end_level = end_level
        self.add_extra_convs = add_extra_convs
        self.extra_convs_on_inputs = extra_convs_on_inputs
        self.lateral_convs = nn.ModuleList()
        self.fpn_convs = nn.ModuleList()
        self.feature_fusion_arch_str = feature_fusion_arch_str
        # Downsampling pools used to bring shallower levels to a deeper
        # level's spatial size: stride 2 and stride 4 respectively
        # (names suggest C3->C4 and C2->C4 — TODO confirm).
        self.c34_maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.c24_maxpool = nn.MaxPool2d(kernel_size=5, stride=4, padding=1)
        for i in range(self.start_level, self.backbone_end_level):
            # 1x1 lateral conv to project each input level to out_channels.
            l_conv = ConvPack(
                in_channels[i],
                out_channels,
                1,
                conv_cfg=conv_cfg,
                norm_cfg=norm_cfg,
                activation=self.activation,
                inplace=False)
            # 3x3 conv applied to the concatenation of two laterals,
            # hence the doubled channel count.
            fpn_conv = ConvPack(
                out_channels * 2,
                out_channels * 2,
                3,
                padding=1,
                conv_cfg=conv_cfg,
                norm_cfg=norm_cfg,
                activation=self.activation,
                inplace=False)
            self.lateral_convs.append(l_conv)
            self.fpn_convs.append(fpn_conv)
        extra_levels = num_outs - self.backbone_end_level + self.start_level
        if add_extra_convs and extra_levels >= 1:
            for i in range(extra_levels):
                if i == 0 and self.extra_convs_on_inputs:
                    in_channels = self.in_channels[self.backbone_end_level - 1]
                else:
                    in_channels = out_channels
                extra_fpn_conv = ConvPack(
                    in_channels,
                    out_channels,
                    3,
                    stride=2,
                    padding=1,
                    conv_cfg=conv_cfg,
                    norm_cfg=norm_cfg,
                    activation=self.activation,
                    inplace=False)
                self.fpn_convs.append(extra_fpn_conv)

    def decoder_ffm_arch(self):
        """Decode the arch string into a list of index pairs.

        E.g. ``"01-12"`` -> ``[[0, 1], [1, 2]]``.
        """
        feature_fusion_arch = []
        block_arch = []
        for i in self.feature_fusion_arch_str:
            if i == '-':
                feature_fusion_arch.append(block_arch)
                block_arch = []
            else:
                block_arch.append(int(i))
        feature_fusion_arch.append(block_arch)
        return feature_fusion_arch

    def forward(self, inputs):
        """Forward method: fuse each encoded pair, then concat with level 2."""
        build_out = []
        fpn_arch = self.decoder_ffm_arch()
        for i in range(len(fpn_arch)):
            input1, input2 = fpn_arch[i][0], fpn_arch[i][1]
            laterals = [self.lateral_convs[input1](inputs[input1]), self.lateral_convs[input2](inputs[input2])]
            # sum of the two input
            # Downsample shallower levels so both laterals share a spatial
            # size before concatenation (level 0 by 4x, level 1 by 2x).
            if input1 == 0:
                laterals[0] = self.c24_maxpool(laterals[0])
            elif input1 == 1:
                laterals[0] = self.c34_maxpool(laterals[0])
            if input2 == 0:
                laterals[1] = self.c24_maxpool(laterals[1])
            elif input2 == 1:
                laterals[1] = self.c34_maxpool(laterals[1])
            build_out.append(self.fpn_convs[i](torch.cat((laterals[0], laterals[1]), 1)))
        # NOTE(review): assumes the arch string encodes exactly two fusion
        # blocks — build_out[1] raises IndexError otherwise; confirm.
        outs = torch.cat((inputs[2], torch.cat((build_out[0], build_out[1]), 1)), 1)
        return outs
def PseudoFeatureFusionNetwork(feature_map_list):
    """Pass-through "fusion": return the third entry (index 2) of the
    target feature-map list unchanged."""
    third_level = 2
    return feature_map_list[third_level]
def ArchChannels2Module(feature_fusion_arch_code, in_channels):
    """Build the fusion module for the given arch code.

    An arch code of ``'-'`` means "no fusion": the pass-through function is
    returned instead of constructing a FeatureFusionNetwork.
    """
    if feature_fusion_arch_code == '-':
        return PseudoFeatureFusionNetwork
    return FeatureFusionNetwork(
        in_channels=in_channels,
        out_channels=64,
        num_outs=4,
        feature_fusion_arch_str=feature_fusion_arch_code,
    )
@ClassFactory.register(ClassType.NETWORK)
class FeatureFusionModule(nn.Module):
    """FeatureFusionModule backbone.

    Thin wrapper that builds either a FeatureFusionNetwork or the
    pass-through pseudo network, depending on the desc's arch code.

    :param desc: Description of ResNetVariantDet.
    :type desc: NetworkDesc
    """

    def __init__(self, desc):
        super(FeatureFusionModule, self).__init__()
        # Only the first four backbone levels are fused.
        self.in_channels = desc["in_channels"][0:4]
        self.feature_fusion_arch_code = desc["arch_code"]
        self.num_ins = len(self.in_channels)
        self.neck = ArchChannels2Module(self.feature_fusion_arch_code, self.in_channels)

    def forward(self, inputs):
        """Get the result of ffm (applied to the first four feature maps)."""
        out = self.neck(inputs[0:4])
        return out

    def init_weights(self):
        """Initialize ffm weight."""
        # NOTE(review): FeatureFusionNetwork as defined in this file has no
        # init_weights() method, so this call would raise AttributeError —
        # confirm where init_weights is expected to come from.
        if self.feature_fusion_arch_code != '-':
            self.neck.init_weights()
| 33.685246 | 111 | 0.592564 |
acf6084849b727a0bd483473a5c1126751966f70 | 21,868 | py | Python | privaterooms/privaterooms.py | Onii-Chan-Discord/OB13-Cogs | 56320d393361e76d521c8aca51787df81e1e933c | [
"MIT"
] | 10 | 2021-02-18T18:15:16.000Z | 2022-02-26T01:49:10.000Z | privaterooms/privaterooms.py | Onii-Chan-Discord/OB13-Cogs | 56320d393361e76d521c8aca51787df81e1e933c | [
"MIT"
] | 37 | 2021-01-22T17:23:16.000Z | 2022-03-21T14:39:55.000Z | privaterooms/privaterooms.py | Kami-DiscordBot/OB13-Cogs | d89396df93874425e79f21dbaf089bd06e934e6e | [
"MIT"
] | 27 | 2021-01-22T13:25:17.000Z | 2022-03-28T20:49:39.000Z | """
MIT License
Copyright (c) 2021 Obi-Wan3
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from datetime import datetime
import discord
from redbot.core import commands, Config
class PrivateRooms(commands.Cog):
    """
    Automatic Private VCs with Lobby

    Private VCs that are created automatically, with permission overrides for a lobby channel.
    """

    def __init__(self, bot):
        self.bot = bot
        # Per-guild config: a global toggle plus named "systems", each holding
        # origin/lobby channel IDs, a name template, and the active VC list.
        self.config = Config.get_conf(self, identifier=14000605, force_registration=True)
        default_guild = {
            "toggle": False,
            "systems": {},
        }
        self.config.register_guild(**default_guild)
        self.bot.loop.create_task(self.initialize())

    async def initialize(self) -> None:
        """Clean up stale active-VC entries (and empty VCs) left over from a restart."""
        await self.bot.wait_until_red_ready()
        all_guilds = await self.config.all_guilds()
        for g in all_guilds.keys():
            if guild := self.bot.get_guild(g):
                async with self.config.guild(guild).all() as guild_settings:
                    for sys in guild_settings['systems'].values():
                        # NOTE(review): removing from sys['active'] while
                        # iterating it can skip entries — consider iterating
                        # a copy.
                        for a in sys['active']:
                            vc = guild.get_channel(a[0])
                            if not vc or not vc.members:
                                sys['active'].remove(a)
                            if vc and not vc.members and vc.permissions_for(guild.me).manage_channels:
                                await vc.delete(reason="PrivateRooms: unused VC found on cog load")

    @commands.Cog.listener("on_voice_state_update")
    async def _voice_listener(self, member: discord.Member, before, after):
        """Core state machine: create, reassign, and delete private VCs as
        members move between voice channels."""

        if (
            not await self.config.guild(member.guild).toggle() or  # PrivateRooms toggled off
            member.bot or  # Member is a bot
            await self.bot.cog_disabled_in_guild(self, member.guild)  # Cog disabled in guild
        ):
            return

        leftroom = False
        joinedroom = False

        # Moved channels
        if before.channel and after.channel:
            async with self.config.guild(member.guild).systems() as systems:
                for sys in systems.values():
                    if not sys['toggle']:
                        continue
                    active_vcs = [x[0] for x in sys['active']]

                    # Member joined an active PrivateRoom
                    log_channel, embed_links = await self._get_log(sys['log_channel'], member.guild)
                    if log_channel and sys['lobby'] == before.channel.id and after.channel.id in active_vcs:
                        await self._send_log(
                            channel=log_channel,
                            text=f"{member.mention} joined `{after.channel.name}`",
                            color=discord.Color.magenta(),
                            embed_links=embed_links,
                        )

                    # Member left a PrivateRoom
                    if before.channel.id in active_vcs and before.channel.id != after.channel.id:
                        leftroom = True

                    # Member went into the origin channel
                    if sys['origin'] == after.channel.id != before.channel.id:
                        joinedroom = True

                    if leftroom and joinedroom:
                        break

        # Left a channel
        if (before.channel and not after.channel) or leftroom:
            async with self.config.guild(member.guild).systems() as systems:
                for sys in systems.values():
                    # Skip system if not toggled on
                    if not sys['toggle']:
                        continue

                    for a in sys['active']:
                        if not a[0] == before.channel.id:
                            continue

                        # Owner left channel
                        if a[1] == member.id:
                            # Pick the first remaining human member, if any,
                            # as the new owner.
                            remaining = None
                            for m in before.channel.members:
                                if not m.bot and m.id != member.id:
                                    remaining = m
                                    break

                            lobby = member.guild.get_channel(sys['lobby'])
                            new_overwrites_lobby = lobby.overwrites
                            new_overwrites_before = before.channel.overwrites

                            # Reassign to another user
                            if remaining:
                                # NOTE(review): entries are appended as tuples
                                # (see below); item assignment only works after
                                # a config round-trip converts them to lists —
                                # confirm this never runs on a fresh tuple.
                                a[1] = remaining.id
                                new_overwrites_before.pop(member)
                                new_overwrites_before.update({remaining: discord.PermissionOverwrite(move_members=True, view_channel=True, connect=True)})
                                if before.channel.permissions_for(member.guild.me).manage_channels:
                                    await before.channel.edit(
                                        name=sys['channel_name'].replace("{creator}", remaining.display_name),
                                        overwrites=new_overwrites_before,
                                        reason=f"PrivateRooms: {member.display_name} left their VC, channel reassigned to {remaining.display_name}"
                                    )
                                else:
                                    return

                                new_overwrites_lobby.pop(member)
                                new_overwrites_lobby.update({remaining: discord.PermissionOverwrite(move_members=True)})
                                if lobby.permissions_for(member.guild.me).manage_channels:
                                    await lobby.edit(
                                        overwrites=new_overwrites_lobby,
                                        reason=f"PrivateRooms: {member.display_name} has left their VC, channel reassigned to {remaining.display_name}"
                                    )
                                else:
                                    return

                                log_channel, embed_links = await self._get_log(sys['log_channel'], member.guild)
                                if log_channel:
                                    await self._send_log(
                                        channel=log_channel,
                                        text=f"{member.mention} left `{before.channel.name}`, channel reassigned to {remaining.mention}",
                                        color=discord.Color.teal(),
                                        embed_links=embed_links,
                                    )

                            # Remove channel
                            else:
                                sys['active'].remove(a)
                                if before.channel.permissions_for(member.guild.me).manage_channels:
                                    await before.channel.delete(reason="PrivateRooms: all users have left")
                                else:
                                    return

                                new_overwrites_lobby.pop(member)
                                if lobby.permissions_for(member.guild.me).manage_channels:
                                    await lobby.edit(
                                        overwrites=new_overwrites_lobby,
                                        reason=f"PrivateRooms: {member.display_name}'s private VC has been deleted"
                                    )
                                else:
                                    return

                                log_channel, embed_links = await self._get_log(sys['log_channel'], member.guild)
                                if log_channel:
                                    await self._send_log(
                                        channel=log_channel,
                                        text=f"{member.mention} left `{before.channel.name}`, channel removed",
                                        color=discord.Color.dark_teal(),
                                        embed_links=embed_links,
                                    )

                        # Matching channel found: no need to scan further.
                        break

        # Joined a channel
        if (not before.channel and after.channel) or joinedroom:
            async with self.config.guild(member.guild).systems() as systems:
                for sys in systems.values():
                    # Joined an Origin channel of a system that is toggled on
                    if sys['toggle'] and sys['origin'] == after.channel.id:
                        # Create their private VC
                        if not after.channel.category.permissions_for(member.guild.me).manage_channels:
                            return
                        private_vc = await member.guild.create_voice_channel(
                            name=sys['channel_name'].replace("{creator}", member.display_name),
                            category=after.channel.category,
                            bitrate=min(sys['bitrate']*1000, member.guild.bitrate_limit),
                            reason=f"PrivateRooms: created by {member.display_name}",
                            overwrites={
                                member: discord.PermissionOverwrite(move_members=True, view_channel=True, connect=True),
                                member.guild.default_role: discord.PermissionOverwrite(connect=False),
                                member.guild.me: discord.PermissionOverwrite(connect=True)
                            }
                        )

                        # Move creator to their private room
                        if not (after.channel.permissions_for(member.guild.me).move_members and private_vc.permissions_for(member.guild.me).move_members):
                            return
                        await member.move_to(private_vc, reason="PrivateRooms: is VC creator")

                        # Edit Lobby channel to have permission overwrite
                        lobby = member.guild.get_channel(sys['lobby'])
                        new_overwrites = lobby.overwrites
                        new_overwrites[member] = discord.PermissionOverwrite(move_members=True)
                        if not lobby.permissions_for(member.guild.me).manage_channels:
                            return
                        await lobby.edit(
                            overwrites=new_overwrites,
                            reason=f"PrivateRooms: {member.display_name} has created a new private VC"
                        )

                        # If log channel set, then send logs
                        log_channel, embed_links = await self._get_log(sys['log_channel'], member.guild)
                        if log_channel:
                            # NOTE(review): before.channel is None when the
                            # member joined from no channel (AttributeError on
                            # .mention), and otherwise names the channel they
                            # left, not the origin — `after.channel` is likely
                            # intended here; confirm.
                            await self._send_log(
                                channel=log_channel,
                                text=f"{member.mention} joined {before.channel.mention} and created `{private_vc.name}`",
                                color=discord.Color.teal(),
                                embed_links=embed_links,
                            )

                        # Add to active list
                        sys['active'].append((private_vc.id, member.id))
                        break

    @staticmethod
    async def _get_log(channel_id, guild: discord.Guild):
        """Resolve a configured log channel ID to (channel_or_None, can_embed)."""
        log_channel, embed_links = None, False
        if channel_id:
            log_channel = guild.get_channel(channel_id)
            # Unusable (deleted or unwritable) channels degrade to None.
            if not log_channel or not log_channel.permissions_for(guild.me).send_messages:
                log_channel = None
            if log_channel and log_channel.permissions_for(guild.me).embed_links:
                embed_links = True
        return log_channel, embed_links

    @staticmethod
    async def _send_log(channel: discord.TextChannel, text: str, color: discord.Color, embed_links: bool):
        """Send a log line, as an embed when permitted, otherwise as plain text."""
        if embed_links:
            return await channel.send(embed=discord.Embed(
                timestamp=datetime.utcnow(),
                color=color,
                description=text
            ))
        else:
            return await channel.send(
                text,
                allowed_mentions=discord.AllowedMentions.none()
            )

    @commands.guild_only()
    @commands.admin_or_permissions(administrator=True)
    @commands.group(name="privaterooms")
    async def _privaterooms(self, ctx: commands.Context):
        """Set Up Private VC Systems"""

    @_privaterooms.command(name="toggle")
    async def _toggle(self, ctx: commands.Context, true_or_false: bool):
        """Toggle PrivateRooms in this server."""
        await self.config.guild(ctx.guild).toggle.set(true_or_false)
        return await ctx.tick()

    @_privaterooms.command(name="add")
    async def _add(self, ctx: commands.Context, system_name: str, origin_channel: discord.VoiceChannel, lobby_channel: discord.VoiceChannel, default_bitrate_in_kbps: int, *, channel_name_template: str):
        """
        Add a new PrivateRooms system in this server.

        For the `channel_name_template`, enter a string, with `{creator}` contained if you want it to be replaced with the VC creator's display name.
        """
        if origin_channel.category and not origin_channel.category.permissions_for(ctx.guild.me).manage_channels:
            return await ctx.send("I don't have the `Manage Channels` permission in that category!")
        elif not origin_channel.category and not ctx.guild.me.guild_permissions.manage_channels:
            return await ctx.send("I don't have the `Manage Channels` permission in this server!")

        async with self.config.guild(ctx.guild).systems() as systems:
            if system_name in systems.keys():
                return await ctx.send("There is already a PrivateRooms system with that name!")

            systems[system_name] = {
                "toggle": True,
                "origin": origin_channel.id,
                "lobby": lobby_channel.id,
                "bitrate": default_bitrate_in_kbps,
                "channel_name": channel_name_template,
                "log_channel": None,
                "active": []
            }

        return await ctx.send(f'A new PrivateRooms system with origin channel `{origin_channel.name}` and lobby `{lobby_channel.name}` has been created and toggled on. If you would like to toggle it or set a log channel, please use `{ctx.clean_prefix}privaterooms edit logchannel {system_name}`.')

    @_privaterooms.group(name="edit")
    async def _edit(self, ctx: commands.Context):
        """Edit a PrivateRooms System"""

    @_edit.command(name="toggle")
    async def _edit_toggle(self, ctx: commands.Context, system_name: str, true_or_false: bool):
        """Toggle a PrivateRooms system in this server."""
        async with self.config.guild(ctx.guild).systems() as systems:
            # NOTE(review): unlike the other edit subcommands, this one does
            # not check that system_name exists first — confirm intended.
            systems[system_name]["toggle"] = true_or_false
        return await ctx.tick()

    @_edit.command(name="origin")
    async def _edit_origin(self, ctx: commands.Context, system_name: str, origin_channel: discord.VoiceChannel):
        """Edit the Origin channel for a PrivateRooms system in this server."""
        async with self.config.guild(ctx.guild).systems() as systems:
            if system_name not in systems.keys():
                return await ctx.send("There was no PrivateRooms system found with that name!")
            systems[system_name]["origin"] = origin_channel.id
        return await ctx.tick()

    @_edit.command(name="lobby")
    async def _edit_lobby(self, ctx: commands.Context, system_name: str, lobby_channel: discord.VoiceChannel):
        """Edit the Lobby channel for a PrivateRooms system in this server."""
        async with self.config.guild(ctx.guild).systems() as systems:
            if system_name not in systems.keys():
                return await ctx.send("There was no PrivateRooms system found with that name!")
            systems[system_name]["lobby"] = lobby_channel.id
        return await ctx.tick()

    @_edit.command(name="bitrate")
    async def _edit_bitrate(self, ctx: commands.Context, system_name: str, bitrate_in_kbps: int):
        """Edit the new VC bitrate (in kbps) for a PrivateRooms system in this server."""
        async with self.config.guild(ctx.guild).systems() as systems:
            if system_name not in systems.keys():
                return await ctx.send("There was no PrivateRooms system found with that name!")
            systems[system_name]["bitrate"] = bitrate_in_kbps
        return await ctx.tick()

    @_edit.command(name="name")
    async def _edit_name(self, ctx: commands.Context, system_name: str, *, channel_name_template: str):
        """
        Edit the Lobby channel for a PrivateRooms system in this server.

        Enter a string, with `{creator}` contained if you want it to be replaced with the VC creator's display name.
        """
        async with self.config.guild(ctx.guild).systems() as systems:
            if system_name not in systems.keys():
                return await ctx.send("There was no PrivateRooms system found with that name!")
            systems[system_name]["channel_name"] = channel_name_template
        return await ctx.tick()

    @_edit.command(name="logchannel")
    async def _edit_log_channel(self, ctx: commands.Context, system_name: str, channel: discord.TextChannel = None):
        """Edit the log channel for a PrivateRooms system in this server (leave blank to set to None)."""
        async with self.config.guild(ctx.guild).systems() as systems:
            if system_name not in systems.keys():
                return await ctx.send("There was no PrivateRooms system found with that name!")
            if channel:
                if not channel.permissions_for(ctx.guild.me).send_messages:
                    return await ctx.send(f"I cannot send messages to {channel.mention}!")
                systems[system_name]["log_channel"] = channel.id
            else:
                systems[system_name]["log_channel"] = None
        return await ctx.tick()

    @_privaterooms.command(name="remove", aliases=["delete"])
    async def _remove(self, ctx: commands.Context, system_name: str, enter_true_to_confirm: bool):
        """Remove a PrivateRooms system in this server."""
        if not enter_true_to_confirm:
            return await ctx.send("Please provide `true` as the parameter to confirm.")
        async with self.config.guild(ctx.guild).systems() as systems:
            if system_name not in systems.keys():
                return await ctx.send("There was no PrivateRooms system found with that name!")
            del systems[system_name]
        return await ctx.send(f"The PrivateRooms system `{system_name}` was removed.")

    @_privaterooms.command(name="clearactive")
    async def _clear_active(self, ctx: commands.Context, system_name: str, enter_true_to_confirm: bool):
        """Clears the cache of current active PrivateRooms."""
        if not enter_true_to_confirm:
            return await ctx.send("Please provide `true` as the parameter to confirm.")
        async with self.config.guild(ctx.guild).systems() as systems:
            if system_name not in systems.keys():
                return await ctx.send("There was no PrivateRooms system found with that name!")
            systems[system_name]["active"] = []
        return await ctx.send(f"The active rooms in `{system_name}` were cleared.")

    @commands.bot_has_permissions(embed_links=True)
    @_privaterooms.command(name="view")
    async def _view(self, ctx: commands.Context):
        """View the PrivateRooms settings in this server."""
        settings = await self.config.guild(ctx.guild).all()
        embed = discord.Embed(title="PrivateRooms Settings", color=await ctx.embed_color(), description=f"""
        **Server Toggle:** {settings['toggle']}
        {"**Systems:** None" if not settings['systems'] else ""}
        """)
        for name, system in settings['systems'].items():
            origin, lobby, log = None, None, None
            if ori := ctx.guild.get_channel(system['origin']):
                origin = ori.name
            if lob := ctx.guild.get_channel(system['lobby']):
                lobby = lob.name
            if system['log_channel'] and (glo := ctx.guild.get_channel(system['log_channel'])):
                log = glo.mention
            embed.add_field(
                name=f"System `{name}`",
                inline=False,
                value=f"""
                **Toggle:** {system['toggle']}
                **Origin:** {origin}
                **Lobby:** {lobby}
                **BitRate:** {system['bitrate']} kbps
                **Name Template:** {system['channel_name']}
                **Log Channel:** {log}
                """
            )
        return await ctx.send(embed=embed)
acf60921577688a392bb6ecb8410fc823067e098 | 3,833 | py | Python | udsoncan/exceptions.py | autopi-io/py-udsoncan | 2351ee02bf4a70e5661d6fd5f48f58db740f244e | [
"MIT"
] | 1 | 2021-03-21T12:18:23.000Z | 2021-03-21T12:18:23.000Z | udsoncan/exceptions.py | autopi-io/py-udsoncan | 2351ee02bf4a70e5661d6fd5f48f58db740f244e | [
"MIT"
] | null | null | null | udsoncan/exceptions.py | autopi-io/py-udsoncan | 2351ee02bf4a70e5661d6fd5f48f58db740f244e | [
"MIT"
] | null | null | null | from __future__ import absolute_import
import inspect
def service_name(service):
    """Return the service's class name as a unicode string.

    Accepts either a service class or an instance of one.
    """
    if inspect.isclass(service):
        cls = service
    else:
        cls = service.__class__
    return unicode(cls.__name__)
class TimeoutException(Exception):
    u"""
    Simple extension of ``Exception`` with no additional property. Raised when a timeout in the communication happens.
    """

    def __init__(self, *args, **kwargs):
        # No extra behavior; just forward everything to Exception.
        Exception.__init__(self, *args, **kwargs)
class NegativeResponseException(Exception):
    u"""
    Raised when the server returns a negative response (response code starting by 0x7F).
    The response that triggered the exception is available in ``e.response``

    :param response: The response that triggered the exception
    :type response: :ref:`Response<Response>`
    """

    def __init__(self, response, *args, **kwargs):
        # Keep the offending response for programmatic inspection.
        self.response = response
        msg = self.make_msg(response)
        # An optional first extra positional argument is appended to the
        # generated message instead of being forwarded to Exception.
        if len(args) > 0:
            msg = msg + u" " + unicode(args[0])
            args = args[1:]
        super(NegativeResponseException, self).__init__(msg, *args, **kwargs)

    def make_msg(self, response):
        if response.service is not None:
            prefix = response.service.get_name() + u" "
        else:
            prefix = u""
        return u"%sservice execution returned a negative response %s (0x%x)" % (prefix, response.code_name, response.code)
class InvalidResponseException(Exception):
    u"""
    Raised when a service fails to decode a server response data. A bad message length or a value that is out of range may both be valid causes.
    The response that triggered the exception is available in ``e.response``

    :param response: The response that triggered the exception
    :type response: :ref:`Response<Response>`
    """

    def __init__(self, response, *args, **kwargs):
        # Keep the offending response for programmatic inspection.
        self.response = response
        msg = self.make_msg(response)
        # An optional first extra positional argument is appended to the
        # generated message instead of being forwarded to Exception.
        if len(args) > 0:
            msg = msg + u" " + unicode(args[0])
            args = args[1:]
        super(InvalidResponseException, self).__init__(msg, *args, **kwargs)

    def make_msg(self, response):
        if response.service is not None:
            prefix = response.service.get_name() + u" "
        else:
            prefix = u""
        if response.valid:
            reason = u""
        else:
            reason = u" Reason : %s" % (response.invalid_reason)
        return u"%sservice execution returned an invalid response.%s" % (prefix, reason)
class UnexpectedResponseException(Exception):
    u"""
    Raised when the client receives a valid response but considers the one received to not be the expected response.
    The response that triggered the exception is available in ``e.response``

    :param response: The response that triggered the exception
    :type response: :ref:`Response<Response>`

    :param details: Additional details about the error
    :type details: string
    """

    def __init__(self, response, details=u"<No details given>", *args, **kwargs):
        # Keep the offending response for programmatic inspection.
        self.response = response
        msg = self.make_msg(response, details)
        # An optional first extra positional argument is appended to the
        # generated message instead of being forwarded to Exception.
        if len(args) > 0:
            msg = msg + u" " + unicode(args[0])
            args = args[1:]
        super(UnexpectedResponseException, self).__init__(msg, *args, **kwargs)

    def make_msg(self, response, details):
        if response.service is not None:
            prefix = response.service.get_name() + u" "
        else:
            prefix = u""
        return u"%sservice execution returned a valid response but unexpected. Details : %s " % (prefix, details)
class ConfigError(Exception):
    u"""
    Raised when a bad configuration element is encountered.

    :param key: The configuration key that failed to resolve properly
    :type key: object
    """

    def __init__(self, key, msg=u"<No details given>", *args, **kwargs):
        # Keep the offending key so handlers can inspect it.
        self.key = key
        Exception.__init__(self, msg, *args, **kwargs)
| 41.663043 | 144 | 0.675189 |
acf60aaf7066115e5d1f32c70691ab35cb0c74a1 | 1,189 | py | Python | modules/cephes/doc/tanh.py | brycelelbach/nt2 | 73d7e8dd390fa4c8d251c6451acdae65def70e0b | [
"BSL-1.0"
] | 1 | 2022-03-24T03:35:10.000Z | 2022-03-24T03:35:10.000Z | modules/cephes/doc/tanh.py | brycelelbach/nt2 | 73d7e8dd390fa4c8d251c6451acdae65def70e0b | [
"BSL-1.0"
] | null | null | null | modules/cephes/doc/tanh.py | brycelelbach/nt2 | 73d7e8dd390fa4c8d251c6451acdae65def70e0b | [
"BSL-1.0"
] | null | null | null | [ ## this file was manually modified by jt
{
'functor' : {
'arity' : '1',
'call_types' : [],
'ret_arity' : '0',
'rturn' : {
'default' : 'T',
},
'simd_types' : [],
'special' : ['cephes'],
'type_defs' : [],
'types' : ['real_'],
},
'info' : 'manually modified',
'unit' : {
'global_header' : {
'first_stamp' : 'created by jt the 01/03/2011',
'included' : ['#include <nt2/include/functions/tanh.hpp>'],
'notes' : [],
'stamp' : 'modified by jt the 01/03/2011',
},
'ranges' : {
'default' : [['T(-100)', 'T(100)']],
},
'specific_values' : {
},
'verif_test' : {
'property_call' : {
'default' : ['nt2::cephes::tanh(a0)'],
},
'property_value' : {
'default' : ['nt2::tanh(a0)'],
},
'simd' : {
},
'ulp_thresh' : {
'default' : ['1.0'],
},
},
},
},
]
| 27.022727 | 72 | 0.333894 |
acf60aef857c8a0e983719b0d42ef1501c193168 | 2,080 | py | Python | Visualization-for-Company-Stakeholders-/code.py | Hacker-UT/Data_Science | 9c71e41a1a9b6e848886c6fd31358c488359f79b | [
"MIT"
] | 1 | 2020-06-21T08:36:43.000Z | 2020-06-21T08:36:43.000Z | Visualization-for-Company-Stakeholders-/code.py | Hacker-UT/greyatom-python-for-data-science | 9c71e41a1a9b6e848886c6fd31358c488359f79b | [
"MIT"
] | null | null | null | Visualization-for-Company-Stakeholders-/code.py | Hacker-UT/greyatom-python-for-data-science | 9c71e41a1a9b6e848886c6fd31358c488359f79b | [
"MIT"
] | null | null | null | # --------------
#Importing header files
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
#Reading the file
data=pd.read_csv(path)
#Code starts here
# Step 1
#Creating a new variable to store the value counts
loan_status=data['Loan_Status'].value_counts()
#Plotting bar plot
loan_status.plot(kind='bar')
# Step 2
#Plotting an unstacked bar plot
property_and_loan=data.groupby(['Property_Area','Loan_Status']).size().unstack()
property_and_loan.plot(kind='bar',stacked=False)
#Changing the x-axis label
plt.xlabel('Property Area')
#Changing the y-axis label
plt.ylabel('Loan Status')
#Rotating the ticks of X-axis
plt.xticks(rotation=45)
# Step 3
#Plotting a stacked bar plot
education_and_loan=data.groupby(['Education','Loan_Status']).size().unstack()
education_and_loan.plot(kind='bar',stacked=True)
#Changing the x-axis label
plt.xlabel('Education Status')
#Changing the y-axis label
plt.ylabel('Loan Status')
#Rotating the ticks of X-axis
plt.xticks(rotation=45)
# Step 4
#Subsetting the dataframe based on 'Education' column
graduate=data[data['Education']=='Graduate']
#Subsetting the dataframe based on 'Education' column
not_graduate=data[data['Education']=='Not Graduate']
#Plotting density plot for 'Graduate'
graduate.plot(kind='density',label='Graduate')
#Plotting density plot for 'Graduate'
not_graduate.plot(kind='density',label='Not Graduate')
#For automatic legend display
# Step 5
#Setting up the subplots
fig ,(ax_1,ax_2,ax_3)=plt.subplots(3,1)
#Plotting scatter plot
data.plot(ax=ax_1).scatter(x='ApplicantIncome',y='LoanAmount')
#Setting the subplot axis title
plt.xlabel('Applicant Income')
#Plotting scatter plot
data.plot(ax=ax_2).scatter(x='CoapplicantIncome',y='LoanAmount')
#Setting the subplot axis title
plt.xlabel('Coapplicant Income')
#Creating a new column 'TotalIncome'
data['TotalIncome']=data['ApplicantIncome'] + data['CoapplicantIncome']
#Plotting scatter plot
data.plot(ax=ax_3).scatter(x='TotalIncome',y='LoanAmount')
#Setting the subplot axis title
plt.xlabel('Total Income')
| 20.594059 | 80 | 0.755769 |
acf60baddc6bd09afe920fd6a010fa62ab0ef837 | 16,443 | py | Python | synapse/types.py | cuongnv/synapse | bb6c9008f1bba3c8e7e13051f0f8333f62ed8f31 | [
"Apache-2.0"
] | 1 | 2021-05-31T23:35:36.000Z | 2021-05-31T23:35:36.000Z | synapse/types.py | cuongnv/synapse | bb6c9008f1bba3c8e7e13051f0f8333f62ed8f31 | [
"Apache-2.0"
] | 1 | 2020-02-10T10:03:31.000Z | 2020-02-10T10:03:31.000Z | synapse/types.py | cuongnv/synapse | bb6c9008f1bba3c8e7e13051f0f8333f62ed8f31 | [
"Apache-2.0"
] | 1 | 2020-01-30T11:03:37.000Z | 2020-01-30T11:03:37.000Z | # -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
# Copyright 2019 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import string
import sys
from collections import namedtuple
from typing import Any, Dict, Tuple, TypeVar
import attr
from signedjson.key import decode_verify_key_bytes
from unpaddedbase64 import decode_base64
from synapse.api.errors import Codes, SynapseError
# define a version of typing.Collection that works on python 3.5
# (typing.Collection only exists from Python 3.6; on 3.5 we synthesise an
# equivalent generic ABC from its three parent protocols).
if sys.version_info[:3] >= (3, 6, 0):
    from typing import Collection
else:
    from typing import Sized, Iterable, Container
    T_co = TypeVar("T_co", covariant=True)
    class Collection(Iterable[T_co], Container[T_co], Sized):
        # Backport: something that is iterable, supports `in`, and has len().
        __slots__ = ()
# Define a state map type from type/state_key to T (usually an event ID or
# event)
T = TypeVar("T")
StateMap = Dict[Tuple[str, str], T]
# the type of a JSON-serialisable dict. This could be made stronger, but it will
# do for now.
JsonDict = Dict[str, Any]
class Requester(
    namedtuple(
        "Requester", ["user", "access_token_id", "is_guest", "device_id", "app_service"]
    )
):
    """
    Represents the user making a request
    Attributes:
        user (UserID): id of the user making the request
        access_token_id (int|None): *ID* of the access token used for this
            request, or None if it came via the appservice API or similar
        is_guest (bool): True if the user making this request is a guest user
        device_id (str|None): device_id which was set at authentication time
        app_service (ApplicationService|None): the AS requesting on behalf of the user
    """
    def serialize(self):
        """Converts self to a type that can be serialized as JSON, and then
        deserialized by `deserialize`
        Returns:
            dict
        """
        # NOTE: these keys form a wire format read back by `deserialize`;
        # keep the two methods in sync.
        return {
            "user_id": self.user.to_string(),
            "access_token_id": self.access_token_id,
            "is_guest": self.is_guest,
            "device_id": self.device_id,
            # Only the appservice *ID* is serialized; deserialize() looks the
            # full object back up via the datastore.
            "app_server_id": self.app_service.id if self.app_service else None,
        }
    @staticmethod
    def deserialize(store, input):
        """Converts a dict that was produced by `serialize` back into a
        Requester.
        Args:
            store (DataStore): Used to convert AS ID to AS object
            input (dict): A dict produced by `serialize`
        Returns:
            Requester
        """
        appservice = None
        if input["app_server_id"]:
            appservice = store.get_app_service_by_id(input["app_server_id"])
        return Requester(
            user=UserID.from_string(input["user_id"]),
            access_token_id=input["access_token_id"],
            is_guest=input["is_guest"],
            device_id=input["device_id"],
            app_service=appservice,
        )
def create_requester(
    user_id, access_token_id=None, is_guest=False, device_id=None, app_service=None
):
    """
    Create a new ``Requester`` object
    Args:
        user_id (str|UserID): id of the user making the request
        access_token_id (int|None): *ID* of the access token used for this
            request, or None if it came via the appservice API or similar
        is_guest (bool): True if the user making this request is a guest user
        device_id (str|None): device_id which was set at authentication time
        app_service (ApplicationService|None): the AS requesting on behalf of the user
    Returns:
        Requester
    """
    # Accept either a plain "@user:domain" string or an already-parsed UserID.
    if isinstance(user_id, UserID):
        parsed_user = user_id
    else:
        parsed_user = UserID.from_string(user_id)
    return Requester(parsed_user, access_token_id, is_guest, device_id, app_service)
def get_domain_from_id(string):
    """Return the server-name part of a Matrix ID: everything after the
    first ``:``.  Raises a 400 SynapseError if there is no colon."""
    _head, sep, domain = string.partition(":")
    if not sep:
        raise SynapseError(400, "Invalid ID: %r" % (string,))
    return domain
def get_localpart_from_id(string):
    """Return the localpart of a Matrix ID: the text between the leading
    sigil character and the first ``:``.  Raises a 400 SynapseError if the
    ID contains no colon."""
    head, sep, _domain = string.partition(":")
    if not sep:
        raise SynapseError(400, "Invalid ID: %r" % (string,))
    # Drop the one-character sigil (e.g. "@" for users).
    return head[1:]
class DomainSpecificString(namedtuple("DomainSpecificString", ("localpart", "domain"))):
    """Common base class among ID/name strings that have a local part and a
    domain name, prefixed with a sigil.
    Has the fields:
        'localpart' : The local part of the name (without the leading sigil)
        'domain' : The domain part of the name

    Subclasses must define a one-character ``SIGIL`` class attribute.
    """
    # Deny iteration because it will bite you if you try to create a singleton
    # set by:
    #    users = set(user)
    def __iter__(self):
        raise ValueError("Attempted to iterate a %s" % (type(self).__name__,))
    # Because this class is a namedtuple of strings and booleans, it is deeply
    # immutable.
    def __copy__(self):
        return self
    def __deepcopy__(self, memo):
        return self
    @classmethod
    def from_string(cls, s: str):
        """Parse the string given by 's' into a structure object."""
        # The subclass-specific sigil (e.g. "@" for users) must come first.
        if len(s) < 1 or s[0:1] != cls.SIGIL:
            raise SynapseError(
                400,
                "Expected %s string to start with '%s'" % (cls.__name__, cls.SIGIL),
                Codes.INVALID_PARAM,
            )
        # Split on the first ":" only -- the domain part may itself contain
        # further colons (e.g. an explicit port).
        parts = s[1:].split(":", 1)
        if len(parts) != 2:
            raise SynapseError(
                400,
                "Expected %s of the form '%slocalname:domain'"
                % (cls.__name__, cls.SIGIL),
                Codes.INVALID_PARAM,
            )
        domain = parts[1]
        # This code will need changing if we want to support multiple domain
        # names on one HS
        return cls(localpart=parts[0], domain=domain)
    def to_string(self):
        """Return a string encoding the fields of the structure object."""
        return "%s%s:%s" % (self.SIGIL, self.localpart, self.domain)
    @classmethod
    def is_valid(cls, s):
        # Non-raising validity check built on top of from_string().
        try:
            cls.from_string(s)
            return True
        except Exception:
            return False
    __repr__ = to_string
class UserID(DomainSpecificString):
    """Structure representing a user ID."""
    SIGIL = "@"  # user IDs look like "@localpart:domain"
class RoomAlias(DomainSpecificString):
    """Structure representing a room name."""
    SIGIL = "#"  # room aliases look like "#localpart:domain"
class RoomID(DomainSpecificString):
    """Structure representing a room id. """
    SIGIL = "!"  # room IDs look like "!localpart:domain"
class EventID(DomainSpecificString):
    """Structure representing an event id. """
    SIGIL = "$"  # event IDs look like "$localpart:domain"
class GroupID(DomainSpecificString):
    """Structure representing a group ID."""
    SIGIL = "+"
    @classmethod
    def from_string(cls, s):
        """Parse a "+localpart:domain" string, additionally validating that
        the localpart is non-empty and only uses allowed mxid characters."""
        group_id = super(GroupID, cls).from_string(s)
        if not group_id.localpart:
            raise SynapseError(400, "Group ID cannot be empty", Codes.INVALID_PARAM)
        if contains_invalid_mxid_characters(group_id.localpart):
            raise SynapseError(
                400,
                "Group ID can only contain characters a-z, 0-9, or '=_-./'",
                Codes.INVALID_PARAM,
            )
        return group_id
# Characters permitted in the localpart of an mxid / group id.
mxid_localpart_allowed_characters = set(
    "_-./=" + string.ascii_lowercase + string.digits
)
def contains_invalid_mxid_characters(localpart):
    """Check for characters not allowed in an mxid or groupid localpart
    Args:
        localpart (basestring): the localpart to be checked
    Returns:
        bool: True if there are any naughty characters
    """
    # The localpart is valid iff every character is in the allowed set.
    return not mxid_localpart_allowed_characters.issuperset(localpart)
# Matches upper-case ASCII letters and "_" in a bytes username; used by
# map_username_to_mxid_localpart for the case-sensitive "_"-escaping step.
UPPER_CASE_PATTERN = re.compile(b"[A-Z_]")
# the following is a pattern which matches '=', and bytes which are not allowed in a mxid
# localpart.
#
# It works by:
#  * building a string containing the allowed characters (excluding '=')
#  * escaping every special character with a backslash (to stop '-' being interpreted as a
#    range operator)
#  * wrapping it in a '[^...]' regex
#  * converting the whole lot to a 'bytes' sequence, so that we can use it to match
#    bytes rather than strings
#
NON_MXID_CHARACTER_PATTERN = re.compile(
    ("[^%s]" % (re.escape("".join(mxid_localpart_allowed_characters - {"="})),)).encode(
        "ascii"
    )
)
def map_username_to_mxid_localpart(username, case_sensitive=False):
    """Map a username onto a string suitable for a MXID
    This follows the algorithm laid out at
    https://matrix.org/docs/spec/appendices.html#mapping-from-other-character-sets.
    Args:
        username (unicode|bytes): username to be mapped
        case_sensitive (bool): true if TEST and test should be mapped
            onto different mxids
    Returns:
        unicode: string suitable for a mxid localpart
    """
    # Work in bytes throughout so the escaping operates per byte.
    if not isinstance(username, bytes):
        username = username.encode("utf-8")
    # first we sort out upper-case characters
    if case_sensitive:
        def f1(m):
            # Escape an upper-case letter (or "_") as "_" + its lower-case form.
            return b"_" + m.group().lower()
        username = UPPER_CASE_PATTERN.sub(f1, username)
    else:
        username = username.lower()
    # then we sort out non-ascii characters
    def f2(m):
        g = m.group()[0]
        if isinstance(g, str):
            # on python 2, we need to do a ord(). On python 3, the
            # byte itself will do.
            g = ord(g)
        # Escape a disallowed byte as "=" + two lower-case hex digits.
        return b"=%02x" % (g,)
    username = NON_MXID_CHARACTER_PATTERN.sub(f2, username)
    # we also do the =-escaping to mxids starting with an underscore.
    username = re.sub(b"^_", b"=5f", username)
    # we should now only have ascii bytes left, so can decode back to a
    # unicode.
    return username.decode("ascii")
class StreamToken(
    namedtuple(
        "Token",
        (
            "room_key",
            "presence_key",
            "typing_key",
            "receipt_key",
            "account_data_key",
            "push_rules_key",
            "to_device_key",
            "device_list_key",
            "groups_key",
        ),
    )
):
    """A token made of one position per underlying stream, serialised by
    joining the per-stream keys with ``_SEPARATOR``.  All fields except
    ``room_key`` are integer stream positions (as strings); ``room_key``
    is a RoomStreamToken-style string such as "s12" or "t3-12"."""
    _SEPARATOR = "_"
    START = None  # type: StreamToken
    @classmethod
    def from_string(cls, string):
        """Parse a "_"-separated token.  Missing trailing fields (tokens
        minted before newer streams existed) are padded with "0"."""
        try:
            keys = string.split(cls._SEPARATOR)
            while len(keys) < len(cls._fields):
                # i.e. old token from before receipt_key
                keys.append("0")
            return cls(*keys)
        except Exception:
            raise SynapseError(400, "Invalid Token")
    def to_string(self):
        # Inverse of from_string(): join every field with the separator.
        return self._SEPARATOR.join([str(k) for k in self])
    @property
    def room_stream_id(self):
        # TODO(markjh): Awful hack to work around hacks in the presence tests
        # which assume that the keys are integers.
        if type(self.room_key) is int:
            return self.room_key
        else:
            # room_key looks like "s12" or "t3-12"; the stream ordering is
            # the last "-"-separated component after the leading letter.
            return int(self.room_key[1:].split("-")[-1])
    def is_after(self, other):
        """Does this token contain events that the other doesn't?"""
        return (
            (other.room_stream_id < self.room_stream_id)
            or (int(other.presence_key) < int(self.presence_key))
            or (int(other.typing_key) < int(self.typing_key))
            or (int(other.receipt_key) < int(self.receipt_key))
            or (int(other.account_data_key) < int(self.account_data_key))
            or (int(other.push_rules_key) < int(self.push_rules_key))
            or (int(other.to_device_key) < int(self.to_device_key))
            or (int(other.device_list_key) < int(self.device_list_key))
            or (int(other.groups_key) < int(self.groups_key))
        )
    def copy_and_advance(self, key, new_value):
        """Advance the given key in the token to a new value if and only if the
        new value is after the old value.
        """
        new_token = self.copy_and_replace(key, new_value)
        if key == "room_key":
            new_id = new_token.room_stream_id
            old_id = self.room_stream_id
        else:
            new_id = int(getattr(new_token, key))
            old_id = int(getattr(self, key))
        if old_id < new_id:
            return new_token
        else:
            return self
    def copy_and_replace(self, key, new_value):
        # Return a new token with the single named field replaced.
        return self._replace(**{key: new_value})
# The earliest possible token: room stream position "s0" and 0 everywhere else.
StreamToken.START = StreamToken(*(["s0"] + ["0"] * (len(StreamToken._fields) - 1)))
class RoomStreamToken(namedtuple("_StreamToken", "topological stream")):
    """Tokens are positions between events. The token "s1" comes after event 1.
            s0    s1
            |     |
        [0] V [1] V [2]
    Tokens can either be a point in the live event stream or a cursor going
    through historic events.
    When traversing the live event stream events are ordered by when they
    arrived at the homeserver.
    When traversing historic events the events are ordered by their depth in
    the event graph "topological_ordering" and then by when they arrived at the
    homeserver "stream_ordering".
    Live tokens start with an "s" followed by the "stream_ordering" id of the
    event it comes after. Historic tokens start with a "t" followed by the
    "topological_ordering" id of the event it comes after, followed by "-",
    followed by the "stream_ordering" id of the event it comes after.
    """
    __slots__ = []  # type: list
    @classmethod
    def parse(cls, string):
        # Accepts both live ("s<stream>") and historic ("t<top>-<stream>")
        # tokens; anything unparseable becomes a 400 error.
        try:
            if string[0] == "s":
                return cls(topological=None, stream=int(string[1:]))
            if string[0] == "t":
                parts = string[1:].split("-", 1)
                return cls(topological=int(parts[0]), stream=int(parts[1]))
        except Exception:
            pass
        raise SynapseError(400, "Invalid token %r" % (string,))
    @classmethod
    def parse_stream_token(cls, string):
        # Like parse(), but only live ("s...") tokens are accepted.
        try:
            if string[0] == "s":
                return cls(topological=None, stream=int(string[1:]))
        except Exception:
            pass
        raise SynapseError(400, "Invalid token %r" % (string,))
    def __str__(self):
        # Serialise back into the "t<top>-<stream>" / "s<stream>" forms.
        if self.topological is not None:
            return "t%d-%d" % (self.topological, self.stream)
        else:
            return "s%d" % (self.stream,)
class ThirdPartyInstanceID(
    namedtuple("ThirdPartyInstanceID", ("appservice_id", "network_id"))
):
    """An (appservice_id, network_id) pair, serialised as
    "appservice_id|network_id"."""
    # Deny iteration because it will bite you if you try to create a singleton
    # set by:
    #    users = set(user)
    def __iter__(self):
        raise ValueError("Attempted to iterate a %s" % (type(self).__name__,))
    # Because this class is a namedtuple of strings, it is deeply immutable.
    def __copy__(self):
        return self
    def __deepcopy__(self, memo):
        return self
    @classmethod
    def from_string(cls, s):
        # split("|", 2) can yield up to three parts, so any string with more
        # than one "|" separator is rejected by the length check below.
        bits = s.split("|", 2)
        if len(bits) != 2:
            raise SynapseError(400, "Invalid ID %r" % (s,))
        return cls(appservice_id=bits[0], network_id=bits[1])
    def to_string(self):
        return "%s|%s" % (self.appservice_id, self.network_id)
    __str__ = to_string
    @classmethod
    def create(cls, appservice_id, network_id):
        # Explicit keyword-argument constructor alias.
        return cls(appservice_id=appservice_id, network_id=network_id)
@attr.s(slots=True)
class ReadReceipt(object):
    """Information about a read-receipt"""
    room_id = attr.ib()  # room the receipt applies to
    receipt_type = attr.ib()  # receipt type identifier (value set not shown in this module)
    user_id = attr.ib()  # user who sent the receipt
    event_ids = attr.ib()  # event IDs being acknowledged -- presumably a list; verify at call sites
    data = attr.ib()  # opaque receipt payload forwarded as-is
def get_verify_key_from_cross_signing_key(key_info):
    """Get the key ID and signedjson verify key from a cross-signing key dict
    Args:
        key_info (dict): a cross-signing key dict, which must have a "keys"
            property that has exactly one item in it
    Returns:
        (str, VerifyKey): the key ID and verify key for the cross-signing key
    """
    # A valid cross-signing key dict carries exactly one entry under "keys".
    if "keys" not in key_info:
        raise ValueError("Invalid key")
    keys = key_info["keys"]
    if len(keys) != 1:
        raise ValueError("Invalid key")
    # Pull out the single (key_id, base64-encoded key) pair and decode it.
    key_id, key_data = next(iter(keys.items()))
    return (key_id, decode_verify_key_bytes(key_id, decode_base64(key_data)))
| 30.907895 | 90 | 0.624764 |
acf60bd05affc1ee93e2fba666c35f6955237f6a | 12,472 | py | Python | qa/rpc-tests/util.py | blowsbig/blowsbig | 47a2539406bc3d455454c0e3460bfcae36ae76e7 | [
"MIT"
] | null | null | null | qa/rpc-tests/util.py | blowsbig/blowsbig | 47a2539406bc3d455454c0e3460bfcae36ae76e7 | [
"MIT"
] | null | null | null | qa/rpc-tests/util.py | blowsbig/blowsbig | 47a2539406bc3d455454c0e3460bfcae36ae76e7 | [
"MIT"
] | null | null | null | # Copyright (c) 2014 The Bitcoin Core developers
# Copyright (c) 2014-2015 The Dash developers
# Copyright (c) 2015-2017 The PIVX developers
# Copyright (c) 2018 BSG Developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Helpful routines for regression testing
#
# Add python-bitcoinrpc to module search path:
import os
import sys
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), "python-bitcoinrpc"))
from decimal import Decimal, ROUND_DOWN
import json
import random
import shutil
import subprocess
import time
import re
from bitcoinrpc.authproxy import AuthServiceProxy, JSONRPCException
from util import *
def p2p_port(n):
    """P2P listen port for node *n*, offset by the runner's PID so that
    parallel test runs on one machine do not collide."""
    return 11000 + n + (os.getpid() % 999)
def rpc_port(n):
    """RPC listen port for node *n*, offset by the runner's PID so that
    parallel test runs on one machine do not collide."""
    return 12000 + n + (os.getpid() % 999)
def check_json_precision():
    """Make sure json library being used does not lose precision converting BTC values"""
    expected_satoshis = 2000000000000003
    # Round-trip a high-precision amount through JSON as a float...
    value = float(Decimal("20000000.00000003"))
    round_tripped = json.loads(json.dumps(value))
    # ...and confirm the satoshi count survived intact.
    if int(round_tripped * 1.0e8) != expected_satoshis:
        raise RuntimeError("JSON encode/decode loses precision")
def sync_blocks(rpc_connections):
    """
    Wait until everybody has the same block count
    """
    while True:
        heights = [conn.getblockcount() for conn in rpc_connections]
        # All nodes agree once there is at most one distinct height.
        if len(set(heights)) <= 1:
            break
        time.sleep(1)
def sync_mempools(rpc_connections):
    """
    Wait until everybody has the same transactions in their memory
    pools
    """
    while True:
        reference = set(rpc_connections[0].getrawmempool())
        # Compare every other node's mempool against node 0's.
        if all(
            set(conn.getrawmempool()) == reference
            for conn in rpc_connections[1:]
        ):
            break
        time.sleep(1)
# Map of node index -> subprocess.Popen handle for each running daemon.
bitcoind_processes = {}
def initialize_datadir(dirname, n):
    """Create (if needed) the datadir for node *n* under *dirname* and write
    a regtest blowsbig.conf with per-node ports.  Returns the datadir path."""
    datadir = os.path.join(dirname, "node" + str(n))
    if not os.path.isdir(datadir):
        os.makedirs(datadir)
    conf_lines = [
        "regtest=1",
        "rpcuser=rt",
        "rpcpassword=rt",
        "port=" + str(p2p_port(n)),
        "rpcport=" + str(rpc_port(n)),
    ]
    with open(os.path.join(datadir, "blowsbig.conf"), 'w') as f:
        f.write("\n".join(conf_lines) + "\n")
    return datadir
def initialize_chain(test_dir):
    """
    Create (or copy from cache) a 200-block-long chain and
    4 wallets.
    blowsbigd and blowsbig-cli must be in search path.
    """
    # Only build the cached chain once; later calls just copy it out.
    if not os.path.isdir(os.path.join("cache", "node0")):
        devnull = open("/dev/null", "w+")
        # Create cache directories, run blowsbigd:
        for i in range(4):
            datadir=initialize_datadir("cache", i)
            args = [ os.getenv("BITCOIND", "blowsbigd"), "-keypool=1", "-datadir="+datadir, "-discover=0" ]
            if i > 0:
                # Every node after the first connects to node 0.
                args.append("-connect=127.0.0.1:"+str(p2p_port(0)))
            bitcoind_processes[i] = subprocess.Popen(args)
            # -rpcwait blocks until the daemon's RPC interface is up.
            subprocess.check_call([ os.getenv("BITCOINCLI", "blowsbig-cli"), "-datadir="+datadir,
                                    "-rpcwait", "getblockcount"], stdout=devnull)
        devnull.close()
        rpcs = []
        for i in range(4):
            try:
                url = "http://rt:rt@127.0.0.1:%d"%(rpc_port(i),)
                rpcs.append(AuthServiceProxy(url))
            except:
                sys.stderr.write("Error connecting to "+url+"\n")
                sys.exit(1)
        # Create a 200-block-long chain; each of the 4 nodes
        # gets 25 mature blocks and 25 immature.
        # blocks are created with timestamps 10 minutes apart, starting
        # at 1 Jan 2014
        block_time = 1388534400
        for i in range(2):
            for peer in range(4):
                for j in range(25):
                    set_node_times(rpcs, block_time)
                    rpcs[peer].setgenerate(True, 1)
                    block_time += 10*60
                # Must sync before next peer starts generating blocks
                sync_blocks(rpcs)
        # Shut them down, and clean up cache directories:
        stop_nodes(rpcs)
        wait_bitcoinds()
        # Remove per-run state so the cache only contains chain data.
        for i in range(4):
            os.remove(log_filename("cache", i, "debug.log"))
            os.remove(log_filename("cache", i, "db.log"))
            os.remove(log_filename("cache", i, "peers.dat"))
            os.remove(log_filename("cache", i, "fee_estimates.dat"))
    # Copy the cached chain into the test directory for each node.
    for i in range(4):
        from_dir = os.path.join("cache", "node"+str(i))
        to_dir = os.path.join(test_dir, "node"+str(i))
        shutil.copytree(from_dir, to_dir)
        initialize_datadir(test_dir, i) # Overwrite port/rpcport in blowsbig.conf
def initialize_chain_clean(test_dir, num_nodes):
    """
    Create an empty blockchain and num_nodes wallets.
    Useful if a test case wants complete control over initialization.
    """
    for node_index in range(num_nodes):
        initialize_datadir(test_dir, node_index)
def _rpchost_to_args(rpchost):
'''Convert optional IP:port spec to rpcconnect/rpcport args'''
if rpchost is None:
return []
match = re.match('(\[[0-9a-fA-f:]+\]|[^:]+)(?::([0-9]+))?$', rpchost)
if not match:
raise ValueError('Invalid RPC host spec ' + rpchost)
rpcconnect = match.group(1)
rpcport = match.group(2)
if rpcconnect.startswith('['): # remove IPv6 [...] wrapping
rpcconnect = rpcconnect[1:-1]
rv = ['-rpcconnect=' + rpcconnect]
if rpcport:
rv += ['-rpcport=' + rpcport]
return rv
def start_node(i, dirname, extra_args=None, rpchost=None):
    """
    Start a blowsbigd and return RPC connection to it

    Args:
        i (int): node index; selects datadir and ports.
        dirname (str): base directory holding the per-node datadirs.
        extra_args (list|None): extra command-line arguments for the daemon.
        rpchost (str|None): optional "host[:port]" to reach the RPC interface.
    Returns:
        AuthServiceProxy: connected RPC proxy (with a .url attribute added).
    """
    datadir = os.path.join(dirname, "node"+str(i))
    args = [ os.getenv("BITCOIND", "blowsbigd"), "-datadir="+datadir, "-keypool=1", "-discover=0", "-rest" ]
    if extra_args is not None: args.extend(extra_args)
    bitcoind_processes[i] = subprocess.Popen(args)
    devnull = open("/dev/null", "w+")
    # -rpcwait blocks until the daemon's RPC interface answers.
    subprocess.check_call([ os.getenv("BITCOINCLI", "blowsbig-cli"), "-datadir="+datadir] +
                          _rpchost_to_args(rpchost) +
                          ["-rpcwait", "getblockcount"], stdout=devnull)
    devnull.close()
    url = "http://rt:rt@%s:%d" % (rpchost or '127.0.0.1', rpc_port(i))
    proxy = AuthServiceProxy(url)
    proxy.url = url # store URL on proxy for info
    return proxy
def start_nodes(num_nodes, dirname, extra_args=None, rpchost=None):
    """
    Start multiple blowsbigds, return RPC connections to them
    """
    if extra_args is None:
        extra_args = [None] * num_nodes
    return [
        start_node(i, dirname, extra_args[i], rpchost)
        for i in range(num_nodes)
    ]
def log_filename(dirname, n_node, logname):
    """Path of *logname* inside node *n_node*'s regtest directory."""
    node_dir = "node" + str(n_node)
    return os.path.join(dirname, node_dir, "regtest", logname)
def stop_node(node, i):
    """Stop node *i* via RPC, wait for its daemon to exit, and forget it."""
    node.stop()
    bitcoind_processes[i].wait()
    bitcoind_processes.pop(i)
def stop_nodes(nodes):
    """Issue a stop RPC to every node, then drop all connections."""
    for connection in nodes:
        connection.stop()
    del nodes[:]  # Emptying array closes connections as a side effect
def set_node_times(nodes, t):
    """Set the same mock time *t* on every node."""
    for connection in nodes:
        connection.setmocktime(t)
def wait_bitcoinds():
    """Block until every tracked daemon has exited, then forget them all."""
    for process in bitcoind_processes.values():
        process.wait()
    bitcoind_processes.clear()
def connect_nodes(from_connection, node_num):
    """Ask *from_connection* to connect to node *node_num* and wait for the
    version handshake so transaction relay works immediately."""
    target = "127.0.0.1:%d" % p2p_port(node_num)
    from_connection.addnode(target, "onetry")
    # poll until version handshake complete to avoid race conditions
    # with transaction relaying
    while not all(peer['version'] != 0 for peer in from_connection.getpeerinfo()):
        time.sleep(0.1)
def connect_nodes_bi(nodes, a, b):
    """Connect nodes *a* and *b* in both directions."""
    for src, dst in ((a, b), (b, a)):
        connect_nodes(nodes[src], dst)
def find_output(node, txid, amount):
    """
    Return index to output of txid with value amount
    Raises exception if there is none.
    """
    txdata = node.getrawtransaction(txid, 1)
    for index, vout in enumerate(txdata["vout"]):
        if vout["value"] == amount:
            return index
    raise RuntimeError("find_output txid %s : %s not found"%(txid,str(amount)))
def gather_inputs(from_node, amount_needed, confirmations_required=1):
    """
    Return a random set of unspent txouts that are enough to pay amount_needed
    """
    assert confirmations_required >= 0
    utxo = from_node.listunspent(confirmations_required)
    # Randomise selection order so tests exercise varied input sets.
    random.shuffle(utxo)
    selected = []
    total_in = Decimal("0.00000000")
    while utxo and total_in < amount_needed:
        entry = utxo.pop()
        total_in += entry["amount"]
        selected.append(
            {"txid": entry["txid"], "vout": entry["vout"], "address": entry["address"]}
        )
    if total_in < amount_needed:
        raise RuntimeError("Insufficient funds: need %d, have %d"%(amount_needed, total_in))
    return (total_in, selected)
def make_change(from_node, amount_in, amount_out, fee):
    """
    Create change output(s), return them
    """
    outputs = {}
    spent = amount_out + fee
    change = amount_in - spent
    if change > spent * 2:
        # Create an extra change output to break up big inputs
        change_address = from_node.getnewaddress()
        # Split change in two, being careful of rounding:
        half = Decimal(change / 2).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)
        outputs[change_address] = half
        change = amount_in - spent - half
    if change > 0:
        outputs[from_node.getnewaddress()] = change
    return outputs
def send_zeropri_transaction(from_node, to_node, amount, fee):
    """
    Create&broadcast a zero-priority transaction.
    Returns (txid, hex-encoded-txdata)
    Ensures transaction is zero-priority by first creating a send-to-self,
    then using its output
    """
    # Create a send-to-self with confirmed inputs:
    self_address = from_node.getnewaddress()
    # Gather enough for the payment plus two fees (one per transaction).
    (total_in, inputs) = gather_inputs(from_node, amount+fee*2)
    outputs = make_change(from_node, total_in, amount+fee, fee)
    outputs[self_address] = float(amount+fee)
    self_rawtx = from_node.createrawtransaction(inputs, outputs)
    self_signresult = from_node.signrawtransaction(self_rawtx)
    self_txid = from_node.sendrawtransaction(self_signresult["hex"], True)
    # Locate the freshly created (still unconfirmed) send-to-self output.
    vout = find_output(from_node, self_txid, amount+fee)
    # Now immediately spend the output to create a 1-input, 1-output
    # zero-priority transaction:
    inputs = [ { "txid" : self_txid, "vout" : vout } ]
    outputs = { to_node.getnewaddress() : float(amount) }
    rawtx = from_node.createrawtransaction(inputs, outputs)
    signresult = from_node.signrawtransaction(rawtx)
    txid = from_node.sendrawtransaction(signresult["hex"], True)
    return (txid, signresult["hex"])
def random_zeropri_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
    """
    Create a random zero-priority transaction.
    Returns (txid, hex-encoded-transaction-data, fee)
    """
    sender = random.choice(nodes)
    receiver = random.choice(nodes)
    # Fee is min_fee plus a random number of increments.
    fee = min_fee + fee_increment * random.randint(0, fee_variants)
    (txid, txhex) = send_zeropri_transaction(sender, receiver, amount, fee)
    return (txid, txhex, fee)
def random_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
    """
    Create a random transaction.
    Returns (txid, hex-encoded-transaction-data, fee)
    """
    sender = random.choice(nodes)
    receiver = random.choice(nodes)
    # Fee is min_fee plus a random number of increments.
    fee = min_fee + fee_increment * random.randint(0, fee_variants)
    (total_in, inputs) = gather_inputs(sender, amount + fee)
    outputs = make_change(sender, total_in, amount, fee)
    outputs[receiver.getnewaddress()] = float(amount)
    raw_tx = sender.createrawtransaction(inputs, outputs)
    signed = sender.signrawtransaction(raw_tx)
    txid = sender.sendrawtransaction(signed["hex"], True)
    return (txid, signed["hex"], fee)
def assert_equal(thing1, thing2):
    """Raise AssertionError unless the two values compare equal."""
    if thing1 == thing2:
        return
    raise AssertionError("%s != %s"%(str(thing1),str(thing2)))
def assert_greater_than(thing1, thing2):
    """Raise AssertionError unless thing1 is strictly greater than thing2."""
    if thing1 > thing2:
        return
    raise AssertionError("%s <= %s"%(str(thing1),str(thing2)))
def assert_raises(exc, fun, *args, **kwds):
    """Assert that fun(*args, **kwds) raises an exception of type *exc*."""
    try:
        fun(*args, **kwds)
    except exc:
        return
    except Exception as e:
        raise AssertionError("Unexpected exception raised: "+type(e).__name__)
    raise AssertionError("No exception raised")
| 35.942363 | 108 | 0.648653 |
acf60cbe1807ffd8697748697e75dd8635d9f9fe | 1,458 | py | Python | tests/test_class_oelint_vars_fileextrapaths.py | QuakeSaver/oelint-adv | e03617b51c7ebdeb8ea245eb61da3e3e03195b37 | [
"BSD-2-Clause"
] | null | null | null | tests/test_class_oelint_vars_fileextrapaths.py | QuakeSaver/oelint-adv | e03617b51c7ebdeb8ea245eb61da3e3e03195b37 | [
"BSD-2-Clause"
] | null | null | null | tests/test_class_oelint_vars_fileextrapaths.py | QuakeSaver/oelint-adv | e03617b51c7ebdeb8ea245eb61da3e3e03195b37 | [
"BSD-2-Clause"
] | null | null | null | import pytest
from base import TestBaseClass
class TestClassOelintVarsFilextrapaths(TestBaseClass):
    """Tests for the 'oelint.vars.fileextrapaths' rule: touching
    FILESEXTRAPATHS from a .bb recipe should be flagged once, while doing
    the same from a .bbappend is allowed."""
    @pytest.mark.parametrize('id', ['oelint.vars.fileextrapaths'])
    @pytest.mark.parametrize('occurrence', [1])
    @pytest.mark.parametrize('input',
                             [
                                 {
                                     'oelint_adv_test.bb':
                                     '''
                                     FILESEXTRAPATHS_prepend := "${THISDIR}/file"
                                     '''
                                 },
                                 {
                                     'oelint_adv_test.bb':
                                     '''
                                     FILESEXTRAPATHS_append := "${THISDIR}/file"
                                     '''
                                 },
                                 {
                                     'oelint_adv_test.bb':
                                     '''
                                     FILESEXTRAPATHS += "${THISDIR}/file"
                                     '''
                                 }
                             ],
                             )
    def test_bad(self, input, id, occurrence):
        """Each .bb input above must trigger the rule exactly once."""
        self.check_for_id(self._create_args(input), id, occurrence)
    @pytest.mark.parametrize('id', ['oelint.vars.fileextrapaths'])
    @pytest.mark.parametrize('occurrence', [0])
    @pytest.mark.parametrize('input',
                             [
                                 {
                                     'oelint_adv_test.bbappend':
                                     '''
                                     FILESEXTRAPATHS_prepend := "${THISDIR}/file"
                                     '''
                                 },
                                 {
                                     'oelint_adv_test.bbappend':
                                     '''
                                     FILESEXTRAPATHS_append := "${THISDIR}/file"
                                     '''
                                 },
                             ],
                             )
    def test_good(self, input, id, occurrence):
        """The same assignments in a .bbappend must not trigger the rule."""
        self.check_for_id(self._create_args(input), id, occurrence)
| 27 | 67 | 0.493141 |
acf60d1fa4bdab1c9e89b443dfd1c777ea463f4e | 1,353 | py | Python | test/test_device_group_partial_update.py | pallavigopi/esper-client-py | f7e71d3f25a5d91f35628b414e8abe9e6849d316 | [
"Apache-2.0"
] | null | null | null | test/test_device_group_partial_update.py | pallavigopi/esper-client-py | f7e71d3f25a5d91f35628b414e8abe9e6849d316 | [
"Apache-2.0"
] | null | null | null | test/test_device_group_partial_update.py | pallavigopi/esper-client-py | f7e71d3f25a5d91f35628b414e8abe9e6849d316 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
ESPER API REFERENCE
OpenAPI spec version: 1.0.0
Contact: developer@esper.io
---------------------------------------------------------
Copyright 2019 Shoonya Enterprises Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import unittest
import esperclient
from esperclient.models.device_group_partial_update import DeviceGroupPartialUpdate
from esperclient.rest import ApiException
class TestDeviceGroupPartialUpdate(unittest.TestCase):
    """DeviceGroupPartialUpdate unit test stubs (auto-generated)."""
    def setUp(self):
        # No shared fixtures required yet.
        pass
    def tearDown(self):
        # Nothing to clean up.
        pass
    def testDeviceGroupPartialUpdate(self):
        """Test DeviceGroupPartialUpdate"""
        # Stub: only verifies the model can be constructed without error.
        model = esperclient.models.device_group_partial_update.DeviceGroupPartialUpdate()
        pass
if __name__ == '__main__':
    unittest.main()
| 25.528302 | 89 | 0.733925 |
acf60f392b0128489899314b6dfe34cea3819a94 | 2,142 | py | Python | examples/pytorch/rgcn/entity_utils.py | ketyi/dgl | a1b859c29b63a673c148d13231a49504740e0e01 | [
"Apache-2.0"
] | null | null | null | examples/pytorch/rgcn/entity_utils.py | ketyi/dgl | a1b859c29b63a673c148d13231a49504740e0e01 | [
"Apache-2.0"
] | null | null | null | examples/pytorch/rgcn/entity_utils.py | ketyi/dgl | a1b859c29b63a673c148d13231a49504740e0e01 | [
"Apache-2.0"
] | null | null | null | import dgl
import torch as th
from dgl.data.rdf import AIFBDataset, MUTAGDataset, BGSDataset, AMDataset
def load_data(data_name, get_norm=False, inv_target=False):
    """Load an RDF entity-classification dataset and flatten it to a
    homogeneous graph.

    Args:
        data_name (str): 'aifb', 'mutag' or 'bgs'; any other value loads
            the AM dataset.
        get_norm (bool): if True, attach a per-edge 'norm' feature
            (1/in-degree of the destination node).
        inv_target (bool): if True, additionally return a tensor mapping
            global node IDs back to type-specific IDs for the target
            category.

    Returns:
        (g, num_rels, num_classes, labels, train_idx, test_idx, target_idx)
        plus, when inv_target is True, the inverse-target tensor.
    """
    dataset_by_name = {
        'aifb': AIFBDataset,
        'mutag': MUTAGDataset,
        'bgs': BGSDataset,
    }
    dataset = dataset_by_name.get(data_name, AMDataset)()
    # The dataset holds a single heterogeneous graph.
    hg = dataset[0]
    num_rels = len(hg.canonical_etypes)
    category = dataset.predict_category
    num_classes = dataset.num_classes
    labels = hg.nodes[category].data.pop('labels')
    train_mask = hg.nodes[category].data.pop('train_mask')
    test_mask = hg.nodes[category].data.pop('test_mask')
    train_idx = th.nonzero(train_mask, as_tuple=False).squeeze()
    test_idx = th.nonzero(test_mask, as_tuple=False).squeeze()
    if get_norm:
        # Normalization weight for each edge: 1/d, where d is the degree
        # of the destination node.
        for cetype in hg.canonical_etypes:
            hg.edges[cetype].data['norm'] = dgl.norm_by_dst(hg, cetype).unsqueeze(1)
        edata = ['norm']
    else:
        edata = None
    # get target category id
    category_id = hg.ntypes.index(category)
    g = dgl.to_homogeneous(hg, edata=edata)
    # Rename the fields as they can be changed by for example DataLoader
    g.ndata['ntype'] = g.ndata.pop(dgl.NTYPE)
    g.ndata['type_id'] = g.ndata.pop(dgl.NID)
    # Global IDs of the nodes belonging to the target category.
    all_node_ids = th.arange(g.num_nodes())
    target_idx = all_node_ids[g.ndata['ntype'] == category_id]
    if not inv_target:
        return g, num_rels, num_classes, labels, train_idx, test_idx, target_idx
    # Map global node IDs to type-specific node IDs. This is required for
    # looking up type-specific labels in a minibatch.
    inv_target_map = th.empty((g.num_nodes(),), dtype=th.int64)
    inv_target_map[target_idx] = th.arange(0, target_idx.shape[0],
                                           dtype=inv_target_map.dtype)
    return g, num_rels, num_classes, labels, train_idx, test_idx, target_idx, inv_target_map
| 36.305085 | 92 | 0.653595 |
acf60f84ec98dc03115c380f5f92c7c439f0162a | 11,225 | py | Python | src/models/torchvision_models.py | gnocchiflette/NTU-RGB-D | 4f72ff17889294e68efb35b8632b4f0e0ef9d9f3 | [
"MIT"
] | 26 | 2020-03-03T15:26:28.000Z | 2022-01-31T00:47:10.000Z | src/models/torchvision_models.py | adeboissiere/FUSION-human-action-recognition | 4f72ff17889294e68efb35b8632b4f0e0ef9d9f3 | [
"MIT"
] | 11 | 2020-03-31T04:12:04.000Z | 2022-03-11T23:51:45.000Z | src/models/torchvision_models.py | gnocchiflette/NTU-RGB-D | 4f72ff17889294e68efb35b8632b4f0e0ef9d9f3 | [
"MIT"
] | 2 | 2020-05-22T06:47:42.000Z | 2020-11-24T15:00:56.000Z | r"""This module is a copy taken from the official Torchvision documentation of a greater release. The reason it is
included is because we use an older version of Torchvision, as it is the latest available on our cluster. Will update
in the future.
"""
import torch.nn as nn
from torch.hub import load_state_dict_from_url
# Public factory functions this module exposes.
__all__ = ['r3d_18', 'mc3_18', 'r2plus1d_18']
# Download locations of the pretrained checkpoints for each architecture.
model_urls = {
    'r3d_18': 'https://download.pytorch.org/models/r3d_18-b3b3357e.pth',
    'mc3_18': 'https://download.pytorch.org/models/mc3_18-a90a0ba3.pth',
    'r2plus1d_18': 'https://download.pytorch.org/models/r2plus1d_18-91a641e6.pth',
}
class Conv3DSimple(nn.Conv3d):
    """Plain 3x3x3 spatio-temporal convolution, no bias."""

    def __init__(self, in_planes, out_planes, midplanes=None, stride=1, padding=1):
        # `midplanes` is ignored; accepted only so every conv builder
        # shares the same constructor signature.
        super(Conv3DSimple, self).__init__(
            in_planes,
            out_planes,
            kernel_size=(3, 3, 3),
            stride=stride,
            padding=padding,
            bias=False,
        )

    @staticmethod
    def get_downsample_stride(stride):
        # Downsampling applies uniformly to time and both spatial dims.
        return (stride, stride, stride)
class Conv2Plus1D(nn.Sequential):
    """Factorized (2+1)D convolution: a 1x3x3 spatial conv followed by a
    3x1x1 temporal conv, with BatchNorm+ReLU in between."""

    def __init__(self, in_planes, out_planes, midplanes, stride=1, padding=1):
        spatial = nn.Conv3d(
            in_planes, midplanes, kernel_size=(1, 3, 3),
            stride=(1, stride, stride), padding=(0, padding, padding),
            bias=False)
        temporal = nn.Conv3d(
            midplanes, out_planes, kernel_size=(3, 1, 1),
            stride=(stride, 1, 1), padding=(padding, 0, 0),
            bias=False)
        super(Conv2Plus1D, self).__init__(
            spatial,
            nn.BatchNorm3d(midplanes),
            nn.ReLU(inplace=True),
            temporal,
        )

    @staticmethod
    def get_downsample_stride(stride):
        # The two stages together stride over time and space uniformly.
        return (stride, stride, stride)
class Conv3DNoTemporal(nn.Conv3d):
    """Spatial-only 1x3x3 convolution; the temporal axis is left untouched.

    ``midplanes`` is accepted but unused to keep the builder signature uniform.
    """

    def __init__(self,
                 in_planes,
                 out_planes,
                 midplanes=None,
                 stride=1,
                 padding=1):
        super(Conv3DNoTemporal, self).__init__(
            in_planes,
            out_planes,
            kernel_size=(1, 3, 3),
            stride=(1, stride, stride),
            padding=(0, padding, padding),
            bias=False)

    @staticmethod
    def get_downsample_stride(stride):
        # Never stride over time — only over height and width.
        return (1, stride, stride)
class BasicBlock(nn.Module):
    """Two-stage residual block with an identity (or projected) skip path."""

    expansion = 1

    def __init__(self, inplanes, planes, conv_builder, stride=1, downsample=None):
        super(BasicBlock, self).__init__()
        # Hidden width chosen so a factorized (2+1)D conv has roughly the same
        # parameter count as a full 3x3x3 convolution.
        midplanes = (inplanes * planes * 3 * 3 * 3) // (inplanes * 3 * 3 + 3 * planes)

        stage_one = [
            conv_builder(inplanes, planes, midplanes, stride),
            nn.BatchNorm3d(planes),
            nn.ReLU(inplace=True),
        ]
        stage_two = [
            conv_builder(planes, planes, midplanes),
            nn.BatchNorm3d(planes),
        ]
        self.conv1 = nn.Sequential(*stage_one)
        self.conv2 = nn.Sequential(*stage_two)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # Project the skip connection only when a downsample module was given.
        identity = x if self.downsample is None else self.downsample(x)

        out = self.conv2(self.conv1(x))
        out += identity

        return self.relu(out)
class Bottleneck(nn.Module):
    """Bottleneck residual block: 1x1x1 reduce -> spatio-temporal conv -> 1x1x1 expand."""

    expansion = 4

    def __init__(self, inplanes, planes, conv_builder, stride=1, downsample=None):
        super(Bottleneck, self).__init__()
        # Same parameter-matching formula as BasicBlock for the factorized conv.
        midplanes = (inplanes * planes * 3 * 3 * 3) // (inplanes * 3 * 3 + 3 * planes)

        # 1x1x1 channel reduction
        self.conv1 = nn.Sequential(
            nn.Conv3d(inplanes, planes, kernel_size=1, bias=False),
            nn.BatchNorm3d(planes),
            nn.ReLU(inplace=True),
        )
        # The builder-supplied spatio-temporal convolution
        self.conv2 = nn.Sequential(
            conv_builder(planes, planes, midplanes, stride),
            nn.BatchNorm3d(planes),
            nn.ReLU(inplace=True),
        )
        # 1x1x1 channel expansion (BN lives at index 1 of this Sequential)
        self.conv3 = nn.Sequential(
            nn.Conv3d(planes, planes * self.expansion, kernel_size=1, bias=False),
            nn.BatchNorm3d(planes * self.expansion),
        )
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        identity = x if self.downsample is None else self.downsample(x)

        out = self.conv3(self.conv2(self.conv1(x)))
        out += identity

        return self.relu(out)
class BasicStem(nn.Sequential):
    """Default stem: one 3x7x7 convolution (stride 1x2x2) followed by BN and ReLU."""

    def __init__(self):
        entry_conv = nn.Conv3d(3, 64, kernel_size=(3, 7, 7), stride=(1, 2, 2),
                               padding=(1, 3, 3), bias=False)
        super(BasicStem, self).__init__(
            entry_conv,
            nn.BatchNorm3d(64),
            nn.ReLU(inplace=True))
class R2Plus1dStem(nn.Sequential):
    """R(2+1)D stem: a spatial 1x7x7 conv and a temporal 3x1x1 conv, each with BN+ReLU."""

    def __init__(self):
        modules = [
            nn.Conv3d(3, 45, kernel_size=(1, 7, 7),
                      stride=(1, 2, 2), padding=(0, 3, 3),
                      bias=False),
            nn.BatchNorm3d(45),
            nn.ReLU(inplace=True),
            nn.Conv3d(45, 64, kernel_size=(3, 1, 1),
                      stride=(1, 1, 1), padding=(1, 0, 0),
                      bias=False),
            nn.BatchNorm3d(64),
            nn.ReLU(inplace=True),
        ]
        super(R2Plus1dStem, self).__init__(*modules)
class VideoResNet(nn.Module):
    """Generic video ResNet assembled from a stem and four residual stages."""

    def __init__(self, block, conv_makers, layers,
                 stem, num_classes=400,
                 zero_init_residual=False):
        """Generic resnet video generator.

        Args:
            block (nn.Module): resnet building block
            conv_makers (list(functions)): generator function for each layer
            layers (List[int]): number of blocks per layer
            stem (nn.Module, optional): Resnet stem, if None, defaults to conv-bn-relu. Defaults to None.
            num_classes (int, optional): Dimension of the final FC layer. Defaults to 400.
            zero_init_residual (bool, optional): Zero init bottleneck residual BN. Defaults to False.
        """
        super(VideoResNet, self).__init__()
        self.inplanes = 64  # updated in-place by _make_layer as stages are built
        self.stem = stem()

        self.layer1 = self._make_layer(block, conv_makers[0], 64, layers[0], stride=1)
        self.layer2 = self._make_layer(block, conv_makers[1], 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, conv_makers[2], 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, conv_makers[3], 512, layers[3], stride=2)

        self.avgpool = nn.AdaptiveAvgPool3d((1, 1, 1))
        self.fc = nn.Linear(512 * block.expansion, num_classes)

        # init weights
        self._initialize_weights()

        if zero_init_residual:
            # Zeroing the last BN makes each bottleneck start as an identity
            # mapping, which can improve early training (He et al., 2018).
            for m in self.modules():
                if isinstance(m, Bottleneck):
                    # BUGFIX: Bottleneck keeps its final BatchNorm3d inside the
                    # ``conv3`` Sequential (index 1); there is no ``bn3``
                    # attribute, so the original ``m.bn3.weight`` raised
                    # AttributeError whenever zero_init_residual=True.
                    nn.init.constant_(m.conv3[1].weight, 0)

    def forward(self, x):
        """Run the network on a (N, 3, T, H, W) clip; returns (N, num_classes) logits."""
        x = self.stem(x)

        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)

        x = self.avgpool(x)
        # Flatten the layer to fc
        x = x.flatten(1)
        x = self.fc(x)

        return x

    def _make_layer(self, block, conv_builder, planes, blocks, stride=1):
        """Stack ``blocks`` residual blocks; only the first may downsample via ``stride``."""
        downsample = None

        if stride != 1 or self.inplanes != planes * block.expansion:
            # Projection shortcut so the skip path matches both the strided
            # size and the expanded channel count of the block output.
            ds_stride = conv_builder.get_downsample_stride(stride)
            downsample = nn.Sequential(
                nn.Conv3d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=ds_stride, bias=False),
                nn.BatchNorm3d(planes * block.expansion)
            )
        layers = []
        layers.append(block(self.inplanes, planes, conv_builder, stride, downsample))

        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            layers.append(block(self.inplanes, planes, conv_builder))

        return nn.Sequential(*layers)

    def _initialize_weights(self):
        """Kaiming init for convs, unit-gain BN, small-normal init for the FC layer."""
        for m in self.modules():
            if isinstance(m, nn.Conv3d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out',
                                        nonlinearity='relu')
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.BatchNorm3d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, 0, 0.01)
                nn.init.constant_(m.bias, 0)
def _video_resnet(arch, pretrained=False, progress=True, **kwargs):
    """Build a VideoResNet and optionally load the Kinetics-400 weights for ``arch``.

    Args:
        arch (str): key into ``model_urls`` selecting which checkpoint to fetch
        pretrained (bool): if True, download and load pre-trained weights
        progress (bool): if True, show a download progress bar on stderr
        **kwargs: forwarded verbatim to the ``VideoResNet`` constructor

    Returns:
        nn.Module: the constructed (and possibly pre-trained) network
    """
    model = VideoResNet(**kwargs)
    if not pretrained:
        return model

    state_dict = load_state_dict_from_url(model_urls[arch], progress=progress)
    model.load_state_dict(state_dict)
    return model
def r3d_18(pretrained=False, progress=True, **kwargs):
    """Construct 18 layer Resnet3D model as in
    https://arxiv.org/abs/1711.11248

    Args:
        pretrained (bool): If True, returns a model pre-trained on Kinetics-400
        progress (bool): If True, displays a progress bar of the download to stderr

    Returns:
        nn.Module: R3D-18 network
    """
    # Four stages of plain 3x3x3 convolutions, two BasicBlocks each.
    arch_config = dict(block=BasicBlock,
                       conv_makers=[Conv3DSimple] * 4,
                       layers=[2, 2, 2, 2],
                       stem=BasicStem)
    return _video_resnet('r3d_18', pretrained, progress, **arch_config, **kwargs)
def mc3_18(pretrained=False, progress=True, **kwargs):
    """Constructor for 18 layer Mixed Convolution network as in
    https://arxiv.org/abs/1711.11248

    Args:
        pretrained (bool): If True, returns a model pre-trained on Kinetics-400
        progress (bool): If True, displays a progress bar of the download to stderr

    Returns:
        nn.Module: MC3 Network definition
    """
    # Mixed convolutions: full 3D in the first stage, spatial-only afterwards.
    arch_config = dict(block=BasicBlock,
                       conv_makers=[Conv3DSimple] + [Conv3DNoTemporal] * 3,
                       layers=[2, 2, 2, 2],
                       stem=BasicStem)
    return _video_resnet('mc3_18', pretrained, progress, **arch_config, **kwargs)
def r2plus1d_18(pretrained=False, progress=True, **kwargs):
    """Constructor for the 18 layer deep R(2+1)D network as in
    https://arxiv.org/abs/1711.11248

    Args:
        pretrained (bool): If True, returns a model pre-trained on Kinetics-400
        progress (bool): If True, displays a progress bar of the download to stderr

    Returns:
        nn.Module: R(2+1)D-18 network
    """
    # Fix: the final line of this function had extraction residue appended
    # ("| 32.348703 | 117 | 0.563029 |"), which made the file unparsable.
    return _video_resnet('r2plus1d_18',
                         pretrained, progress,
                         block=BasicBlock,
                         conv_makers=[Conv2Plus1D] * 4,
                         layers=[2, 2, 2, 2],
                         stem=R2Plus1dStem, **kwargs)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.