id stringlengths 1 265 | text stringlengths 6 5.19M | dataset_id stringclasses 7
values |
|---|---|---|
3235643 | <filename>proxy.py
# Copyright (c) 2016-2019, <NAME>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are those
# of the authors and should not be interpreted as representing official policies,
# either expressed or implied, of the FreeBSD Project.
import os
import bpy
from mathutils import Vector
from .error import *
from .tables import *
from .utils import *
if bpy.app.version < (2,80,0):
from .buttons27 import UseAllBool
else:
from .buttons28 import UseAllBool
#-------------------------------------------------------------
# Make proxy
#-------------------------------------------------------------
def makeProxy1(context, iterations):
    """Duplicate the active mesh and decimate the copy *iterations* times.

    The duplicate is renamed "<base>_LodN", kept on the same layers as the
    original (Blender 2.7x only), given the original's UV seams, and returned.
    """
    ob = context.object
    bpy.ops.object.duplicate()
    # After duplication the duplicate is the new active object.
    pxy = context.object
    makeRawProxy(pxy, iterations)
    pxy.name = stripName(ob.name) + ("_Lod%d" % iterations)
    if bpy.app.version < (2,80,0):
        # 2.7x layer system; 2.8x uses collections instead.
        pxy.layers = list(ob.layers)
    insertSeams(ob, pxy)
    print("Low-poly %s created" % pxy.name)
    return pxy
def stripName(string):
    """Return *string* without a trailing "_Mesh" or numeric ".NNN" suffix."""
    if string.endswith("_Mesh"):
        return string[:-5]
    has_numeric_suffix = (
        len(string) > 4 and
        string[-4] == "." and
        string[-3:].isdigit()
    )
    return string[:-4] if has_numeric_suffix else string
def makeRawProxy(pxy, iterations):
    """Reduce *pxy*'s polygon count with an un-subdivide Decimate modifier,
    applied immediately so the mesh data is permanently changed."""
    mod = pxy.modifiers.new("Proxy", 'DECIMATE')
    mod.decimate_type = 'UNSUBDIV'
    mod.iterations = iterations
    # NOTE(review): the apply_as argument was removed in Blender 2.90;
    # this call matches the 2.7x/2.8x API this file targets.
    bpy.ops.object.modifier_apply(apply_as='DATA', modifier=mod.name)
#-------------------------------------------------------------
# Find polys
#-------------------------------------------------------------
def findHumanAndProxy(context):
    """Pick two mesh objects from the scene and return them as (hum, pxy),
    where hum is the one with more vertices."""
    hum = pxy = None
    for ob in getSceneObjects(context):
        if ob.type != 'MESH':
            continue
        if hum is None:
            hum = ob
            continue
        pxy = ob
        # Keep the denser mesh in hum, the sparser one in pxy.
        if len(pxy.data.vertices) > len(hum.data.vertices):
            hum, pxy = pxy, hum
    return hum, pxy
def assocPxyHumVerts(hum, pxy):
    """Map each proxy vertex to the human vertex at (almost) the same location.

    Assumes every proxy vertex coincides with some human vertex to within
    1e-4.  Returns (pxyHumVerts, humPxyVerts); human vertices without a
    proxy partner map to None.
    """
    pxyHumVerts = {}
    # Sort both vertex lists by coordinate so matching is a single forward scan.
    # NOTE(review): relies on mathutils.Vector tuples being sortable, and
    # raises IndexError if a proxy vertex has no close human vertex — confirm.
    hverts = [(hv.co, hv.index) for hv in hum.data.vertices]
    hverts.sort()
    pverts = [(pv.co, pv.index) for pv in pxy.data.vertices]
    pverts.sort()
    for pco,pvn in pverts:
        hco,hvn = hverts[0]
        while (pco-hco).length > 1e-4:
            # Discard human vertices that precede the current proxy vertex.
            hverts = hverts[1:]
            hco,hvn = hverts[0]
        pxyHumVerts[pvn] = hvn
    # Invert the map; unmatched human vertices stay None.
    humPxyVerts = dict([(hvn,None) for hvn in range(len(hum.data.vertices))])
    for pvn,hvn in pxyHumVerts.items():
        humPxyVerts[hvn] = pvn
    return pxyHumVerts, humPxyVerts
def findPolys(context):
    """Debug tool: copy the human mesh's seam edges onto the proxy mesh.

    New proxy vertices/edges are allocated for seam endpoints that have no
    proxy counterpart.  Everything after the bare ``return`` below is
    unreachable legacy code (proxy-face to human-face matching) kept for
    reference.
    """
    hum,pxy = findHumanAndProxy(context)
    print(hum, pxy)
    humFaceVerts,humVertFaces = getVertFaces(hum)
    pxyFaceVerts,pxyVertFaces = getVertFaces(pxy)
    pxyHumVerts,humPxyVerts = assocPxyHumVerts(hum, pxy)
    print("PxyHumVerts", len(pxyHumVerts), len(humPxyVerts))
    pvn = len(pxy.data.vertices)    # next free proxy vertex index
    pen = len(pxy.data.edges)       # next free proxy edge index
    newHumPxyVerts = {}
    newPxyEdges = []
    for e in hum.data.edges:
        if e.use_seam:
            hvn1,hvn2 = e.vertices
            pvn1 = humPxyVerts[hvn1]
            pvn2 = humPxyVerts[hvn2]
            useAdd = False    # NOTE(review): assigned but never used
            if pvn1 is None or pvn2 is None:
                # At least one seam endpoint is missing from the proxy:
                # allocate proxy vertices for both endpoints as needed.
                if hvn1 in newHumPxyVerts.keys():
                    pvn1 = newHumPxyVerts[hvn1]
                else:
                    pvn1 = newHumPxyVerts[hvn1] = pvn
                    pvn += 1
                if hvn2 in newHumPxyVerts.keys():
                    pvn2 = newHumPxyVerts[hvn2]
                else:
                    pvn2 = newHumPxyVerts[hvn2] = pvn
                    pvn += 1
                newPxyEdges.append((pen, pvn1, pvn2))
                pen += 1
    newVerts = [(pvn,hvn) for hvn,pvn in newHumPxyVerts.items()]
    newVerts.sort()
    # Clear all existing seams on the proxy.
    setActiveObject(context, pxy)
    bpy.ops.object.mode_set(mode='EDIT')
    bpy.ops.mesh.select_mode(type='EDGE')
    bpy.ops.mesh.select_all(action='SELECT')
    bpy.ops.mesh.mark_seam(clear=True)
    bpy.ops.mesh.select_all(action='DESELECT')
    bpy.ops.object.mode_set(mode='OBJECT')
    print("BEF", len(pxy.data.vertices), len(pxy.data.edges))
    pxy.data.vertices.add(len(newVerts))
    for pvn,hvn in newVerts:
        pv = pxy.data.vertices[pvn]
        pv.co = hum.data.vertices[hvn].co.copy()
        #print(pv.index,pv.co)
    pxy.data.edges.add(len(newPxyEdges))
    for pen,pvn1,pvn2 in newPxyEdges:
        pe = pxy.data.edges[pen]
        pe.vertices = (pvn1,pvn2)
        pe.select = True
        #print(pe.index, list(pe.vertices), pe.use_seam)
    print("AFT", len(pxy.data.vertices), len(pxy.data.edges))
    return
    # ---- unreachable legacy code below this point ----
    pxyHumFaces = {}
    for pfn,pfverts in enumerate(pxyFaceVerts):
        cands = []
        for pvn in pfverts:
            hvn = pxyHumVerts[pvn]
            for hfn in humVertFaces[hvn]:
                cands.append(hfn)
        print(pfn, cands)
        if len(cands) == 16:
            vcount = {}
            for hfn in cands:
                for hvn in humFaceVerts[hfn]:
                    if hvn not in vcount.keys():
                        vcount[hvn] = []
                    vcount[hvn].append(hfn)
            vlist = [(len(hfns),hvn,hfns) for hvn,hfns in vcount.items()]
            vlist.sort()
            print(vlist)
            pxyHumFaces[pfn] = vlist[-1]
            print("RES", pfn, pxyHumFaces[pfn])
            for hfn in vlist[-1][2]:
                hf = hum.data.polygons[hfn]
                hf.select = True
class DAZ_OT_FindPolys(bpy.types.Operator):
    """Operator wrapper around the findPolys debug utility."""
    bl_idname = "daz.find_polys"
    bl_label = "Find Polys"
    bl_options = {'UNDO'}

    @classmethod
    def poll(self, context):
        # Any active object enables the operator.
        return context.object

    def execute(self, context):
        checkObjectMode(context)
        try:
            findPolys(context)
        except DazError:
            handleDazError(context)
        return {'FINISHED'}
#-------------------------------------------------------------
# Make faithful proxy
#-------------------------------------------------------------
class Proxifier:
    """Builds a low-poly ("faithful") version of a mesh by merging groups of
    neighboring faces into larger polygons while respecting UV seams.

    The algorithm works on face indices only: ``self.dirty[fn]`` marks faces
    already consumed by a merge, ``newfaces`` accumulates lists of original
    face indices that become one new polygon each.  With ``colorOnly`` set,
    the grouping is visualized via materials instead of rebuilding the mesh.
    """

    def __init__(self, ob):
        self.object = ob
        self.nfaces = len(ob.data.polygons)
        self.nverts = len(ob.data.vertices)
        # Topology tables, filled in by setup() via findSeams().
        self.faceverts = None
        self.vertfaces = None
        self.neighbors = None
        self.seams = None
        self.faces = []
        # Material stride used by combineFaces() to skip slot 0 of each bank.
        self.matOffset = 10
        self.origMnums = {}
        self.colorOnly = False

    def remains(self):
        """Return the number of faces not yet merged (not dirty)."""
        free = [t for t in self.dirty.values() if not t]
        return len(free)

    def setup(self, ob, context):
        """Compute seams/topology, record original materials, and mark hidden
        faces as dirty.  Returns the initial list of (singleton) new faces."""
        self.faceverts, self.vertfaces, self.neighbors, self.seams = findSeams(ob)
        if self.colorOnly:
            self.createMaterials()
        self.origMnums = {}
        for f in ob.data.polygons:
            self.origMnums[f.index] = f.material_index
            if self.colorOnly:
                f.material_index = 0
        deselectEverything(ob, context)
        self.dirty = dict([(fn,False) for fn in range(self.nfaces)])
        for f in ob.data.polygons:
            if f.hide:
                self.dirty[f.index] = True
        # Hidden faces are carried through unchanged as singleton faces.
        newfaces = [[fn] for fn in range(self.nfaces) if self.dirty[fn]]
        printStatistics(ob)
        return newfaces

    def getConnectedComponents(self):
        """Union-find style clustering of faces into connected components.

        Returns (comps, taken): comps maps cluster id -> face list, taken is
        an all-False flag dict the caller may use.
        """
        self.clusters = dict([(fn,-1) for fn in range(self.nfaces)])
        self.refs = dict([(fn,fn) for fn in range(self.nfaces)])
        cnum = 0
        for fn in range(self.nfaces):
            cnums = []
            for fn2 in self.neighbors[fn]:
                cn = self.clusters[fn2]
                if cn >= 0:
                    cnums.append(self.deref(cn))
            cnums.sort()
            if cnums:
                # Join all touched clusters under the smallest id.
                self.clusters[fn] = cn0 = cnums[0]
                for cn in cnums[1:]:
                    self.refs[cn] = cn0
            else:
                self.clusters[fn] = cn0 = cnum
                cnum += 1
        comps = dict([(cn,[]) for cn in range(cnum)])
        taken = dict([(cn,False) for cn in range(cnum)])
        for fn in range(self.nfaces):
            cn = self.clusters[fn]
            cn = self.deref(cn)
            comps[cn].append(fn)
            self.clusters[fn] = cn
        return comps,taken

    def deref(self, cn):
        """Follow cluster references to the root id, with path compression."""
        cnums = []
        while self.refs[cn] != cn:
            cnums.append(cn)
            cn = self.refs[cn]
        for cn1 in cnums:
            self.refs[cn1] = cn
        return cn

    def getNodes(self):
        """Return seed faces for merging: faces at irregular vertices (valence
        not 0/2/4), plus one face per component not already represented."""
        nodes = []
        comps,taken = self.getConnectedComponents()
        for vn in range(self.nverts):
            fnums = self.vertfaces[vn]
            if len(fnums) not in [0,2,4]:
                for fn in fnums:
                    if not self.dirty[fn]:
                        nodes.append(fn)
                        taken[self.clusters[fn]] = True
        for cn,comp in comps.items():
            if len(comp) > 0 and not taken[cn]:
                nodes.append(comp[0])
        return set(nodes)

    def make(self, ob, context):
        """Main entry point: iteratively merge faces outward from seed nodes,
        then rebuild the mesh (or just color the groups if colorOnly)."""
        newfaces = self.setup(ob, context)
        remains1 = self.remains()
        print("Step 0 Remains:", remains1)
        nodes = self.getNodes()
        for fn in nodes:
            self.dirty[fn] = True
        for fn in nodes:
            self.mergeFaces(fn, newfaces)
        prevblock = newfaces
        step = 1
        remains2 = self.remains()
        # Keep merging while progress is made; hard cap of 50 sweeps.
        while remains2 and remains2 < remains1 and step < 50:
            print("Step %d Remains:" % step, self.remains())
            block = []
            for newface in prevblock:
                self.mergeNextFaces(newface, block)
            newfaces += block
            prevblock = block
            step += 1
            remains1 = remains2
            remains2 = self.remains()
        print("Step %d Remains:" % step, self.remains())
        if self.colorOnly:
            self.combineFaces(newfaces)
            return
        else:
            self.buildNewMesh(newfaces)
            deleteMidpoints(ob)
            bpy.ops.object.mode_set(mode='EDIT')
            bpy.ops.mesh.select_all(action='SELECT')
            bpy.ops.mesh.remove_doubles()
            bpy.ops.object.mode_set(mode='OBJECT')
        printStatistics(ob)

    def makeQuads(self, ob, context):
        """Pair up adjacent triangles (not across seams) into quads."""
        newfaces = self.setup(ob, context)
        for fn1 in range(self.nfaces):
            if self.dirty[fn1]:
                continue
            if len(self.faceverts[fn1]) == 3:
                for fn2 in self.neighbors[fn1]:
                    if (len(self.faceverts[fn2]) == 3 and
                        not self.dirty[fn2] and
                        fn2 not in self.seams[fn1]):
                        self.dirty[fn1] = True
                        self.dirty[fn2] = True
                        newface = [fn1,fn2]
                        newfaces.append(newface)
                        break
        if self.colorOnly:
            self.combineFaces(newfaces)
            return
        else:
            self.buildNewMesh(newfaces)
        printStatistics(ob)

    def buildNewMesh(self, newfaces):
        """Replace the object's mesh with one polygon per entry in *newfaces*,
        carrying over UVs, materials, vertex groups, shapekeys and drivers."""
        from .geometry import makeNewUvloop
        # Unmerged faces survive unchanged as singleton groups.
        free = [[fn] for fn,t in self.dirty.items() if not t]
        newfaces += free
        ob = self.object
        uvtex,uvloop,uvdata = getUvData(ob)
        self.vertmap = dict([(vn,-1) for vn in range(self.nverts)])
        self.verts = []
        self.lastvert = 0
        faces = []
        uvfaces = []
        mats = list(ob.data.materials)
        mnums = []
        n = 0
        for newface in newfaces:
            taken = self.findTaken(newface)
            n = 0
            fn1 = newface[n]
            fverts = self.faceverts[fn1]
            idx = 0
            vn = fverts[idx]
            # Find a starting vertex that lies on the outer boundary of the
            # merged group (i.e. belongs to only one member face).
            while self.changeFace(vn, fn1, newface) >= 0:
                idx += 1
                if idx == len(fverts):
                    n += 1
                    if n == len(newface):
                        for fn in newface:
                            print(fn, self.faceverts[fn])
                        raise RuntimeError("BUG")
                    fn1 = newface[n]
                    fverts = self.faceverts[fn1]
                    idx = 0
                vn = fverts[idx]
            face = [self.getVert(vn)]
            uvface = [uvdata[fn1][idx]]
            mnums.append(self.origMnums[fn1])
            taken[vn] = True
            done = False
            # Walk the boundary of the merged group, hopping between member
            # faces at shared vertices, until we return to a taken vertex.
            while not done:
                fn2 = self.changeFace(vn, fn1, newface)
                if fn2 >= 0:
                    fn1 = fn2
                    fverts = self.faceverts[fn2]
                idx = getIndex(vn, fverts)
                idx = (idx+1)%len(fverts)
                vn = fverts[idx]
                if taken[vn]:
                    done = True
                else:
                    face.append(self.getVert(vn))
                    uvface.append(uvdata[fn1][idx])
                    taken[vn] = True
            if len(face) >= 3:
                faces.append(face)
                uvfaces.append(uvface)
            else:
                print("Non-face:", face)
        me = bpy.data.meshes.new("New")
        me.from_pydata(self.verts, [], faces)
        uvloop = makeNewUvloop(me, "Uvloop", True)
        n = 0
        for uvface in uvfaces:
            for uv in uvface:
                uvloop.data[n].uv = uv
                n += 1
        for mat in mats:
            me.materials.append(mat)
        for fn,mn in enumerate(mnums):
            f = me.polygons[fn]
            f.material_index = mn
            f.use_smooth = True
        # Re-index vertex-group weights to the new vertex numbering.
        vgnames = [vgrp.name for vgrp in ob.vertex_groups]
        weights = dict([(vn,{}) for vn in range(self.nverts)])
        for vn,v in enumerate(ob.data.vertices):
            nvn = self.vertmap[vn]
            if nvn >= 0:
                for g in v.groups:
                    weights[nvn][g.group] = g.weight
        # Save shapekeys before swapping the mesh data.
        skeys = []
        if ob.data.shape_keys:
            for skey in ob.data.shape_keys.key_blocks:
                data = dict([(vn, skey.data[vn].co) for vn in range(self.nverts)])
                skeys.append((skey.name, skey.value, skey.slider_min, skey.slider_max, data))
        from .driver import getShapekeyDrivers, copyShapeKeyDrivers
        drivers = getShapekeyDrivers(ob)
        ob.data = me
        ob.vertex_groups.clear()
        vgrps = {}
        for gn,vgname in enumerate(vgnames):
            vgrps[gn] = ob.vertex_groups.new(name=vgname)
        for vn,grp in weights.items():
            for gn,w in grp.items():
                vgrps[gn].add([vn], w, 'REPLACE')
        for (sname, value, min, max, data) in skeys:
            skey = ob.shape_key_add(name=sname)
            skey.slider_min = min
            skey.slider_max = max
            skey.value = value
            for vn,co in data.items():
                nvn = self.vertmap[vn]
                if nvn >= 0:
                    skey.data[nvn].co = co
        copyShapeKeyDrivers(ob, drivers)

    def changeFace(self, vn, fn1, newface):
        """Return another member face of *newface* containing vertex vn,
        or -1 if fn1 is the only one."""
        for fn2 in newface:
            if (fn2 != fn1 and
                vn in self.faceverts[fn2]):
                return fn2
        return -1

    def getVert(self, vn):
        """Return the new index for old vertex vn, allocating it on first use."""
        nvn = self.vertmap[vn]
        if nvn < 0:
            self.verts.append(self.object.data.vertices[vn].co)
            nvn = self.vertmap[vn] = self.lastvert
            self.lastvert += 1
        return nvn

    def findTaken(self, newface):
        """Pre-mark interior vertices (shared by >2 member faces) as taken so
        the boundary walk in buildNewMesh skips them."""
        taken = dict([vn,False] for fn in newface for vn in self.faceverts[fn])
        hits = dict([vn,0] for fn in newface for vn in self.faceverts[fn])
        for fn in newface:
            for vn in self.faceverts[fn]:
                hits[vn] += 1
                if hits[vn] > 2:
                    taken[vn] = True
        return taken

    def combineFaces(self, newfaces):
        """colorOnly mode: select each material group in turn and dissolve it
        into a single n-gon with edge_face_add."""
        ob = self.object
        maxmnum = self.colorFaces(newfaces)
        print("Max material number:", maxmnum)
        print("Adding faces")
        bpy.ops.object.mode_set(mode='EDIT')
        bpy.ops.mesh.select_mode(type='FACE')
        bpy.ops.mesh.select_all(action='DESELECT')
        count = 0
        for mn in range(maxmnum):
            if count % 25 == 0:
                print(" ", count)
            # Slot 0 of each material bank is reserved; skip it.
            if mn % self.matOffset == 0:
                continue
            bpy.ops.object.mode_set(mode='OBJECT')
            ob.active_material_index = mn
            bpy.ops.object.mode_set(mode='EDIT')
            bpy.ops.object.material_slot_select()
            try:
                bpy.ops.mesh.edge_face_add()
            except RuntimeError:
                # Selection could not form a face; ignore and continue.
                pass
            bpy.ops.mesh.select_all(action='DESELECT')
            bpy.ops.object.mode_set(mode='OBJECT')
            count += 1
        printStatistics(ob)

    def mergeNextFaces(self, face, newfaces):
        """Grow outward from a merged *face*: merge the faces adjacent to each
        internal pair of members, breadth-first."""
        me = self.object.data
        if len(face) < 2:
            return
        nextfaces = [face]
        while nextfaces:
            faces = nextfaces
            nextfaces = []
            for face in faces:
                for fn0 in face:
                    mn = self.origMnums[fn0]
                    for fn1 in face:
                        if (fn1 in self.neighbors[fn0] and
                            mn == self.origMnums[fn1]):
                            newface = self.mergeSide(fn0, fn1, newfaces, mn)
                            if newface:
                                if len(newface) == 4:
                                    for fn in newface:
                                        me.polygons[fn].select = True
                                nextfaces.append(newface)
                                break

    def mergeSide(self, fn0, fn1, newfaces, mn):
        """Find a clean (non-dirty, non-seam) neighbor pair (fn2, fn3) flanking
        the edge fn0-fn1 and merge it; return the new face or None."""
        for fn2 in self.neighbors[fn0]:
            if (self.dirty[fn2] or
                fn2 in self.seams[fn0] or
                fn2 in self.seams[fn1]
                ):
                continue
            for fn3 in self.neighbors[fn1]:
                if (fn3 == fn2 or
                    self.dirty[fn3] or
                    fn3 not in self.neighbors[fn2] or
                    fn3 in self.seams[fn0] or
                    fn3 in self.seams[fn1] or
                    fn3 in self.seams[fn2]
                    ):
                    continue
                self.dirty[fn2] = True
                self.dirty[fn3] = True
                newface = self.mergeFacePair([fn2,fn3], newfaces, mn)
                return newface
        return None

    def mergeFaces(self, fn0, newfaces):
        """Start a merge group at seed face fn0 with one eligible neighbor."""
        newface = [fn0]
        self.dirty[fn0] = True
        mn = self.origMnums[fn0]
        for fn1 in self.neighbors[fn0]:
            if (fn1 not in self.seams[fn0] and
                not self.dirty[fn1] and
                mn == self.origMnums[fn1]):
                newface.append(fn1)
                self.dirty[fn1] = True
                break
        if len(newface) == 2:
            return self.mergeFacePair(newface, newfaces, mn)
        else:
            newfaces.append(newface)
            return newface

    def mergeFacePair(self, newface, newfaces, mn):
        """Try to grow a 2-face group into a 2x2 block (4 faces); a group that
        only reaches 3 faces is shrunk back to 2."""
        fn0,fn1 = newface
        for fn2 in self.neighbors[fn0]:
            if (fn2 != fn1 and
                self.sharedVertex(fn1, fn2) and
                fn2 not in self.seams[fn0] and
                not self.dirty[fn2] and
                mn == self.origMnums[fn2]):
                newface.append(fn2)
                self.dirty[fn2] = True
                break
        if len(newface) == 3:
            fn2 = newface[2]
            for fn3 in self.neighbors[fn1]:
                if (fn3 != fn0 and
                    fn3 != fn2 and
                    fn3 in self.neighbors[fn2] and
                    not self.dirty[fn3] and
                    mn == self.origMnums[fn3]):
                    newface.append(fn3)
                    self.dirty[fn3] = True
                    break
        if len(newface) == 3:
            # No fourth face found: give fn2 back and keep the pair.
            fn0,fn1,fn2 = newface
            self.dirty[fn2] = False
            newface = [fn0,fn1]
        newfaces.append(newface)
        return newface

    def sharedVertex(self, fn1, fn2):
        """True if the two faces share at least one vertex."""
        for vn in self.faceverts[fn1]:
            if vn in self.faceverts[fn2]:
                return True
        return False

    def colorFaces(self, newfaces):
        """Assign each merged group the lowest material index not used by any
        neighboring face (greedy graph coloring); return the max index used."""
        me = self.object.data
        matnums = dict((fn,0) for fn in range(self.nfaces))
        maxmnum = 0
        for newface in newfaces:
            mnums = []
            for fn in newface:
                mnums += [matnums[fn2] for fn2 in self.neighbors[fn]]
            mn = 1
            while mn in mnums:
                mn += 1
            if mn > maxmnum:
                maxmnum = mn
            for fn in newface:
                f = me.polygons[fn]
                f.material_index = matnums[fn] = mn
        return maxmnum

    def createMaterials(self):
        """Replace the object's materials with 27 solid RGB debug materials."""
        me = self.object.data
        mats = [mat for mat in me.materials]    # NOTE(review): unused
        me.materials.clear()
        n = 0
        for r in range(3):
            for g in range(3):
                for b in range(3):
                    mat = bpy.data.materials.new("Mat-%02d" % n)
                    n += 1
                    mat.diffuse_color[0:3] = (r/2, g/2, b/2)
                    me.materials.append(mat)

    def selectRandomComponents(self, context):
        """Select a random subset of connected components (hair strands),
        keeping roughly DazRandomKeepFraction of them unselected."""
        import random
        ob = context.object
        scn = context.scene
        deselectEverything(ob, context)
        self.faceverts, self.vertfaces = getVertFaces(ob)
        self.neighbors = findNeighbors(range(self.nfaces), self.faceverts, self.vertfaces)
        comps,taken = self.getConnectedComponents()
        for comp in comps.values():
            if random.random() > scn.DazRandomKeepFraction:
                for fn in comp:
                    f = ob.data.polygons[fn]
                    if not f.hide:
                        f.select = True
        bpy.ops.object.mode_set(mode='EDIT')
def getUvData(ob):
    """Return (uv texture layer, uv loop layer, per-face lists of UV coords)."""
    from collections import OrderedDict
    uvtex = getUvTextures(ob.data)
    uvloop = ob.data.uv_layers[0]
    uvdata = OrderedDict()
    offset = 0
    for face_index, face in enumerate(ob.data.polygons):
        count = len(face.vertices)
        # Loop data is laid out face after face, so slice sequentially.
        uvdata[face_index] = [uvloop.data[k].uv for k in range(offset, offset + count)]
        offset += count
    return uvtex, uvloop, uvdata
def deleteMidpoints(ob):
    """Collapse vertices that sit in the middle of exactly two edges.

    Each such vertex is snapped onto the other endpoint of its first edge
    (with its UVs moved along), so a later remove-doubles pass deletes it.
    Raises RuntimeError if mesh topology tables are inconsistent — this
    replaces the original undefined name ``halt`` that crashed with an
    uninformative NameError.
    """
    edgeverts, vertedges = getVertEdges(ob)
    faceverts, vertfaces = getVertFaces(ob)
    uvtex, uvloop, uvdata = getUvData(ob)
    for vn, v in enumerate(ob.data.vertices):
        if (len(vertedges[vn]) == 2 and
            len(vertfaces[vn]) <= 2):
            e = vertedges[vn][0]
            vn1, vn2 = e.vertices
            if vn1 == vn:
                v.co = ob.data.vertices[vn2].co
                moveUv(vn, vn2, vertfaces[vn], faceverts, uvdata)
            elif vn2 == vn:
                v.co = ob.data.vertices[vn1].co
                moveUv(vn, vn1, vertfaces[vn], faceverts, uvdata)
            else:
                # vertedges says vn lies on e, but e does not contain vn.
                raise RuntimeError(
                    "Inconsistent topology: vertex %d not on edge %d" % (vn, e.index))
    # Write the (possibly modified) UVs back into the uv loop layer.
    m = 0
    for uvs in uvdata.values():
        for j, uv in enumerate(uvs):
            uvloop.data[m+j].uv = uv
        m += len(uvs)
def moveUv(vn1, vn2, fnums, faceverts, uvdata):
    """In every face in *fnums*, overwrite vertex vn1's UV with vn2's UV."""
    for face_index in fnums:
        corners = faceverts[face_index]
        dst = getIndex(vn1, corners)
        src = getIndex(vn2, corners)
        uvdata[face_index][dst] = uvdata[face_index][src]
def getIndex(vn, verts):
    """Return the position of vertex *vn* within *verts*, or None if absent."""
    return next((pos for pos, candidate in enumerate(verts) if candidate == vn), None)
#-------------------------------------------------------------
# Insert seams
#-------------------------------------------------------------
def insertSeams(hum, pxy):
    """Copy the seam edges of the human mesh onto the proxy mesh.

    Seam endpoints missing from the proxy get new proxy vertices; new edges
    are created between them, and finally every proxy edge whose endpoints
    are seam-adjacent is marked as a seam.
    """
    # Clear all old seams on the proxy.
    for pe in pxy.data.edges:
        pe.use_seam = False
    humPxy,pxyHum = identifyVerts(hum, pxy)
    pvn = pvn0 = len(pxy.data.vertices)    # next free proxy vertex index
    pen = len(pxy.data.edges)              # next free proxy edge index
    newVerts = {}
    newEdges = {}
    seams = [e for e in hum.data.edges if e.use_seam]
    nseams = {}    # proxy vertex -> list of seam-adjacent proxy vertices
    for e in seams:
        vn1,vn2 = e.vertices
        old1 = (vn1 in humPxy.keys())
        old2 = (vn2 in humPxy.keys())
        if old1 and old2:
            # Both endpoints already exist in the proxy; only add an edge
            # if not already recorded as seam-adjacent.
            pvn1 = humPxy[vn1]
            pvn2 = humPxy[vn2]
            if (pvn1 in nseams.keys() and
                pvn2 not in nseams[pvn1]):
                newEdges[pen] = (pvn1, pvn2)
                pen += 1
        elif old1:
            pvn1 = humPxy[vn1]
            pvn2 = pvn
            newVerts[pvn2] = hum.data.vertices[vn2].co
            humPxy[vn2] = pvn2
            pvn += 1
            newEdges[pen] = (pvn1, pvn2)
            pen += 1
        elif old2:
            pvn1 = pvn
            newVerts[pvn1] = hum.data.vertices[vn1].co
            humPxy[vn1] = pvn1
            pvn2 = humPxy[vn2]
            pvn += 1
            newEdges[pen] = (pvn1, pvn2)
            pen += 1
        else:
            # Neither endpoint exists: allocate both.
            pvn1 = pvn
            newVerts[pvn1] = hum.data.vertices[vn1].co
            humPxy[vn1] = pvn1
            pvn2 = pvn+1
            newVerts[pvn2] = hum.data.vertices[vn2].co
            humPxy[vn2] = pvn2
            pvn += 2
            newEdges[pen] = (pvn1, pvn2)
            pen += 1
        # Record seam adjacency between the two endpoints.
        if pvn1 not in nseams.keys():
            nseams[pvn1] = [pvn2]
        else:
            nseams[pvn1].append(pvn2)
        if pvn2 not in nseams.keys():
            nseams[pvn2] = [pvn1]
        else:
            nseams[pvn2].append(pvn1)
        if 1367 in [pvn1,pvn2]:
            # Hard-coded debug trace for one specific proxy vertex.
            print("O", vn1, vn2, pvn, pvn1, pvn2, old1, old2)
            print(" ", hum.data.vertices[vn1].co)
            print(" ", hum.data.vertices[vn2].co)
            print(" ", nseams[1367])
            print(" ", pxyHum[1367])
    pvn0 = len(pxy.data.vertices)
    pxy.data.vertices.add(len(newVerts))
    for pvn,co in newVerts.items():
        pxy.data.vertices[pvn].co = co
    #for pvn in range(pvn0, pvn0+3):
    #    print(" ", pvn, pxy.data.vertices[pvn].co)
    pxy.data.edges.add(len(newEdges))
    for pen,pverts in newEdges.items():
        pe = pxy.data.edges[pen]
        pe.vertices = pverts
        pe.select = True
    # Mark every proxy edge between seam-adjacent vertices as a seam.
    for pe in pxy.data.edges:
        pvn1,pvn2 = pe.vertices
        if (pvn1 in nseams.keys() and
            pvn2 in nseams[pvn1]):
            pe.use_seam = True
def identifyVerts(hum, pxy):
    """Associate human and proxy vertices by spatial proximity.

    Both vertex sets are merged into one list sorted by coordinate; each
    proxy vertex is matched to the nearest human vertex found within a
    +/-20-entry window around it in the sorted list.  Returns the index
    maps (humPxy, pxyHum).  Matches farther than 1e-7 are reported.
    """
    hverts = [(v.co, ("H", v.index, v.co)) for v in hum.data.vertices]
    pverts = [(v.co, ("P", v.index, v.co)) for v in pxy.data.vertices]
    verts = hverts + pverts
    verts.sort()
    humPxy = {}
    pxyHum = {}
    nverts = len(verts)
    for m,vert in enumerate(verts):
        co1,data1 = vert
        if data1[0] == "P":
            mindist = 1e7
            pvn = data1[1]
            # Scan a window of neighbors in the sorted list for the closest
            # human vertex.  NOTE(review): if no "H" entry falls inside the
            # window, vn stays unbound from a previous iteration — verify.
            for j in range(-20,20):
                n = min(max(0, m+j), nverts-1)
                co2,data2 = verts[n]
                dist = (co1-co2).length
                if data2[0] == "H" and dist < mindist:
                    mindist = dist
                    vn = data2[1]
            humPxy[vn] = pvn
            pxyHum[pvn] = vn
            if mindist > 1e-7:
                pco = pxy.data.vertices[pvn]
                co = hum.data.vertices[vn]
                print("DIST", pvn, vn, pco, co, mindist)
    return humPxy, pxyHum
def deselectEverything(ob, context):
    """Clear the face, edge and vertex selection on *ob*."""
    setActiveObject(context, ob)
    bpy.ops.object.mode_set(mode='EDIT')
    # Deselect in every select mode, in the same order as before.
    for element in ('FACE', 'EDGE', 'VERT'):
        bpy.ops.mesh.select_mode(type=element)
        bpy.ops.mesh.select_all(action='DESELECT')
    bpy.ops.object.mode_set(mode='OBJECT')
#-------------------------------------------------------------
# Make Proxy
#-------------------------------------------------------------
class MakeProxy():
    """Mixin base for the low-poly operators; subclasses define makeProxy()."""

    @classmethod
    def poll(self, context):
        return (context.object and context.object.type == 'MESH')

    def execute(self, context):
        checkObjectMode(context)
        try:
            self.makeProxies(context)
        except DazError:
            handleDazError(context)
        return {'FINISHED'}

    def makeProxies(self, context):
        """Run makeProxy() on every selected mesh; report the ones that
        could not be handled (meshes with shapekeys)."""
        meshes,active = getSelectedObjects(context, 'MESH')
        print("-----")
        errors = []
        for ob in meshes:
            activateObject(context, ob)
            print("\nMake %s low-poly" % ob.name)
            self.makeProxy(ob, context, errors)
        restoreSelectedObjects(context, meshes, active)
        if errors:
            msg = "Cannot make low-poly version\nof meshes with shapekeys:"
            for ob in errors:
                msg += ("\n %s" % ob.name)
            raise DazError(msg)
class DAZ_OT_MakeQuickProxy(MakeProxy, bpy.types.Operator):
    bl_idname = "daz.make_quick_proxy"
    bl_label = "Make Quick Low-poly"
    bl_description = "Replace all selected meshes by low-poly versions, using a quick algorithm that does not preserve UV seams"
    bl_options = {'UNDO'}

    def makeProxy(self, ob, context, errors):
        """Decimate *ob* in place; meshes with shapekeys are skipped and
        collected in *errors*."""
        scn = context.scene
        if ob.data.shape_keys:
            errors.append(ob)
            return None
        applyShapeKeys(ob)
        printStatistics(ob)
        makeRawProxy(ob, scn.DazIterations)
        printStatistics(ob)
        return ob
class DAZ_OT_MakeFaithfulProxy(MakeProxy, bpy.types.Operator):
    """Seam-preserving low-poly operator using the Proxifier merge algorithm."""
    bl_idname = "daz.make_faithful_proxy"
    bl_label = "Make Faithful Low-poly"
    # Fix: user-facing grammar ("a experimental" -> "an experimental").
    bl_description = "Replace all selected meshes by low-poly versions, using an experimental algorithm that does preserve UV seams"
    bl_options = {'UNDO'}

    def makeProxy(self, ob, context, _errors):
        # _errors is unused here: this algorithm handles shapekeys itself.
        return Proxifier(ob).make(ob, context)
#-------------------------------------------------------------
# Quadify
#-------------------------------------------------------------
class DAZ_OT_Quadify(MakeProxy, bpy.types.Operator):
    """Join triangles into quads on all selected meshes via Blender's
    tris_convert_to_quads operator."""
    bl_idname = "daz.quadify"
    bl_label = "Quadify Triangles"
    bl_description = "Join triangles to quads"
    bl_options = {'UNDO'}

    @classmethod
    def poll(self, context):
        return (context.object and context.object.type == 'MESH')

    def execute(self, context):
        meshes,active = getSelectedObjects(context, 'MESH')
        print("-----")
        errors = []    # NOTE(review): collected but never used here
        for ob in meshes:
            activateObject(context, ob)
            print("\nQuadify %s" % ob.name)
            printStatistics(ob)
            bpy.ops.object.mode_set(mode='EDIT')
            bpy.ops.mesh.select_mode(type='FACE')
            bpy.ops.mesh.select_all(action='SELECT')
            bpy.ops.mesh.tris_convert_to_quads()
            bpy.ops.object.mode_set(mode='OBJECT')
            printStatistics(ob)
        restoreSelectedObjects(context, meshes, active)
        return {'FINISHED'}
def getSelectedObjects(context, type):
    """Return (selected visible objects of the given type, active object)."""
    chosen = [
        ob for ob in getSceneObjects(context)
        if (getSelected(ob) and
            ob.type == type and
            not getattr(ob, HideViewport) and
            inSceneLayer(context, ob))
    ]
    return chosen, context.object
def restoreSelectedObjects(context, meshes, active):
    """Re-select *meshes* and restore the previously active object."""
    for mesh in meshes:
        setSelected(mesh, True)
    setActiveObject(context, active)
#-------------------------------------------------------------
# Find seams
#-------------------------------------------------------------
def proxifyAll(context, useAll=False):
    """Replace meshes in the scene by decimated low-poly versions.

    Objects sharing one mesh are decimated only once: duplicate users are
    temporarily pointed at a dummy mesh and restored afterwards.

    useAll -- process every mesh object; with the default False only the
              selected ones are processed (the original behavior).

    Fixes vs original: `scn` was referenced without being defined
    (NameError), and the operator called this function with two arguments
    while the signature accepted only one (TypeError).
    """
    scn = context.scene
    duplets = {}
    dummy = bpy.data.meshes.new("Dummy")
    for ob in getSceneObjects(context):
        if ob.type == 'MESH':
            if ob.data.name in duplets.keys():
                # Additional user of an already-seen mesh: park it on the
                # dummy so the shared mesh is decimated only once.
                duplets[ob.data.name].append(ob)
                ob.data = dummy
            else:
                duplets[ob.data.name] = []
    print("Making low-poly versions:")
    for ob in getSceneObjects(context):
        if (ob.type == 'MESH' and
            ob.data != dummy and
            (useAll or getSelected(ob))):
            setActiveObject(context, ob)
            print(" %s: %d verts" % (ob.name, len(ob.data.vertices)))
            applyShapeKeys(ob)
            makeRawProxy(ob, scn.DazIterations)
    print("Restoring duplets")
    for mname,obs in duplets.items():
        me = bpy.data.meshes[mname]
        for ob in obs:
            ob.data = me
    bpy.data.meshes.remove(dummy)
class DAZ_OT_ProxifyAll(bpy.types.Operator, UseAllBool):
    """Operator wrapper around proxifyAll; UseAllBool contributes the
    useAll property (all meshes vs selected only)."""
    bl_idname = "daz.proxify_all"
    bl_label = "Make All Low-Poly"
    bl_description = "Replace all (selected) meshes by low-poly versions"
    bl_options = {'UNDO'}

    def execute(self, context):
        checkObjectMode(context)
        try:
            proxifyAll(context, self.useAll)
        except DazError:
            handleDazError(context)
        return {'FINISHED'}
#-------------------------------------------------------------
# Split n-gons
#-------------------------------------------------------------
def splitNgons(ob, context):
    """Triangulate every visible polygon of *ob* with more than 4 corners."""
    activateObject(context, ob)
    printStatistics(ob)
    # Start from a clean face selection.
    bpy.ops.object.mode_set(mode='EDIT')
    bpy.ops.mesh.select_mode(type='FACE')
    bpy.ops.mesh.select_all(action='DESELECT')
    bpy.ops.object.mode_set(mode='OBJECT')
    # Select only the n-gons (selection must be set in object mode).
    for f in ob.data.polygons:
        if (len(f.vertices) > 4 and not f.hide):
            f.select = True
    bpy.ops.object.mode_set(mode='EDIT')
    bpy.ops.mesh.quads_convert_to_tris(ngon_method='BEAUTY')
    #bpy.ops.mesh.tris_convert_to_quads()
    bpy.ops.object.mode_set(mode='OBJECT')
    printStatistics(ob)
class DAZ_OT_SplitNgons(bpy.types.Operator):
    """Operator: triangulate n-gons on all selected meshes."""
    bl_idname = "daz.split_ngons"
    bl_label = "Split n-gons"
    bl_description = "Split all polygons with five or more corners into triangles"
    bl_options = {'UNDO'}

    @classmethod
    def poll(self, context):
        return context.object and context.object.type == 'MESH'

    def execute(self, context):
        try:
            meshes,active = getSelectedObjects(context, 'MESH')
            for ob in meshes:
                print("\nSplit n-gons of %s" % ob.name)
                splitNgons(ob, context)
            restoreSelectedObjects(context, meshes, active)
        except DazError:
            handleDazError(context)
        return {'FINISHED'}
#-------------------------------------------------------------
# Find seams
#-------------------------------------------------------------
def findSeams(ob):
    """Mark seams on *ob* along UV-island boundaries and return topology.

    Returns (faceverts, vertfaces, neighbors, seams), where seams[fn] lists
    the neighbor faces of fn that lie across a UV boundary.  Boundary and
    non-manifold edges are also marked as seams.
    """
    print("Find seams", ob)
    #ob.data.materials.clear()
    faceverts,vertfaces = getVertFaces(ob)
    nfaces = len(faceverts)
    neighbors = findNeighbors(range(nfaces), faceverts, vertfaces)
    # Neighborhoods computed in UV space: faces split in UV are not
    # texneighbors even if adjacent in 3D.
    texverts,texfaces = findTexVerts(ob, vertfaces)
    _,texvertfaces = getVertFaces(ob, texverts, None, texfaces)
    texneighbors = findNeighbors(range(nfaces), texfaces, texvertfaces)
    seams = dict([(fn,[]) for fn in range(nfaces)])
    for fn1,nn1 in neighbors.items():
        for fn2 in nn1:
            if (fn2 not in texneighbors[fn1]):
                if fn1 in seams.keys():
                    seams[fn1].append(fn2)
    # Clear all existing seams.
    bpy.ops.object.mode_set(mode='EDIT')
    bpy.ops.mesh.select_mode(type='EDGE')
    bpy.ops.mesh.select_all(action='SELECT')
    bpy.ops.mesh.mark_seam(clear=True)
    bpy.ops.mesh.select_all(action='DESELECT')
    bpy.ops.object.mode_set(mode='OBJECT')
    # Select each edge whose two adjacent faces are 3D-neighbors but not
    # UV-neighbors (i.e. the edge lies on a UV-island boundary).
    for e in ob.data.edges:
        vn1,vn2 = e.vertices
        for fn1 in vertfaces[vn1]:
            f1 = ob.data.polygons[fn1]
            for fn2 in vertfaces[vn2]:
                f2 = ob.data.polygons[fn2]
                if (vn2 in f1.vertices and
                    vn1 in f2.vertices and
                    fn1 != fn2):
                    if fn2 in seams[fn1]:
                        e.select = True
    # Also select boundary / non-manifold edges.
    _,vertedges = getVertEdges(ob)
    _,edgefaces = getEdgeFaces(ob, vertedges)
    for e in ob.data.edges:
        if len(edgefaces[e.index]) != 2:
            e.select = True
    # Turn the selection into seams.
    bpy.ops.object.mode_set(mode='EDIT')
    bpy.ops.mesh.mark_seam(clear=False)
    bpy.ops.mesh.select_all(action='DESELECT')
    bpy.ops.object.mode_set(mode='OBJECT')
    print("Seams found")
    return faceverts, vertfaces, neighbors,seams
class DAZ_OT_FindSeams(bpy.types.Operator):
    """Operator: mark seams on the active mesh from its UV islands."""
    bl_idname = "daz.find_seams"
    bl_label = "Find Seams"
    bl_description = "Create seams based on existing UVs"
    bl_options = {'UNDO'}

    @classmethod
    def poll(self, context):
        return context.object and context.object.type == 'MESH'

    def execute(self, context):
        checkObjectMode(context)
        try:
            findSeams(context.object)
        except DazError:
            handleDazError(context)
        return {'FINISHED'}
#-------------------------------------------------------------
# Select random strands
#-------------------------------------------------------------
class DAZ_OT_SelectRandomStrands(bpy.types.Operator):
    """Operator: select a random subset of connected components (strands)."""
    bl_idname = "daz.select_random_strands"
    bl_label = "Select Random Strands"
    bl_description = "Select random subset of strands selected in UV space"
    bl_options = {'UNDO'}

    @classmethod
    def poll(self, context):
        return context.object and context.object.type == 'MESH'

    def execute(self, context):
        checkObjectMode(context)
        try:
            ob = context.object
            Proxifier(ob).selectRandomComponents(context)
        except DazError:
            handleDazError(context)
        return {'FINISHED'}
#-------------------------------------------------------------
# Apply morphs
#-------------------------------------------------------------
def applyShapeKeys(ob):
    """Bake the current shapekey mix into the mesh and remove all shapekeys."""
    from .morphing import getShapeKeyCoords
    if ob.type != 'MESH':
        return
    if ob.data.shape_keys:
        # Capture the blended coordinates before removing anything.
        skeys,coords = getShapeKeyCoords(ob)
        # Remove keys in reverse order so remaining indices stay valid.
        skeys.reverse()
        for skey in skeys:
            ob.shape_key_remove(skey)
        # Finally remove the basis key.
        skey = ob.data.shape_keys.key_blocks[0]
        ob.shape_key_remove(skey)
        # Write the baked coordinates into the base mesh.
        for v in ob.data.vertices:
            v.co = coords[v.index]
class DAZ_OT_ApplyMorphs(bpy.types.Operator):
    """Operator: bake shapekeys into all selected objects."""
    bl_idname = "daz.apply_morphs"
    bl_label = "Apply Morphs"
    bl_description = "Apply all shapekeys"
    bl_options = {'UNDO'}

    @classmethod
    def poll(self, context):
        return context.object and context.object.type == 'MESH'

    def execute(self, context):
        checkObjectMode(context)
        try:
            for ob in getSceneObjects(context):
                if getSelected(ob):
                    applyShapeKeys(ob)
        except DazError:
            handleDazError(context)
        return {'FINISHED'}
#-------------------------------------------------------------
# Print statistics
#-------------------------------------------------------------
def printStatistics(ob):
    """Print the vertex, edge and face counts of a mesh object."""
    me = ob.data
    counts = (len(me.vertices), len(me.edges), len(me.polygons))
    print("Verts: %d, Edges: %d, Faces: %d" % counts)
class DAZ_OT_PrintStatistics(bpy.types.Operator):
    """Operator: print mesh statistics for all selected meshes."""
    bl_idname = "daz.print_statistics"
    bl_label = "Print Statistics"
    bl_options = {'UNDO'}

    @classmethod
    def poll(self, context):
        return context.object and context.object.type == 'MESH'

    def execute(self, context):
        checkObjectMode(context)
        print("--------- Statistics ------------")
        try:
            for ob in getSceneObjects(context):
                if getSelected(ob) and ob.type == 'MESH':
                    print("Object: %s" % ob.name)
                    printStatistics(ob)
        except DazError:
            handleDazError(context)
        return {'FINISHED'}
#-------------------------------------------------------------
# Add mannequin
#-------------------------------------------------------------
def remapBones(bone, scn, vgrps, majors, remap):
    """Recursively fold the vertex weights of bones below a 'special' bone
    into that bone's group, per the scene's mannequin-head mode."""
    special = {
        'SOLID': ("head",),
        'JAW': ("head", "lowerjaw", "leye", "reye"),
        'FULL': (),
    }
    grp = vgrps.get(bone.name)
    if bone.name.lower() in special[scn.DazMannequinHead]:
        # This bone starts (or restarts) a remap target.
        if grp is not None:
            remap = grp.index
    elif remap is not None and grp is not None:
        # Descendant of a special bone: fold its weights into the target.
        gn = grp.index
        if gn in majors:
            majors[remap] += majors[gn]
            del majors[gn]
    for child in bone.children:
        remapBones(child, scn, vgrps, majors, remap)
def addMannequins(context):
    """Create mannequin objects for every selected mesh parented to the
    active object's armature, optionally grouping them in a group (2.7x)
    or collection (2.8x).

    Raises:
        DazError: if the active mesh has no armature parent.
    """
    objects = getSceneObjects(context)
    selected = [ob for ob in objects if getSelected(ob)]
    ob = context.object
    rig = ob.parent
    if not (rig and rig.type == 'ARMATURE'):
        raise DazError("Mesh %s has no armature parent" % ob)
    setActiveObject(context, rig)
    bpy.ops.object.mode_set(mode='OBJECT')
    # Expose all armature layers while building; restored at the end.
    oldlayers = list(rig.data.layers)
    rig.data.layers = 32*[True]

    # Create group/collection
    mangrp = None
    scn = context.scene
    coll = getCollection(context)
    if not scn.DazUseMannequinGroup:
        pass
    elif bpy.app.version < (2,80,0):
        # BUGFIX: was "<=", which routed Blender 2.80 into this branch even
        # though bpy.data.groups was removed in 2.80. Now consistent with
        # the version check used for the buttons27/buttons28 import.
        for grp in bpy.data.groups:
            if grp.name == scn.DazMannequinGroup:
                mangrp = grp
                break
        if mangrp is None:
            mangrp = bpy.data.groups.new(scn.DazMannequinGroup)
        if rig.name not in mangrp.objects.keys():
            mangrp.objects.link(rig)
    else:
        # 2.80+: use a dedicated collection instead of a group.
        coll = None
        for coll1 in scn.collection.children:
            if coll1.name == scn.DazMannequinGroup:
                coll = coll1
                break
        if coll is None:
            coll = bpy.data.collections.new(name=scn.DazMannequinGroup)
            scn.collection.children.link(coll)
        if rig.name not in coll.objects.keys():
            coll.objects.link(rig)

    # Add mannequin objects for selected meshes
    meshes = [ob for ob in objects if (getSelected(ob) and ob.type == 'MESH')]
    for ob in meshes:
        addMannequin(ob, context, rig, coll, mangrp)

    # Restore the original selection state and armature layers.
    for ob in getSceneObjects(context):
        if ob in selected:
            setSelected(ob, True)
        else:
            setSelected(ob, False)
    rig.data.layers = oldlayers
def addMannequin(ob, context, rig, coll, mangrp):
    """Build one rigid mannequin piece per major bone of *ob*.

    Each piece is a new mesh object containing the faces whose vertices are
    dominated by that bone's vertex group, parented to the bone and locked
    against transforms. Returns the list of (object, rig, bone, mesh) tuples.
    """
    from random import random
    from .node import setParent
    from .guess import getSkinMaterial

    scn = context.scene
    # Start from a random color; if a skin material is found among the
    # mesh materials, its diffuse color wins (loop breaks at 'Skin').
    mat = bpy.data.materials.new("%sMannequin" % ob.name)
    mat.diffuse_color[0:3] = (random(), random(), random())
    for omat in ob.data.materials:
        mat.diffuse_color = omat.diffuse_color
        data = getSkinMaterial(omat)
        if data and data[0] == 'Skin':
            break

    faceverts, vertfaces = getVertFaces(ob)
    # majors: group index -> vertices dominated by that group.
    # skip: group indices with no matching bone in the rig.
    majors = {}
    skip = []
    for vgrp in ob.vertex_groups:
        if vgrp.name in rig.data.bones:
            majors[vgrp.index] = []
        else:
            skip.append(vgrp.index)
    for v in ob.data.vertices:
        # Assign each vertex to its highest-weight non-skipped group;
        # weights at or below 1e-3 are ignored entirely.
        wmax = 1e-3
        vbest = None
        for g in v.groups:
            if g.weight > wmax and g.group not in skip:
                wmax = g.weight
                vbest = v
                gbest = g.group
        if vbest is not None:
            majors[gbest].append(vbest)

    # Merge face-bone vertex lists into head/jaw groups per scene setting.
    roots = [bone for bone in rig.data.bones if bone.parent is None]
    for bone in roots:
        remapBones(bone, scn, ob.vertex_groups, majors, None)

    # Take vertex locations from the strongest shapekey above 0.49, if any;
    # otherwise use the base mesh coordinates.
    obverts = ob.data.vertices
    vmax = 0.49
    if ob.data.shape_keys:
        for skey in ob.data.shape_keys.key_blocks:
            if skey.value > vmax:
                print("Using shapekey %s for %s locations" % (skey.name, ob.name))
                obverts = skey.data
                vmax = skey.value

    nobs = []
    for vgrp in ob.vertex_groups:
        if (vgrp.name not in rig.pose.bones.keys() or
            vgrp.index not in majors.keys()):
            continue
        # Collect all faces touching this group's major vertices.
        fnums = []
        for v in majors[vgrp.index]:
            for fn in vertfaces[v.index]:
                fnums.append(fn)
        fnums = list(set(fnums))
        nverts = []
        nfaces = []
        for fn in fnums:
            f = ob.data.polygons[fn]
            nverts += f.vertices
            nfaces.append(f.vertices)
        if not nfaces:
            continue
        nverts = list(set(nverts))
        nverts.sort()
        # Vertices are stored relative to the bone head so the new object
        # can be placed at the bone head and parented to the bone.
        bone = rig.data.bones[vgrp.name]
        head = bone.head_local
        verts = [obverts[vn].co-head for vn in nverts]
        # assoc: original vertex index -> index in the new mesh.
        assoc = dict([(vn,n) for n,vn in enumerate(nverts)])
        faces = []
        for fverts in nfaces:
            faces.append([assoc[vn] for vn in fverts])
        name = ob.name[0:3] + "_" + vgrp.name
        me = bpy.data.meshes.new(name)
        me.from_pydata(verts, [], faces)
        nob = bpy.data.objects.new(name, me)
        coll.objects.link(nob)
        nob.location = head
        nob.lock_location = nob.lock_rotation = nob.lock_scale = (True,True,True)
        nobs.append((nob, rig, bone, me))

    # Parenting needs an up-to-date depsgraph for the freshly linked objects.
    updateScene(context, updateDepsGraph=True)
    for nob, rig, bone, me in nobs:
        setParent(context, nob, rig, bone.name, update=False)
        nob.DazMannequin = True
        if mangrp:
            mangrp.objects.link(nob)
        me.materials.append(mat)
    return nobs
class DAZ_OT_AddMannequin(bpy.types.Operator):
    bl_idname = "daz.add_mannequin"
    bl_label = "Add Mannequins"
    bl_description = "Add mannequins to selected meshes. Don't change rig after this."
    bl_options = {'UNDO'}

    @classmethod
    def poll(self, context):
        # Enabled only when the active object is a mesh.
        return context.object and context.object.type == 'MESH'

    def execute(self, context):
        checkObjectMode(context)
        try:
            addMannequins(context)
        except DazError:
            handleDazError(context)
        return {'FINISHED'}
#-------------------------------------------------------------
# Add push
#-------------------------------------------------------------
def addPush(context):
    """Add a "Push" shapekey to every selected mesh without shapekeys,
    displacing each vertex along its normal by the mesh's DazScale.

    Meshes that already have shapekeys are skipped and reported via a
    DazError at the end.
    """
    hasShapeKeys = []
    for ob in getSceneObjects(context):
        if getSelected(ob) and ob.type == 'MESH':
            #applyShapeKeys(ob)
            if ob.data.shape_keys:
                hasShapeKeys.append(ob)
            else:
                # The first shape_key_add creates the basis ("Basic") key;
                # the second becomes the actual push key.
                basic = ob.shape_key_add(name="Basic")
                skey = ob.shape_key_add(name="Push")
                scale = ob.DazScale
                for n,v in enumerate(ob.data.vertices):
                    skey.data[n].co += v.normal*scale
    if hasShapeKeys:
        # NOTE(review): these meshes were skipped, yet the message says the
        # push was "added" to them -- wording looks inverted; confirm intent
        # before changing the user-facing text.
        msg = ("Push added to meshes with shapekeys:\n " + "\n ".join([ob.name for ob in hasShapeKeys]))
        raise DazError(msg, True)
# Operator wrapper around addPush().
class DAZ_OT_AddPush(bpy.types.Operator):
    bl_idname = "daz.add_push"
    bl_label = "Add Push"
    bl_description = "Add a push shapekey"
    bl_options = {'UNDO'}

    @classmethod
    def poll(self, context):
        ob = context.object
        return ob and ob.type == 'MESH'

    def execute(self, context):
        checkObjectMode(context)
        try:
            addPush(context)
        except DazError:
            handleDazError(context)
        return {'FINISHED'}
#-------------------------------------------------------------
# Add subsurf
#-------------------------------------------------------------
def addSubsurf(context):
    """Add a subsurf modifier (viewport level 0, render level 1) to every
    selected mesh object."""
    targets = [ob for ob in getSceneObjects(context)
               if getSelected(ob) and ob.type == 'MESH']
    for ob in targets:
        mod = ob.modifiers.new('SUBSURF', 'SUBSURF')
        mod.levels = 0
        mod.render_levels = 1
class DAZ_OT_AddSubsurf(bpy.types.Operator):
    bl_idname = "daz.add_subsurf"
    bl_label = "Add Subsurf"
    bl_description = "Add a subsurf modifier"
    bl_options = {'UNDO'}

    @classmethod
    def poll(self, context):
        # Enabled only when the active object is a mesh.
        return context.object and context.object.type == 'MESH'

    def execute(self, context):
        checkObjectMode(context)
        try:
            addSubsurf(context)
        except DazError:
            handleDazError(context)
        return {'FINISHED'}
#-------------------------------------------------------------
# Make deflection
#-------------------------------------------------------------
class DAZ_OT_MakeDeflection(bpy.types.Operator):
    bl_idname = "daz.make_deflection"
    bl_label = "Make Deflection"
    bl_description = "Make a deflection object"
    bl_options = {'UNDO'}

    @classmethod
    def poll(self, context):
        # Enabled only when the active object is a mesh.
        return context.object and context.object.type == 'MESH'

    def execute(self, context):
        try:
            self.make(context)
        except DazError:
            handleDazError(context)
        return {'FINISHED'}

    def make(self, context):
        """Build a low-poly deflection mesh from a vertex subset of the
        active mesh, as listed in the bundled JSON file for this DAZ mesh
        type, copying the relevant vertex weights."""
        from .load_json import loadJson
        ob = context.object
        coll = getCollection(context)
        folder = os.path.dirname(__file__)
        filepath = os.path.join(folder, "data", "lowpoly", ob.DazMesh.lower()+".json")
        print(filepath)
        struct = loadJson(filepath, mustOpen=True)
        # vnums[i] is the index in the source mesh of the new mesh's vertex i.
        vnums = struct["vertices"]
        verts = [ob.data.vertices[vn].co for vn in vnums]
        faces = struct["faces"]
        me = bpy.data.meshes.new(ob.data.name+"Deflect")
        me.from_pydata(verts, [], faces)
        nob = bpy.data.objects.new(ob.name+"Deflect", me)
        coll.objects.link(nob)
        setActiveObject(context, nob)
        # (Removed an unused local dict of the source vertex groups.)
        ngrps = {}
        for vgrp in ob.vertex_groups:
            ngrp = nob.vertex_groups.new(name=vgrp.name)
            ngrps[ngrp.index] = ngrp
        # Copy vertex weights. Assumes the new groups get the same indices
        # as the source groups (they are created in the same order) --
        # TODO confirm against bpy vertex_groups.new() semantics.
        for nv in nob.data.vertices:
            v = ob.data.vertices[vnums[nv.index]]
            for g in v.groups:
                ngrp = ngrps[g.group]
                ngrp.add([nv.index], g.weight, 'REPLACE')
#----------------------------------------------------------
# Initialize
#----------------------------------------------------------
# All operator classes of this module; registered by initialize() and
# unregistered by uninitialize().
classes = [
    DAZ_OT_FindPolys,
    DAZ_OT_MakeQuickProxy,
    DAZ_OT_MakeFaithfulProxy,
    DAZ_OT_Quadify,
    DAZ_OT_ProxifyAll,
    DAZ_OT_SplitNgons,
    DAZ_OT_FindSeams,
    DAZ_OT_SelectRandomStrands,
    DAZ_OT_ApplyMorphs,
    DAZ_OT_PrintStatistics,
    DAZ_OT_AddMannequin,
    DAZ_OT_AddPush,
    DAZ_OT_AddSubsurf,
    DAZ_OT_MakeDeflection,
]
def initialize():
    """Register scene/object properties and all operator classes."""
    from bpy.props import BoolProperty, EnumProperty, StringProperty

    bpy.types.Object.DazMannequin = BoolProperty(default = False)
    bpy.types.Scene.DazMannequinHead = EnumProperty(
        items = [('SOLID', "Solid", "Solid head"),
                 ('JAW', "Jaw", "Head with jaws and eyes"),
                 ('FULL', "Full", "Head with all face bones"),
                ],
        name = "Head",
        description = "How to make the mannequin head",
        default = 'JAW')

    # BUGFIX: was "<=", which labeled the UI with "Group" on Blender 2.80
    # even though 2.80 uses collections. Now consistent with the version
    # check used for the buttons27/buttons28 import at the top of the file.
    if bpy.app.version < (2,80,0):
        usename = "Add To Group"
        usedesc = "Add mannequin to group"
        grpname = "Group"
        grpdesc = "Add mannequin to this group"
    else:
        usename = "Add To Collection"
        usedesc = "Add mannequin to collection"
        grpname = "Collection"
        grpdesc = "Add mannequin to this collection"
    bpy.types.Scene.DazUseMannequinGroup = BoolProperty(
        name = usename,
        description = usedesc,
        default = True)
    bpy.types.Scene.DazMannequinGroup = StringProperty(
        name = grpname,
        description = grpdesc,
        default = "Mannequin")

    for cls in classes:
        bpy.utils.register_class(cls)
def uninitialize():
    """Unregister all operator classes of this module."""
    for operator_class in classes:
        bpy.utils.unregister_class(operator_class)
| StarcoderdataPython |
32088 | import numpy as np
import tensorflow as tf
def split_reim(array):
    """Split a complex valued matrix into its real and imaginary parts.

    Args:
        array(complex): An array of shape (batch_size, N, N) or (batch_size, N, N, 1)

    Returns:
        split_array(float): An array of shape (batch_size, N, N, 2) with the
            real part in channel 0 and the imaginary part in channel 1
    """
    return np.stack((np.real(array), np.imag(array)), axis=3)
def split_reim_tensor(array):
    """Split a complex valued tensor into its real and imaginary parts.

    Args:
        array(complex): A tensor of shape (batch_size, N, N) or (batch_size, N, N, 1)

    Returns:
        split_array(float): A tensor of shape (batch_size, N, N, 2) containing the real part on one channel and the imaginary part on another channel
    """
    real = tf.math.real(array)
    imag = tf.math.imag(array)
    # tf.stack inserts a new axis at position 3, producing the 2-channel
    # real/imaginary layout.
    split_array = tf.stack((real, imag), axis=3)
    return split_array
def split_reim_channels(array):
    """Split a complex valued tensor into real and imaginary channel halves.

    Args:
        array(complex): A tensor of shape (batch_size, N, N, ch)

    Returns:
        split_array(float): A tensor of shape (batch_size, N, N, 2*ch) with
            the real channels first, followed by the imaginary channels
    """
    real = tf.math.real(array)
    imag = tf.math.imag(array)
    # Unlike split_reim_tensor, this concatenates along the existing channel
    # axis instead of stacking a new one. (An unused local holding the
    # channel count was removed; the docstring previously described the
    # wrong output shape.)
    split_array = tf.concat((real, imag), axis=3)
    return split_array
def join_reim(array):
    """Join the real and imaginary channels into one complex-valued matrix.

    Args:
        array(float): An array of shape (batch_size, N, N, 2)

    Returns:
        joined_array(complex): A complex-valued array of shape (batch_size, N, N)
    """
    real_part = array[:, :, :, 0]
    imag_part = array[:, :, :, 1]
    return real_part + 1j * imag_part
def join_reim_tensor(array):
    """Join the real and imaginary channels of a matrix to a single complex-valued matrix.

    Args:
        array(float): An array of shape (batch_size, N, N, 2)

    Returns:
        joined_array(complex): A complex-valued array of shape (batch_size, N, N)
    """
    # Both channels are cast to complex64 before combining so the result
    # dtype is complex64 regardless of the input float dtype.
    joined_array = tf.cast(array[:, :, :, 0], 'complex64') + \
        1j * tf.cast(array[:, :, :, 1], 'complex64')
    return joined_array
def join_reim_channels(array):
    """Join the real/imaginary channel halves of a tensor into a complex tensor.

    Args:
        array(float): A tensor of shape (batch_size, N, N, ch), with the first
            ch/2 channels holding the real part and the last ch/2 the
            imaginary part.

    Returns:
        joined_array(complex): A complex-valued tensor of shape
            (batch_size, N, N, ch/2)
    """
    n_channels = array.get_shape().as_list()[3]
    half = int(n_channels / 2)
    real_part = tf.cast(array[:, :, :, :half], dtype=tf.complex64)
    imag_part = tf.cast(array[:, :, :, half:], dtype=tf.complex64)
    return real_part + 1j * imag_part
def convert_to_frequency_domain(images):
    """Convert an array of images to their Fourier transforms.

    Args:
        images(float): An array of shape (batch_size, N, N, 2)

    Returns:
        spectra(float): An FFT-ed array of shape (batch_size, N, N, 2)
    """
    # The FFT is applied over the two spatial axes of each image in the
    # batch. (An unused local holding the image size was removed.)
    spectra = split_reim(np.fft.fft2(join_reim(images), axes=(1, 2)))
    return spectra
def convert_tensor_to_frequency_domain(images):
    """Convert a tensor of images to their Fourier transforms.

    Args:
        images(float): A tensor of shape (batch_size, N, N, 2)

    Returns:
        spectra(float): An FFT-ed tensor of shape (batch_size, N, N, 2)
    """
    # tf.signal.fft2d transforms the two innermost dimensions of the joined
    # complex tensor. (An unused local holding the image size was removed.)
    spectra = split_reim_tensor(tf.signal.fft2d(join_reim_tensor(images)))
    return spectra
def convert_to_image_domain(spectra):
    """Convert an array of Fourier spectra to the corresponding images.

    Args:
        spectra(float): An array of shape (batch_size, N, N, 2)

    Returns:
        images(float): An IFFT-ed array of shape (batch_size, N, N, 2)
    """
    # The inverse FFT is applied over the two spatial axes of each spectrum
    # in the batch. (An unused local holding the image size was removed.)
    images = split_reim(np.fft.ifft2(join_reim(spectra), axes=(1, 2)))
    return images
def convert_tensor_to_image_domain(spectra):
    """Convert a tensor of Fourier spectra to the corresponding images.

    Args:
        spectra(float): A tensor of shape (batch_size, N, N, 2)

    Returns:
        images(float): An IFFT-ed tensor of shape (batch_size, N, N, 2)
    """
    # tf.signal.ifft2d transforms the two innermost dimensions of the joined
    # complex tensor. (An unused local holding the image size was removed.)
    images = split_reim_tensor(tf.signal.ifft2d(join_reim_tensor(spectra)))
    return images
| StarcoderdataPython |
1716506 |
from typing import List
from base import version
class Solution:
    @version("220, 15.4mb")
    def shortestPathBinaryMatrix(self, grid: List[List[int]]) -> int:
        """Length of the shortest 8-connected path of 0-cells from the
        top-left to the bottom-right cell of *grid*, or -1 if none exists.

        Breadth-first search; visited cells are marked in *grid* itself.
        """
        last_row, last_col = len(grid) - 1, len(grid[0]) - 1
        steps = -1
        # Sentinel start just outside the grid: its only in-bounds neighbour
        # is (0, 0), so the start cell gets validated like any other cell.
        frontier = [(-1, -1)]
        moves = [(0, -1), (1, -1), (1, 0), (1, 1),
                 (0, 1), (-1, 1), (-1, 0), (-1, -1)]
        while frontier:
            steps += 1
            if (last_row, last_col) in frontier:
                return steps
            nxt = []
            for (row, col) in frontier:
                for (dr, dc) in moves:
                    r, c = row + dr, col + dc
                    try:
                        # Negative indices would wrap around, so reject them.
                        assert r >= 0 and c >= 0
                        if not grid[r][c]:
                            nxt.append((r, c))
                            grid[r][c] = 1  # mark visited
                    except (IndexError, AssertionError):
                        pass
            frontier = nxt
        return -1
| StarcoderdataPython |
47536 | <reponame>tadodotcom/pyjoulescope<filename>joulescope/usb/api.py
# Copyright 2018 Jetperch LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The USB backend which must be implemented for each platform type.
This module defines the USB backend. Each target platform
(such as Windows, Mac OS/X and Linux), must implement backend that conforms
to this API.
This API is **not** thread-safe. All methods and functions must be invoked
from a single thread.
"""
class DeviceEvent:
    # Event identifiers passed to the callbacks registered with
    # DeviceDriverApi.open and DeviceDriverApi.read_stream_start.
    ENDPOINT_CALLBACK_STOP = -1  # a callback indicated that streaming should stop
    UNDEFINED = 0
    COMMUNICATION_ERROR = 1  # a communication error that prevents this device from functioning, such as device removal
    ENDPOINT_CALLBACK_EXCEPTION = 2  # a callback threw an exception
class DeviceDriverApi:
    """The device driver API.

    This API is **not** thread-safe. All methods must be invoked from a
    single thread.
    """
    # Abstract base class: every method below must be implemented by the
    # platform-specific backend (Windows, macOS, Linux).

    def __str__(self):
        """Get the user-friendly device string.

        :return: f'{product_id_str}:{serial_number_str}'
        :raise IOError: On failure.

        WARNING: This function must correctly identify the device BEFORE it
        is opened. Therefore, it must only use the information available
        from USB enumeration.
        """
        raise NotImplementedError()

    @property
    def serial_number(self):
        """Get the assigned serial number.

        :return: The serial number string.

        This attribute is valid even before the device is opened.
        """
        raise NotImplementedError()

    def open(self, event_callback_fn):
        """Open the USB device.

        :param event_callback_fn: The function(event, message) to call on
            asynchronous events, mostly to allow robust handling of device
            errors. "event" is one of the :class:`DeviceEvent` values,
            and the message is a more detailed description of the event.
        :raise IOError: On failure.

        The event_callback_fn may be called asynchronous and from other
        threads. The event_callback_fn must implement any thread safety.
        """
        raise NotImplementedError()

    def close(self):
        """Close the USB device."""
        raise NotImplementedError()

    # --- control transfers -------------------------------------------------

    def control_transfer_out(self, cbk_fn, recipient, type_, request, value=0, index=0, data=None) -> bool:
        """Perform a control transfer with data from host to device.

        :param cbk_fn: The function called with the class:`ControlTransferResponse` result.
            This method guarantees that cbk_fn will always be called.
            cbk_fn may be called BEFORE exiting this method call.
        :param recipient: The recipient which is one of ['device', 'interface', 'endpoint', 'other']
        :param type_: The type which is one of ['standard', 'class', 'vendor'].
        :param request: The bRequest value.
        :param value: The wValue value.
        :param index: The wIndex value.
        :param data: The optional data to transfer from host to device.
            None (default) skips the data phase.
        :return: True on pending, False on error.
        """
        raise NotImplementedError()

    def control_transfer_in(self, cbk_fn, recipient, type_, request, value, index, length) -> bool:
        """Perform a control transfer with data from device to host.

        :param cbk_fn: The function called with the class:`ControlTransferResponse` result.
            This method guarantees that cbk_fn will always be called.
            cbk_fn may be called BEFORE exiting this method call.
        :param recipient: The recipient which is one of ['device', 'interface', 'endpoint', 'other']
        :param type_: The type which is one of ['standard', 'class', 'vendor'].
        :param request: The bRequest value.
        :param value: The wValue value.
        :param index: The wIndex value.
        :param length: The maximum number of bytes to transfer from device to host.
        :return: True on pending, False on error.
        """
        raise NotImplementedError()

    # --- bulk/stream endpoints ---------------------------------------------

    def read_stream_start(self, endpoint_id, transfers, block_size, data_fn, process_fn, stop_fn):
        """Read a stream of data using non-blocking (overlapped) IO.

        :param endpoint_id: The target endpoint address.
        :param transfers: The number of overlapped transfers to use,
            each of block_size bytes.
        :param block_size: The length of each block in bytes which must be
            a multiple of the maximum packet size for the endpoint.
        :param data_fn: The function(data) to call on each block
            of data. The data is an np.ndarray(dtype=uint8) containing
            the raw bytes received for each USB transaction.
            The length of data is normally block_size.
            Any value less than block_size is the last transfer
            in the transaction.

            When the device stops, it calls data_fn(None). The
            device can stop "automatically" through errors or when data_fn
            returns True. Call :meth:`read_stream_stop` to stop from
            the caller.

            This function will be called from the device's thread. The
            data_fn must return quickly to ensure that the USB stream
            is not starved.

            In all cases, data_fn should return None or False to continue
            streaming. data_fn can return True to stop the transmission.
            Most implementations use some form of non-blocking IO with
            multiple queue (overlapped) transactions that are pended
            early. On stop, additional data may be read before the
            transaction fully stops.
        :param process_fn: The function process_fn() to call after all
            USB endpoints have been recently serviced and data_fn was
            called at least once. The function should still be quick,
            but it can have more latency than data_fn.
        :param stop_fn: The function(event, message) called when this endpoint
            stops streaming data. See :class:`DeviceEvent` for allowed event
            values.

        Use :meth:`read_stream_stop` to stop.
        """
        raise NotImplementedError()

    def read_stream_stop(self, endpoint_id):
        """Stop a read stream.

        :param endpoint_id: The target endpoint address.

        When stop is complete, the data_fn provided to read_stream_start will
        be called with None.

        Use :meth:`read_stream_start` to start.
        """
        raise NotImplementedError()

    # --- status and event loop ---------------------------------------------

    def status(self):
        """Get the current device status.

        :return: A dict containing the following structure:
            endpoints: {
                pipe_id: { name: {value: v, units: u}, ...}
                ...
            }
        """
        raise NotImplementedError()

    def signal(self):
        """Signal that an external event occurred.

        This method allows another thread to cause the wait in process
        to activate.
        """
        raise NotImplementedError()

    def process(self, timeout=None):
        """Process any pending events.

        :param timeout: The timeout in float seconds.

        This method uses the operating-system specific method to wait on
        pending events, such select and WaitForMultipleObjects.
        """
        raise NotImplementedError()
class DeviceNotify:
    """Base class for platform-specific device insertion/removal notification."""

    def __init__(self, cbk):
        """Start device insertion/removal notification.

        :param cbk: The function called on device insertion or removal. The
            arguments are (inserted, info). "inserted" is True on insertion
            and False on removal. "info" contains platform-specific details
            about the device. In general, the application should rescan for
            relevant devices.
        """
        pass

    def close(self):
        """Close and stop the notifications."""
        raise NotImplementedError()
def scan(name: str=None):
    """Scan for attached devices.

    :param name: The case-insensitive name of the device to scan.
    :return: The list of attached backend :class:`Device` instances.
    """
    # Implemented by each platform-specific backend module.
    raise NotImplementedError()
| StarcoderdataPython |
1745112 | <filename>tests/TwoClasses_TwoDimensions.py
# Copyright (C) 2021, <NAME> <<EMAIL>>=
#
# License: MIT (see COPYING file)
import sys
from os import path
import numpy as np
import pandas as pd
import time
from sklearn.preprocessing import MinMaxScaler
from CytOpT import robbinsWass, CytOpT
from CytOpT.labelPropSto import labelPropSto, cTransform, cost
sys.path.append(path.dirname(path.dirname(path.abspath(__file__))))
class TwoClassesTwoDimension:

    def __init__(self, inputs):
        """
        2D projection using two markers.
        The data are divided into two classes: the CD4 cells where the CD4 marker is present and the CD8 cells where the CD8 marker is present.
        """
        # inputs: dict with the source/target expression matrices
        # (X_source, X_target), their labels (Lab_source, Lab_target) and
        # display copies. I/J are the source/target sample counts.
        self.inputs = inputs
        self.I = self.inputs['X_source'].shape[0]
        self.J = self.inputs['X_target'].shape[0]

    def initClassProportions(self):
        """Return the benchmark class proportions of the source and target
        label vectors as {'h_source': ..., 'h_true': ...}."""
        # Computation of the benchmark class proportions
        h_source = np.zeros(2)
        for k in range(2):
            h_source[k] = np.sum(self.inputs['Lab_source'] == k) / len(self.inputs['Lab_source'])
        h_true = np.zeros(2)
        for k in range(2):
            h_true[k] = np.sum(self.inputs['Lab_target'] == k) / len(self.inputs['Lab_target'])
        """
        Illustration of the framework
        A segmented data set and an unlabelled target data set.
        """
        return {'h_source': h_source, 'h_true': h_true}

    def optimalTransport(self):
        """'
        Approximation of the optimal dual vector u.
        In order to compute an approximation of the optimal transportation plan, we need to approximate 𝑃𝜀 .
        """
        # Uniform weights on source and target samples.
        alpha = 1 / self.I * np.ones(self.I)
        beta = 1 / self.J * np.ones(self.J)

        # Preprocessing of the data
        # NOTE(review): clipping negatives then min-max scaling mutates the
        # stored inputs in place; subsequent methods see the scaled data.
        self.inputs['X_source'] = self.inputs['X_source'] * (self.inputs['X_source'] > 0)
        self.inputs['X_target'] = self.inputs['X_target'] * (self.inputs['X_target'] > 0)
        scaler = MinMaxScaler()
        self.inputs['X_source'] = scaler.fit_transform(self.inputs['X_source'])
        self.inputs['X_target'] = scaler.fit_transform(self.inputs['X_target'])

        eps = 0.0001
        n_iter = 15000
        t0 = time.time()
        # Robbins-Monro stochastic approximation of the dual vector.
        u_last = robbinsWass(self.inputs['X_source'], self.inputs['X_target'],
                             alpha, beta, eps=eps, nIter=n_iter)
        elapsed_time = time.time() - t0
        print('Elapsed time :', elapsed_time / 60, 'mins')

        # Label propagation
        # One-hot encode the source labels before propagating to the target.
        L_source = np.zeros((2, self.I))
        for k in range(2):
            L_source[k] = np.asarray(self.inputs['Lab_source'] == k, dtype=int)
        t0 = time.time()
        Result_LP = labelPropSto(L_source, u_last, self.inputs['X_source'], self.inputs['X_target'],
                                 alpha, beta, eps)
        elapsed_time = time.time() - t0
        Lab_target_hat_one = Result_LP[1]
        print('Elapsed_time ', elapsed_time / 60, 'mins')
        return {'Lab_target_hat_one': Lab_target_hat_one, 'u_last': u_last}

    def estimateCytOpT(self):
        """
        Class proportions estimation with 𝙲𝚢𝚝𝙾𝚙𝚝
        Descent-Ascent procedure
        Setting of the parameters
        """
        n_it_grad = 1000
        n_it_sto = 10
        pas_grad = 50
        eps = 0.0001
        h_true = self.initClassProportions()['h_true']
        h_hat = CytOpT(xSource=self.inputs['X_source'], xTarget=self.inputs['X_target'],
                       labSource=self.inputs['Lab_source'],
                       method="both", eps=eps, nIter=n_it_grad, nItSto=n_it_sto,
                       stepGrad=pas_grad,
                       thetaTrue=h_true, monitoring=False)
        return {'h_hat': h_hat['proportions']}

    def estimateMinmax(self):
        """
        Minmax swapping procedure
        Setting of the parameters
        """
        eps = 0.0001
        lbd = 0.0001
        n_iter = 4000
        step_grad = 5
        power = 0.99
        Results_Minmax = CytOpT(self.inputs['X_source'], self.inputs['X_target'], self.inputs['Lab_source'],
                                eps=eps, lbd=lbd, nIter=n_iter,
                                step=step_grad, power=power, monitoring=False)
        return {'h_hat': Results_Minmax['proportions']}

    def optimalReweighted(self):
        """
        Classification using optimal transport with reweighted proportions
        The target measure 𝛽 is reweighted in order to match the weight vector ℎ̂ estimated with 𝙲𝚢𝚝𝙾𝚙𝚝 .
        """
        value_h = self.estimateCytOpT()
        beta = 1 / self.J * np.ones(self.J)
        # NOTE(review): the return value is unused below -- presumably kept
        # for its side effect of preprocessing/scaling the inputs; confirm.
        optimal_return = self.optimalTransport()
        # D maps class proportions to per-sample source weights.
        D = np.zeros((self.I, 2))
        D[:, 0] = 1 / np.sum(self.inputs['Lab_source'] == 0) * np.asarray(self.inputs['Lab_source'] == 0, dtype=float)
        D[:, 1] = 1 / np.sum(self.inputs['Lab_source'] == 1) * np.asarray(self.inputs['Lab_source'] == 1, dtype=float)
        alpha_mod = D.dot(value_h['h_hat'])

        # Approximation of the optimal dual vector u.
        eps = 0.0001
        n_iter = 150000
        u_last_two = robbinsWass(self.inputs['X_source'], self.inputs['X_target'],
                                 alpha_mod, beta, eps=eps, nIter=n_iter)
        # NOTE(review): labelPropSto is called here with the raw label
        # vector, while optimalTransport passes a one-hot matrix -- confirm
        # which encoding labelPropSto expects.
        Result_LP = labelPropSto(self.inputs['Lab_source'], u_last_two, self.inputs['X_source'],
                                 self.inputs['X_target'],
                                 alpha_mod, beta, eps)
        Lab_target_hat_two = Result_LP[1]
        return {'Lab_target_hat_two': Lab_target_hat_two, 'u_last_two': u_last_two}

    def optimalWithoutReweighted(self):
        """
        Transportation plan with or without reweighting
        Without reweighting
        """
        n_sub = 500
        eps = 0.0001
        opt_transport_return = self.optimalTransport()
        opt_rew = self.optimalReweighted()
        beta = 1 / self.J * np.ones(self.J)
        source_indices = np.random.choice(self.I, size=n_sub, replace=False)
        # c-transform of the dual vector on the target side.
        u_ce_storage = np.zeros(self.J)
        for j in range(self.J):
            u_ce_storage[j] = cTransform(opt_transport_return['u_last'], self.inputs['X_source'],
                                         self.inputs['X_target'],
                                         j, beta)
        # For each sampled source point, find its most likely target match.
        indices = np.zeros((n_sub, 2))
        for k, it in enumerate(source_indices):
            indices[k, 0] = it
            cost_x = cost(self.inputs['X_target'], self.inputs['X_source'][it])
            arg = np.exp((opt_transport_return['u_last'][it] + u_ce_storage - cost_x) / eps)
            indices[k, 1] = np.argmax(arg)
        indices = np.asarray(indices, dtype=int)

        # with reweighting
        u_ce_storage_two = np.zeros(self.J)
        for j in range(self.J):
            u_ce_storage_two[j] = cTransform(opt_rew['u_last_two'], self.inputs['X_source'], self.inputs['X_target'],
                                             j, beta)
        indices_two = np.zeros((n_sub, 2))
        for k, it in enumerate(source_indices):
            indices_two[k, 0] = it
            cost_x = cost(self.inputs['X_target'], self.inputs['X_source'][it])
            arg = np.exp((opt_rew['u_last_two'][it] + u_ce_storage_two - cost_x) / eps)
            indices_two[k, 1] = np.argmax(arg)
        indices_two = np.asarray(indices_two, dtype=int)
        # NOTE(review): the computed indices are never returned; the display
        # offset below suggests plotting code was removed -- confirm.
        X_target_lag = self.inputs['X_tar_display'].copy()
        X_target_lag[:, 0] = X_target_lag[:, 0] + 3500
if __name__ == '__main__':
    # Build the input dict from the W2 CSV fixtures: CD4/CD8 expression
    # matrices for source/target, and binary labels (cluster id >= 6 -> CD4).
    data = {"X_source": np.asarray(pd.read_csv('data/W2_1_values.csv',
                                               usecols=np.arange(1, 8))[['CD4', 'CD8']]),
            'X_target': np.asarray(pd.read_csv('data/W2_7_values.csv',
                                               usecols=np.arange(1, 8))[['CD4', 'CD8']]),
            'X_sou_display': np.asarray(pd.read_csv('data/W2_1_values.csv',
                                                    usecols=np.arange(1, 8))[['CD4', 'CD8']]),
            'Lab_source': np.asarray(pd.read_csv('data/W2_1_clust.csv',
                                                 usecols=[1])['x'] >= 6, dtype=int),
            'Lab_target': np.asarray(pd.read_csv('data/W2_7_clust.csv',
                                                 usecols=[1])['x'] >= 6, dtype=int),
            'X_tar_display': np.asarray(pd.read_csv('data/W2_7_values.csv',
                                                    usecols=np.arange(1, 8))[['CD4', 'CD8']]),
            'names_pop': ['CD8', 'CD4']}
    test1 = TwoClassesTwoDimension(data)
    print(test1.estimateCytOpT())
| StarcoderdataPython |
3397722 | #!/usr/bin/python
from k5test import *
# We should have a comprehensive suite of KDC host referral tests
# here, based on the tests in the kdc_realm subdir. For now, we just
# have a regression test for #7483.
# A KDC should not return a host referral to its own realm.
# Configure a realm whose KDC maps domain 'y' to its own realm, so a host
# referral for x/z.y@ would point back at KRBTEST.COM.
krb5_conf = {'master': {'domain_realm': {'y': 'KRBTEST.COM'}}}
kdc_conf = {'master': {'realms': {'$realm': {'host_based_services': 'x'}}}}
realm = K5Realm(krb5_conf=krb5_conf, kdc_conf=kdc_conf, create_host=False)

tracefile = os.path.join(realm.testdir, 'trace')
realm.run_as_client(['env', 'KRB5_TRACE=' + tracefile, kvno, '-u', 'x/z.y@'],
                    expected_code=1)
# Read the client trace; a context manager guarantees the handle is closed
# even if the read fails (was a bare open/read/close sequence).
with open(tracefile, 'r') as f:
    trace = f.read()
if 'back to same realm' in trace:
    fail('KDC returned referral to service realm')

success('KDC host referral tests')
| StarcoderdataPython |
1746592 | import boto3
import os
import requests
from settings import DEFAULT_REGION, KEYNAME
session = boto3.session.Session(region_name=DEFAULT_REGION, profile_name=KEYNAME)
def get_public_ip(instance_ids):
    """Return the public IP of the first instance described for
    *instance_ids*, or None if that instance has no public IP.

    Returns None implicitly when no reservations are returned.
    """
    ec2_client = session.client("ec2")
    response = ec2_client.describe_instances(InstanceIds=instance_ids)
    for reservation in response.get("Reservations"):
        for instance in reservation["Instances"]:
            # Intentionally returns on the very first instance found.
            return instance.get("PublicIpAddress")
def get_running_instances():
    """Return one summary string per running EC2 instance.

    Returns:
        list[str]: lines of "instance_id, instance_type, public_ip, private_ip".
    """
    ec2_client = session.client("ec2")
    reservations = ec2_client.describe_instances(
        Filters=[{"Name": "instance-state-name", "Values": ["running"],}]
    ).get("Reservations")
    instances = []
    for reservation in reservations:
        for instance in reservation["Instances"]:
            instance_id = instance["InstanceId"]
            instance_type = instance["InstanceType"]
            # Use .get(): a running instance in a private subnet has no
            # "PublicIpAddress" key, which previously raised KeyError.
            public_ip = instance.get("PublicIpAddress", "")
            private_ip = instance.get("PrivateIpAddress", "")
            instances.append(
                f"{instance_id}, {instance_type}, {public_ip}, {private_ip}"
            )
    return instances
def get_instance_status(instance_id):
    """Return one status string per described instance.

    Args:
        instance_id: a specific instance id, or a falsy value to describe
            all instances.

    Returns:
        list[str]: lines of "instance_id, instance_type, status, details",
        where details reflects whether the hosted site answers over HTTP.
    """
    ec2_client = session.client("ec2")
    if instance_id:
        reservations = ec2_client.describe_instances(InstanceIds=[instance_id]).get(
            "Reservations"
        )
    else:
        reservations = ec2_client.describe_instances().get("Reservations")
    instances_status = []
    for reservation in reservations:
        for instance in reservation["Instances"]:
            instance_id = instance["InstanceId"]
            instance_type = instance["InstanceType"]
            instance_status = instance["State"]["Name"]
            public_dns_name = instance["PublicDnsName"]
            link_details = "Server is spinning up"
            if instance_status == "running":
                link_details = "Server is up and docker is spinning up right now"
                try:
                    # timeout added so a non-responding host cannot hang the
                    # whole status scan indefinitely.
                    response = requests.get(f"http://{public_dns_name}", timeout=5)
                    if response.status_code == 200:
                        link_details = f"The site is up and running. please visit http://{public_dns_name}"
                except requests.RequestException:
                    # Narrowed from a bare except: only network/HTTP errors
                    # mean "docker still starting"; other bugs should surface.
                    link_details = "Server is up and docker is spinning up right now"
            elif instance_status == "terminated":
                link_details = "Server is terminated"
            elif instance_status == "shutting-down":
                link_details = "Server is shutting down"
            else:
                link_details = ""
            instances_status.append(
                f"{instance_id}, {instance_type}, {instance_status}, {link_details}"
            )
    return instances_status
def stop_instance(instance_id):
    """Stop the EC2 instance *instance_id* and return the API response."""
    ec2 = session.client("ec2")
    return ec2.stop_instances(InstanceIds=[instance_id])
def terminate_instance(instance_id):
    """Terminate the EC2 instance *instance_id* and return the API response."""
    ec2 = session.client("ec2")
    return ec2.terminate_instances(InstanceIds=[instance_id])
def create_key_pair():
    """Create an EC2 key pair named KEYNAME and store its private key at
    /tmp/aws_ec2_key.pem with 0400 permissions."""
    ec2_client = session.client("ec2")
    key_pair = ec2_client.create_key_pair(KeyName=KEYNAME)
    private_key = key_pair["KeyMaterial"]

    # write private key to file with 400 permissions.
    # O_TRUNC added so a pre-existing longer file cannot leave stale key
    # bytes after the new key; mode fixed from "w+" to "w" because the fd
    # is opened write-only and cannot support read/update mode.
    flags = os.O_WRONLY | os.O_CREAT | os.O_TRUNC
    with os.fdopen(os.open("/tmp/aws_ec2_key.pem", flags, 0o400), "w") as handle:
        handle.write(private_key)
| StarcoderdataPython |
1698896 | import os
import time
import logging
import unittest
from unittest.mock import patch
from configparser import ConfigParser
import uuid
import pandas as pd
import numpy as np
import itertools
import shutil
from OTUSampleMetadataCorrelation.OTUSampleMetadataCorrelationServer import MethodContext
from OTUSampleMetadataCorrelation.authclient import KBaseAuth as _KBaseAuth
from installed_clients.WorkspaceClient import Workspace
from OTUSampleMetadataCorrelation.OTUSampleMetadataCorrelationImpl import OTUSampleMetadataCorrelation
__all__ = [
'shared_folder',
'get_serviceImpl',
'ctx',
'BaseTest',
'do_patch',
]
######################################
do_patch = True  # toggle patching for tests that can run independent of it

if do_patch:
    patch_ = patch
    patch_dict_ = patch.dict
else:
    # no-op stand-ins: decorators that return the function unchanged, so
    # tests written against patch_/patch_dict_ run against live services
    patch_ = lambda *a, **k: lambda f: f
    patch_dict_ = lambda *a, **k: lambda f: f
######################################
# Read the auth token and deployment config location from the environment
token = os.environ.get('KB_AUTH_TOKEN', None)
config_file = os.environ.get('KB_DEPLOYMENT_CONFIG', None)
cfg = {}
config = ConfigParser()
config.read(config_file)
for nameval in config.items('OTUSampleMetadataCorrelation'):
    cfg[nameval[0]] = nameval[1]
# Getting username from Auth profile for token
authServiceUrl = cfg['auth-service-url']
auth_client = _KBaseAuth(authServiceUrl)
user_id = auth_client.get_user(token)
# WARNING: don't call any logging methods on the context object,
# it'll result in a NoneType error
ctx = MethodContext(None)
ctx.update({'token': token,
            'user_id': user_id,
            'provenance': [
                {'service': 'OTUSampleMetadataCorrelation',
                 'method': 'please_never_use_it_in_production',
                 'method_params': []
                 }],
            'authenticated': 1})
wsURL = cfg['workspace-url']
wsClient = Workspace(wsURL)
shared_folder = cfg['scratch']
####################################################################################################
####################################################################################################
def get_serviceImpl():
    """Return a fresh service implementation built from the module-level config."""
    return OTUSampleMetadataCorrelation(cfg)
####################################################################################################
####################################################################################################
class BaseTest(unittest.TestCase):
    """Shared test scaffolding: creates a scratch workspace per test class.

    Bug fix: the original class defined ``setUpClass`` twice.  The second
    definition silently shadowed the first and was missing the
    ``@classmethod`` decorator, so unittest would call it without ``cls``
    and raise a TypeError.  The duplicate has been removed and the
    surviving (more complete) definition decorated; the ``cls.ws`` alias
    from the shadowed definition is kept for compatibility.
    """

    @classmethod
    def setUpClass(cls):
        print('BaseTest setUpClass')

        token = os.environ.get('KB_AUTH_TOKEN', None)
        config_file = os.environ.get('KB_DEPLOYMENT_CONFIG', None)
        cls.cfg = {}
        config = ConfigParser()
        config.read(config_file)
        for nameval in config.items('OTUSampleMetadataCorrelation'):
            cls.cfg[nameval[0]] = nameval[1]
        # Getting username from Auth profile for token
        authServiceUrl = cls.cfg['auth-service-url']
        auth_client = _KBaseAuth(authServiceUrl)
        user_id = auth_client.get_user(token)
        # WARNING: don't call any logging methods on the context object,
        # it'll result in a NoneType error
        cls.ctx = MethodContext(None)
        cls.ctx.update({'token': token,
                        'user_id': user_id,
                        'provenance': [
                            {'service': 'OTUSampleMetadataCorrelation',
                             'method': 'please_never_use_it_in_production',
                             'method_params': []
                             }],
                        'authenticated': 1})
        cls.wsURL = cls.cfg['workspace-url']
        cls.wsClient = Workspace(cls.wsURL)
        cls.wsName = 'OTUSampleMetadataCorrelation_' + str(uuid.uuid4())
        cls.wsId = cls.wsClient.create_workspace({'workspace': cls.wsName})[0]
        cls.params_ws = {
            'workspace_id': cls.wsId,
            'workspace_name': cls.wsName,
        }
        # alias preserved from the (previously shadowed) first definition
        cls.ws = cls.params_ws
        cls.serviceImpl = OTUSampleMetadataCorrelation(cls.cfg)
        cls.shared_folder = cls.cfg['scratch']
        cls.callback_url = os.environ['SDK_CALLBACK_URL']

    @classmethod
    def tearDownClass(cls):
        print('BaseTest tearDownClass')

        if hasattr(cls, 'wsName'):
            cls.wsClient.delete_workspace({'workspace': cls.wsName})
            print('Test workspace deleted')

    def shortDescription(self):
        '''Override unittest using test*() docstrings in lieu of test*() method name in output summary'''
        return None
| StarcoderdataPython |
3304938 | """Test for the EventThread."""
from unittest.mock import AsyncMock, patch
import pytest
from onyx_client.data.device_mode import DeviceMode
from onyx_client.data.numeric_value import NumericValue
from onyx_client.device.shutter import Shutter
from onyx_client.enum.action import Action
from onyx_client.enum.device_type import DeviceType
from custom_components.hella_onyx import (
EventThread,
)
from custom_components.hella_onyx.api_connector import (
UnknownStateException,
)
from custom_components.hella_onyx.const import MAX_BACKOFF_TIME
class TestEventThread:
    """Unit tests for EventThread._update(), covering the success, error
    and backoff paths, with MockAPI standing in for the real Onyx API."""

    @pytest.fixture
    def api(self):
        yield MockAPI()

    @pytest.fixture
    def coordinator(self):
        yield AsyncMock()

    @pytest.fixture
    def thread(self, api, coordinator):
        yield EventThread(api, coordinator, force_update=False, backoff=False)

    @pytest.mark.asyncio
    async def test_update(self, thread, api, coordinator):
        api.called = False
        await thread._update()
        assert api.is_called
        assert not api.is_force_update
        assert coordinator.async_set_updated_data.called

    @pytest.mark.asyncio
    async def test_update_force_update(self, thread, api, coordinator):
        thread._force_update = True
        api.called = False
        await thread._update()
        assert api.is_called
        assert api.is_force_update
        assert coordinator.async_set_updated_data.called

    @pytest.mark.asyncio
    async def test_update_invalid_device(self, thread, api, coordinator):
        # device() raising UnknownStateException must not push data
        api.called = False
        api.fail_device = True
        await thread._update()
        assert api.is_called
        assert not api.is_force_update
        assert not coordinator.async_set_updated_data.called

    @pytest.mark.asyncio
    async def test_update_none_device(self, thread, api, coordinator):
        # device() returning None must not push data
        api.called = False
        api.none_device = True
        await thread._update()
        assert api.is_called
        assert not api.is_force_update
        assert not coordinator.async_set_updated_data.called

    @pytest.mark.asyncio
    async def test_update_connection_error(self, thread, api, coordinator):
        # listen_events() raising must be handled without pushing data
        api.called = False
        api.fail = True
        await thread._update()
        assert api.is_called
        assert not api.is_force_update
        assert not coordinator.async_set_updated_data.called

    @pytest.mark.asyncio
    async def test_update_backoff(self, thread, api, coordinator):
        api.called = False

        async def sleep_called(backoff: int):
            # the backoff delay must be positive and below the configured cap
            assert backoff > 0
            assert backoff / 60 < MAX_BACKOFF_TIME
            thread._backoff = False

        with patch("asyncio.sleep", new=sleep_called):
            thread._backoff = True
            api.fail = True
            assert thread._backoff
            await thread._update()
            assert api.is_called
            assert not api.is_force_update
            assert not thread._backoff
            assert not coordinator.async_set_updated_data.called
class MockAPI:
    """Minimal stand-in for the Onyx API used by the EventThread tests.

    The flags ``fail``, ``fail_device`` and ``none_device`` steer the mock
    into the different error paths; ``called``/``force_update`` record how
    it was used.
    """

    def __init__(self):
        self.called = False
        self.force_update = False
        self.fail = False
        self.fail_device = False
        self.none_device = False

    @property
    def is_called(self):
        return self.called

    @property
    def is_force_update(self):
        return self.force_update

    def device(self, uuid: str):
        """Return a canned Shutter, or None / raise, depending on the flags."""
        self.called = True
        if self.none_device:
            return None
        if self.fail_device:
            raise UnknownStateException("ERROR")
        numeric = NumericValue(10, 10, 10, False, None)
        return Shutter(
            "uuid", "name", None, None, None, None, numeric, numeric, numeric
        )

    async def listen_events(self, force_update: bool):
        """Async generator of device events; raises when ``self.fail`` is set."""
        self.called = True
        self.force_update = force_update
        if self.fail:
            raise NotImplementedError()
        # NOTE(review): this call passes 5 positional args while device()
        # above passes 9 — confirm both match the Shutter constructor.
        yield Shutter(
            "uuid",
            "other",
            DeviceType.RAFFSTORE_90,
            DeviceMode(DeviceType.RAFFSTORE_90),
            list(Action),
        )
| StarcoderdataPython |
4801930 | import pymysql
def db_connect():
    """Open a connection to the TaipeiWater database.

    Returns:
        (connection, cursor) on success, or (None, None) if the connection
        attempt fails.
    """
    # Positional arguments to pymysql.connect() were removed in PyMySQL 1.0;
    # keyword arguments work on both old and new versions.
    # SECURITY(review): credentials are hard-coded here — consider moving
    # them to environment variables or a config file.
    try:
        db = pymysql.connect(host="localhost",
                             user="TaipeiWaterServer",
                             password="tpewater123",
                             database="TaipeiWater")
        cursor = db.cursor()
    except pymysql.MySQLError:
        return None, None
    return db, cursor
def sql_execute(db, cursor, sql, commit):
    """Run *sql* on *cursor*, optionally committing, and return fetched rows.

    On a MySQL error the error and statement are printed and an empty
    tuple is returned instead of raising.
    """
    try:
        cursor.execute(sql)
        if commit:
            db.commit()
    except pymysql.MySQLError as err:
        print(err)
        print(sql)
        return ()
    else:
        return cursor.fetchall()
def db_close(db):
    """Close the database connection *db*."""
    db.close()
| StarcoderdataPython |
1607988 | """
Various density standards.
"""
from numpy import array
# Visual density is typically used on grey patches. Take a reading and get
# the density values of the Red, Green, and Blue filters. If the difference
# between the highest and lowest value is less than or equal to the value
# below, return the density reading calculated against the ISO Visual spectral
# weighting curve. The X-Rite 500 uses a thresh of 0.05, the X-Rite i1 appears
# to use 0.08.
VISUAL_DENSITY_THRESH = 0.08
ANSI_STATUS_A_RED = array((
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.37,
43.45,
100.00,
74.30,
40.18,
19.32,
7.94,
3.56,
1.46,
0.60,
0.24,
0.09,
0.04,
0.01,
0.01,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00
))
ANSI_STATUS_A_GREEN = array((
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.04,
6.64,
60.53,
100.00,
80.54,
44.06,
16.63,
4.06,
0.58,
0.04,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00
))
ANSI_STATUS_A_BLUE = array((
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
4.00,
65.92,
100.00,
81.66,
41.69,
10.96,
0.79,
0.04,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00
))
ANSI_STATUS_E_RED = array((
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.01,
0.06,
0.45,
29.99,
100.00,
84.92,
54.95,
25.00,
10.00,
5.00,
1.50,
0.50,
0.30,
0.15,
0.05,
0.01,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00
))
ANSI_STATUS_E_GREEN = array((
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.01,
1.00,
5.00,
27.99,
68.08,
92.04,
100.00,
87.90,
66.07,
41.98,
21.98,
8.99,
2.50,
0.70,
0.09,
0.01,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00
))
ANSI_STATUS_E_BLUE = array((
0.00,
0.00,
0.00,
0.01,
0.27,
2.70,
13.00,
29.99,
59.98,
82.04,
100.00,
90.99,
76.03,
46.99,
17.99,
6.00,
0.80,
0.05,
0.01,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00
))
ANSI_STATUS_M_RED = array((
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.13,
30.13,
100.00,
79.25,
37.84,
17.86,
7.50,
3.10,
1.26,
0.49,
0.19,
0.07,
0.03,
0.01,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00
))
ANSI_STATUS_M_GREEN = array((
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.01,
0.16,
1.43,
6.37,
18.71,
42.27,
74.47,
100.00,
98.86,
65.77,
28.71,
8.22,
1.49,
0.17,
0.01,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00
))
ANSI_STATUS_M_BLUE = array((
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.13,
12.91,
42.85,
74.30,
100.00,
90.16,
55.34,
22.03,
5.53,
0.98,
0.07,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00
))
ANSI_STATUS_T_RED = array((
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.06,
0.45,
29.99,
100.00,
84.92,
54.95,
25.00,
10.00,
5.00,
1.50,
0.50,
0.30,
0.15,
0.05,
0.01,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00
))
ANSI_STATUS_T_GREEN = array((
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
1.00,
5.00,
27.99,
68.08,
92.04,
100.00,
87.90,
66.07,
41.98,
21.98,
8.99,
2.50,
0.70,
0.09,
0.01,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00
))
ANSI_STATUS_T_BLUE = array((
0.00,
0.01,
0.02,
0.10,
0.30,
1.50,
6.00,
16.98,
39.99,
59.98,
82.04,
93.97,
100.00,
97.05,
84.92,
65.01,
39.99,
17.99,
5.00,
0.20,
0.04,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00
))
TYPE1 = array((
0.00,
0.00,
0.01,
0.04,
0.72,
28.84,
100.00,
28.84,
0.72,
0.04,
0.01,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00
))
TYPE2 = array((
0.01,
0.51,
19.05,
38.28,
57.54,
70.96,
82.41,
90.36,
97.27,
100.00,
97.72,
89.33,
73.11,
55.34,
38.19,
22.44,
9.84,
2.52,
0.64,
0.16,
0.01,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00
))
ISO_VISUAL = array((
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.01,
0.02,
0.08,
0.28,
0.65,
1.23,
2.22,
3.82,
6.58,
10.99,
18.88,
32.58,
50.35,
66.83,
80.35,
90.57,
97.50,
100.00,
97.50,
90.36,
79.80,
67.14,
53.83,
39.17,
27.10,
17.30,
10.30,
5.61,
3.09,
1.54,
0.80,
0.42,
0.22,
0.11,
0.05,
0.03,
0.01,
0.01,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00
))
| StarcoderdataPython |
3296512 | <reponame>whdalsrnt/cost-analysis<filename>src/spaceone/cost_analysis/service/cost_query_set_service.py<gh_stars>1-10
import logging
from spaceone.core.service import *
from spaceone.core import utils
from spaceone.cost_analysis.error import *
from spaceone.cost_analysis.manager.cost_query_set_manager import CostQuerySetManager
from spaceone.cost_analysis.model.cost_query_set_model import CostQuerySet
_LOGGER = logging.getLogger(__name__)
@authentication_handler
@authorization_handler
@mutation_handler
@event_handler
class CostQuerySetService(BaseService):
    """CRUD / list / stat service for cost query sets, delegating all
    storage operations to CostQuerySetManager."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.cost_query_set_mgr: CostQuerySetManager = self.locator.get_manager('CostQuerySetManager')

    @transaction(append_meta={'authorization.scope': 'USER'})
    @check_required(['name', 'options', 'domain_id'])
    @change_date_value(['start', 'end'])
    def create(self, params):
        """Register cost_query_set

        Args:
            params (dict): {
                'name': 'str',
                'options': 'str',
                'tags': 'dict',
                'domain_id': 'str'
            }

        Returns:
            cost_query_set_vo (object)
        """
        # owner comes from the transaction metadata, not from caller params
        params['user_id'] = self.transaction.get_meta('user_id')

        return self.cost_query_set_mgr.create_cost_query_set(params)

    @transaction(append_meta={'authorization.scope': 'USER'})
    @check_required(['cost_query_set_id', 'domain_id'])
    @change_date_value(['end'])
    def update(self, params):
        """Update cost_query_set

        Args:
            params (dict): {
                'cost_query_set_id': 'str',
                'name': 'str',
                'options': 'dict',
                'tags': 'dict'
                'domain_id': 'str'
            }

        Returns:
            cost_query_set_vo (object)
        """
        cost_query_set_id = params['cost_query_set_id']
        domain_id = params['domain_id']

        # fetch first so an unknown id raises before any update is applied
        cost_query_set_vo: CostQuerySet = self.cost_query_set_mgr.get_cost_query_set(cost_query_set_id, domain_id)

        return self.cost_query_set_mgr.update_cost_query_set_by_vo(params, cost_query_set_vo)

    @transaction(append_meta={'authorization.scope': 'USER'})
    @check_required(['cost_query_set_id', 'domain_id'])
    def delete(self, params):
        """Deregister cost_query_set

        Args:
            params (dict): {
                'cost_query_set_id': 'str',
                'domain_id': 'str'
            }

        Returns:
            None
        """
        self.cost_query_set_mgr.delete_cost_query_set(params['cost_query_set_id'], params['domain_id'])

    @transaction(append_meta={'authorization.scope': 'USER'})
    @check_required(['cost_query_set_id', 'domain_id'])
    def get(self, params):
        """ Get cost_query_set

        Args:
            params (dict): {
                'cost_query_set_id': 'str',
                'domain_id': 'str',
                'only': 'list
            }

        Returns:
            cost_query_set_vo (object)
        """
        cost_query_set_id = params['cost_query_set_id']
        domain_id = params['domain_id']

        return self.cost_query_set_mgr.get_cost_query_set(cost_query_set_id, domain_id, params.get('only'))

    @transaction(append_meta={
        'authorization.scope': 'USER',
        'mutation.append_parameter': {'user_self': 'user_id'}
    })
    @check_required(['domain_id'])
    @append_query_filter(['cost_query_set_id', 'name', 'user_id', 'domain_id', 'user_self'])
    @append_keyword_filter(['cost_query_set_id', 'name'])
    def list(self, params):
        """ List cost_query_sets

        Args:
            params (dict): {
                'cost_query_set_id': 'str',
                'name': 'str',
                'user_id': 'str',
                'domain_id': 'str',
                'query': 'dict (spaceone.api.core.v1.Query)',
                'user_self': 'list', // from meta
            }

        Returns:
            cost_query_set_vos (object)
            total_count
        """
        # filters are appended into 'query' by the decorators above
        query = params.get('query', {})
        return self.cost_query_set_mgr.list_cost_query_sets(query)

    @transaction(append_meta={
        'authorization.scope': 'USER',
        'mutation.append_parameter': {'user_self': 'user_id'}
    })
    @check_required(['query', 'domain_id'])
    @append_query_filter(['domain_id', 'user_self'])
    @append_keyword_filter(['cost_query_set_id', 'name'])
    def stat(self, params):
        """
        Args:
            params (dict): {
                'domain_id': 'str',
                'query': 'dict (spaceone.api.core.v1.StatisticsQuery)',
                'user_self': 'list', // from meta
            }

        Returns:
            values (list) : 'list of statistics data'
        """
        query = params.get('query', {})
        return self.cost_query_set_mgr.stat_cost_query_sets(query)
| StarcoderdataPython |
141784 | <reponame>REAM-lab/switch
import pandas as pd
from switch_model.wecc.get_inputs.register_post_process import post_process_step
@post_process_step(msg="Replacing _ALL_ZONES plants with a plant in each zone")
def post_process(_):
    """
    This post-process step replaces all the generation projects that have a load called
    _ALL_ZONES with a generation project for each load zone.
    """
    # Read load_zones.csv
    load_zones = pd.read_csv("load_zones.csv", index_col=False)
    load_zones["dbid_suffix"] = "_" + load_zones["zone_dbid"].astype(str)
    num_zones = len(load_zones)

    def replace_rows(
        plants_to_copy,
        filename,
        df=None,
        plants_col="GENERATION_PROJECT",
        load_column=None,
    ):
        """Expand rows whose plant id is in *plants_to_copy* into one row
        per load zone, rewriting *filename* in place."""
        # If the df does not already exist, read the file
        if df is None:
            df = pd.read_csv(filename, index_col=False)
        # Save the columns for later use
        df_col = df.columns
        df_rows = len(df)
        # Force the plants_col to string type to allow concating
        df = df.astype({plants_col: str})
        plants_to_copy = plants_to_copy.astype(str)
        # Extract the rows that need copying
        should_copy = df[plants_col].isin(plants_to_copy)
        rows_to_copy = df[should_copy]
        if rows_to_copy.empty:
            return
        # Filter out the plants that need replacing from our data frame
        df = df[~should_copy]
        # replacement is the cross join of the plants that need replacement
        # with the load zones. The cross join is done by joining over a column called
        # key that is always 1.
        replacement = rows_to_copy.assign(key=1).merge(
            load_zones.assign(key=1),
            on="key",
        )
        replacement[plants_col] = replacement[plants_col] + replacement["dbid_suffix"]
        if load_column is not None:
            # Set gen_load_zone to be the LOAD_ZONE column
            replacement[load_column] = replacement["LOAD_ZONE"]
        # Keep the same columns as originally
        replacement = replacement[df_col]
        # Add the replacement plants to our dataframe.
        # DataFrame.append was deprecated in pandas 1.4 and removed in 2.0;
        # pd.concat is the supported equivalent. ignore_index is safe here
        # because the index is discarded by to_csv(index=False) below.
        df = pd.concat([df, replacement], ignore_index=True)
        assert len(df) == df_rows + len(rows_to_copy) * (num_zones - 1)
        df.to_csv(filename, index=False)

    plants = pd.read_csv("generation_projects_info.csv", index_col=False)
    # Find the plants that need replacing
    to_replace = plants[plants["gen_load_zone"] == "_ALL_ZONES"]
    # If no plant needs replacing end there
    if to_replace.empty:
        return
    # If to_replace has variable capacity factors we raise exceptions
    # since the variabale capacity factors won't be the same across zones
    if any(to_replace["gen_is_variable"] == 1):
        raise Exception(
            "generation_projects_info.csv contains variable plants "
            "with load zone _ALL_ZONES. This is not allowed since "
            "copying variable capacity factors to all "
            "zones is not implemented (and likely unwanted)."
        )
    plants_to_replace = to_replace["GENERATION_PROJECT"]
    replace_rows(
        plants_to_replace,
        "generation_projects_info.csv",
        load_column="gen_load_zone",
        df=plants,
    )
    replace_rows(plants_to_replace, "gen_build_costs.csv")
    replace_rows(plants_to_replace, "gen_build_predetermined.csv")
| StarcoderdataPython |
3236461 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright © 2018 <NAME>
""" Container class for optical usage information
.. Created on Thu Jan 25 11:01:04 2018
.. codeauthor: <NAME>
"""
import math
import numpy as np
from rayoptics.parax.firstorder import compute_first_order, list_parax_trace
from rayoptics.raytr.trace import aim_chief_ray
from rayoptics.optical import model_enums
import rayoptics.optical.model_constants as mc
from opticalglass.spectral_lines import get_wavelength
import rayoptics.util.colour_system as cs
from rayoptics.util import colors
srgb = cs.cs_srgb
class OpticalSpecs:
    """ Container class for optical usage information

    Contains optical usage information to specify the aperture, field of view,
    spectrum and focal position.

    It maintains a repository of paraxial data.

    Attributes:
        spectral_region: instance of :class:`~.WvlSpec`
        pupil: instance of :class:`~.PupilSpec`
        field_of_view: instance of :class:`~.FieldSpec`
        defocus: instance of :class:`~.FocusRange`
        parax_data: tuple of :obj:`~.firstorder.ParaxData`
    """

    # class-wide default for chief-ray aiming in update_model()
    do_aiming_default = True

    def __init__(self, opt_model, specsheet=None, **kwargs):
        self.opt_model = opt_model
        self.spectral_region = WvlSpec(**kwargs)
        self.pupil = PupilSpec(self)
        self.field_of_view = FieldSpec(self)
        self.defocus = FocusRange(0.0)
        self.parax_data = None
        self.do_aiming = OpticalSpecs.do_aiming_default
        if specsheet:
            self.set_from_specsheet(specsheet)

    def __json_encode__(self):
        # drop back-reference and derived data from the serialized form
        attrs = dict(vars(self))
        del attrs['opt_model']
        del attrs['parax_data']
        del attrs['do_aiming']
        return attrs

    def set_from_list(self, dl):
        self.spectral_region = dl[0]
        self.pupil = dl[1]
        self.field_of_view = dl[2]

    def set_from_specsheet(self, ss):
        self.spectral_region.set_from_specsheet(ss)
        self.pupil.set_from_specsheet(ss)
        self.field_of_view.set_from_specsheet(ss)
        self.defocus.set_from_specsheet(ss)

    def sync_to_restore(self, opt_model):
        # re-establish the back-reference and fill attributes missing
        # from older serialized models
        self.opt_model = opt_model
        if not hasattr(self, 'defocus'):
            self.defocus = FocusRange(0.0)
        if not hasattr(self, 'do_aiming'):
            self.do_aiming = OpticalSpecs.do_aiming_default

        self.spectral_region.sync_to_restore(self)
        self.pupil.sync_to_restore(self)
        self.field_of_view.sync_to_restore(self)

    def update_model(self):
        self.spectral_region.update_model()
        self.pupil.update_model()
        self.field_of_view.update_model()
        stop = self.opt_model.seq_model.stop_surface
        wvl = self.spectral_region.central_wvl
        self.parax_data = compute_first_order(self.opt_model, stop, wvl)
        # re-aim each field's chief ray at the stop (needs >2 surfaces)
        if self.do_aiming and self.opt_model.seq_model.get_num_surfaces() > 2:
            for i, fld in enumerate(self.field_of_view.fields):
                aim_pt = aim_chief_ray(self.opt_model, fld, wvl)
                fld.aim_pt = aim_pt

    def lookup_fld_wvl_focus(self, fi, wl=None, fr=0.0):
        """ returns field, wavelength and defocus data

        Args:
            fi (int): index into the field_of_view list of Fields
            wl (int): index into the spectral_region list of wavelengths
            fr (float): focus range parameter, -1.0 to 1.0

        Returns:
            (**fld**, **wvl**, **foc**)

            - **fld** - :class:`Field` instance for field_of_view[fi]
            - **wvl** - wavelength in nm
            - **foc** - focus shift from image interface
        """
        if wl is None:
            wvl = self.spectral_region.central_wvl
        else:
            wvl = self.spectral_region.wavelengths[wl]
        fld = self.field_of_view.fields[fi]
        foc = self.defocus.get_focus(fr)
        return fld, wvl, foc

    def obj_coords(self, fld):
        """Return the object-space point for field *fld*, interpreting it
        per the field specification key (object|image, angle|height)."""
        fov = self.field_of_view
        fod = self.parax_data.fod
        field, obj_img_key, value_key = fov.key
        if obj_img_key == 'object':
            if value_key == 'angle':
                ang_dg = np.array([fld.x, fld.y, 0.0])
                dir_tan = np.tan(np.deg2rad(ang_dg))
                # project the field angle back to the object plane
                obj_pt = -dir_tan*(fod.obj_dist+fod.enp_dist)
            elif value_key == 'height':
                obj_pt = np.array([fld.x, fld.y, 0.0])
        elif obj_img_key == 'image':
            if value_key == 'height':
                img_pt = np.array([fld.x, fld.y, 0.0])
                # scale image height by the reduction ratio for object height
                obj_pt = fod.red*img_pt
        return obj_pt

    def list_first_order_data(self):
        self.parax_data.fod.list_first_order_data()

    def list_parax_trace(self):
        list_parax_trace(self.opt_model)
class WvlSpec:
    """ Class defining a spectral region

    A spectral region is a list of wavelengths (in nm) and corresponding
    weights. The central wavelength of the spectral region is central_wvl.
    The index into the wavelength list for central_wvl is reference_wvl.
    """

    def __init__(self, wlwts=[(550., 1.)], ref_wl=0, do_init=True, **kwargs):
        if do_init:
            self.set_from_list(wlwts)
        else:
            self.wavelengths = []
            self.spectral_wts = []
        self.reference_wvl = ref_wl  # index of the central wavelength
        self.coating_wvl = 550.0

    @property
    def central_wvl(self):
        """The wavelength (nm) at index reference_wvl."""
        return self.wavelengths[self.reference_wvl]

    @central_wvl.setter
    def central_wvl(self, wvl):
        self.wavelengths[self.reference_wvl] = wvl

    def set_from_list(self, wlwts):
        """Replace the spectrum with the (wavelength, weight) pairs *wlwts*."""
        self.wavelengths = []
        self.spectral_wts = []
        for wlwt in wlwts:
            self.wavelengths.append(get_wavelength(wlwt[0]))
            self.spectral_wts.append(wlwt[1])
        self.calc_colors()

    def sync_to_restore(self, optical_spec):
        # render colors are derived data; rebuild them after deserialization
        self.calc_colors()

    def set_from_specsheet(self, ss):
        pass

    def update_model(self):
        self.calc_colors()

    def add(self, wl, wt):
        """Add wavelength *wl* with weight *wt*, keeping the spectrum sorted.

        Bug fix: the original sorted a nonexistent ``self.spectrum``
        attribute, which raised AttributeError. The wavelength and weight
        lists are now sorted together, longest wavelength first, matching
        the original sort key (item 0, reverse=True).
        """
        self.wavelengths.append(get_wavelength(wl))
        self.spectral_wts.append(wt)
        pairs = sorted(zip(self.wavelengths, self.spectral_wts),
                       key=lambda wlwt: wlwt[0], reverse=True)
        self.wavelengths = [wvl for wvl, _ in pairs]
        self.spectral_wts = [weight for _, weight in pairs]

    def calc_colors(self):
        """Assign one render color per wavelength, by spectrum size."""
        accent = colors.accent_colors()
        self.render_colors = []
        num_wvls = len(self.wavelengths)
        if num_wvls == 1:
            self.render_colors.append(accent['green'])
        elif num_wvls > 1:
            # step orders the palette to match ascending/descending wavelengths
            step = 1 if self.wavelengths[0] < self.wavelengths[-1] else -1
            if num_wvls == 2:
                c = ['blue', 'red']
            elif num_wvls == 3:
                c = ['blue', 'green', 'red']
            elif num_wvls == 4:
                c = ['blue', 'green', 'yellow', 'red']
            elif num_wvls == 5:
                c = ['violet', 'cyan', 'green', 'yellow', 'red']
            elif num_wvls == 6:
                c = ['violet', 'cyan', 'green', 'yellow', 'red', 'magenta']
            else:
                c = ['violet', 'blue', 'cyan', 'green', 'yellow',
                     'red', 'magenta']
            self.render_colors = [accent[clr] for clr in c[::step]]
class PupilSpec:
    """ Aperture specification

    Attributes:
        key: 'aperture', 'object'|'image', 'pupil'|'NA'|'f/#'
        value: size of the pupil
        pupil_rays: list of relative pupil coordinates for pupil limiting rays
        ray_labels: list of string labels for pupil_rays
    """

    # default limiting rays: center plus the four marginal rays
    default_pupil_rays = [[0., 0.], [1., 0.], [-1., 0.], [0., 1.], [0., -1.]]
    default_ray_labels = ['00', '+X', '-X', '+Y', '-Y']

    def __init__(self, parent, key=('object', 'pupil'), value=1.0):
        self.optical_spec = parent
        self.key = 'aperture', key[0], key[1]
        self.value = value
        self.pupil_rays = PupilSpec.default_pupil_rays
        self.ray_labels = PupilSpec.default_ray_labels

    def __json_encode__(self):
        # drop the back-reference to the owning OpticalSpecs
        attrs = dict(vars(self))
        del attrs['optical_spec']
        return attrs

    def sync_to_restore(self, optical_spec):
        # migrate models saved with the legacy 'pupil_type' attribute
        if hasattr(self, 'pupil_type'):
            self.key = model_enums.get_ape_key_for_type(self.pupil_type)
            del self.pupil_type
        self.optical_spec = optical_spec

    def set_from_list(self, ppl_spec):
        self.key = model_enums.get_ape_key_for_type(ppl_spec[0])
        self.value = ppl_spec[1]

    def set_from_specsheet(self, ss):
        # pick the (obj_img_key, value_key) pair the specsheet filled in
        for k, v in ss.etendue_inputs['aperture'].items():
            if len(v) > 0:
                obj_img_key = k
                for k1, v1 in v.items():
                    value_key = k1
                break
        self.key = 'aperture', obj_img_key, value_key
        self.value = ss.etendue_inputs['aperture'][obj_img_key][value_key]

    def get_input_for_specsheet(self):
        return self.key, self.value

    def update_model(self):
        # fill defaults for models restored from older versions
        if not hasattr(self, 'pupil_rays'):
            self.pupil_rays = PupilSpec.default_pupil_rays
            self.ray_labels = PupilSpec.default_ray_labels

    def get_pupil_type(self):
        return model_enums.get_ape_type_for_key(self.key).value

    def mutate_pupil_type(self, new_pupil_type):
        """Switch the aperture specification mode, recomputing ``value``
        from first-order data when it is available."""
        ape_key = model_enums.get_ape_key_for_type(new_pupil_type)
        aperture, obj_img_key, value_key = ape_key
        if self.optical_spec is not None:
            if self.optical_spec.parax_data is not None:
                fod = self.optical_spec.parax_data.fod
                if obj_img_key == 'object':
                    if value_key == 'pupil':
                        self.value = 2*fod.enp_radius
                    elif value_key == 'NA':
                        self.value = fod.obj_na
                elif obj_img_key == 'image':
                    if value_key == 'f/#':
                        self.value = fod.fno
                    elif value_key == 'NA':
                        self.value = fod.img_na

        self.key = ape_key
class FieldSpec:
    """ Field of view specification

    Attributes:
        key: 'field', 'object'|'image', 'height'|'angle'
        fields: list of Field instances
    """

    def __init__(self, parent, key=('object', 'angle'), flds=[0.],
                 do_init=True, **kwargs):
        self.optical_spec = parent
        self.key = 'field', key[0], key[1]
        if do_init:
            self.set_from_list(flds)
        else:
            self.fields = []

    def __json_encode__(self):
        # drop the back-reference to the owning OpticalSpecs
        attrs = dict(vars(self))
        del attrs['optical_spec']
        return attrs

    def sync_to_restore(self, optical_spec):
        # migrate models saved with the legacy 'field_type' attribute
        if hasattr(self, 'field_type'):
            self.key = model_enums.get_fld_key_for_type(self.field_type)
            del self.field_type
        self.optical_spec = optical_spec

    def __str__(self):
        return "key={}, max field={}".format(self.key, self.max_field()[0])

    def set_from_list(self, flds):
        """Rebuild the field list from y-field values *flds*."""
        self.fields = [Field() for f in range(len(flds))]
        for i, f in enumerate(self.fields):
            f.y = flds[i]
        self.value, _ = self.max_field()

    def set_from_specsheet(self, ss):
        # pick the (obj_img_key, value_key) pair the specsheet filled in
        for k, v in ss.etendue_inputs['field'].items():
            if len(v) > 0:
                obj_img_key = k
                for k1, v1 in v.items():
                    value_key = k1
                break
        self.key = 'field', obj_img_key, value_key
        # two fields: on-axis plus the specified edge field
        flds = [0, ss.etendue_inputs['field'][obj_img_key][value_key]]
        self.set_from_list(flds)

    def get_input_for_specsheet(self):
        return self.key, self.max_field()[0]

    def update_model(self):
        for f in self.fields:
            f.update()

        # recalculate max_field and relabel fields.
        #  relabeling really assumes the fields are radial, specifically,
        #  y axis only
        max_field, fi = self.max_field()
        self.value = max_field
        field_norm = 1.0 if max_field == 0 else 1.0/max_field
        self.index_labels = []
        for i, f in enumerate(self.fields):
            if f.x != 0.0:
                fldx = '{:5.2f}x'.format(field_norm*f.x)
            else:
                fldx = ''
            if f.y != 0.0:
                fldy = '{:5.2f}y'.format(field_norm*f.y)
            else:
                fldy = ''
            self.index_labels.append(fldx + fldy)
        self.index_labels[0] = 'axis'
        if len(self.index_labels) > 1:
            self.index_labels[-1] = 'edge'
        return self

    def get_field_type(self):
        return model_enums.get_fld_type_for_key(self.key).value

    def mutate_field_type(self, new_field_type):
        """Switch the field specification mode, recomputing ``value``
        from first-order data when it is available."""
        osp = self.optical_spec
        fld_key = model_enums.get_fld_key_for_type(new_field_type)
        field, obj_img_key, value_key = fld_key
        if self.optical_spec is not None:
            if osp.parax_data is not None:
                fod = self.optical_spec.parax_data.fod
                if obj_img_key == 'object':
                    if value_key == 'height':
                        self.value = osp.parax_data.pr_ray[0][mc.ht]
                    elif value_key == 'angle':
                        self.value = fod.obj_ang
                elif obj_img_key == 'image':
                    if value_key == 'height':
                        self.value = fod.img_ht

        self.key = fld_key

    def max_field(self):
        """ calculates the maximum field of view

        Returns:
            magnitude of maximum field, maximum Field instance
        """
        max_fld = None
        max_fld_sqrd = -1.0
        for i, f in enumerate(self.fields):
            fld_sqrd = f.x*f.x + f.y*f.y
            if fld_sqrd > max_fld_sqrd:
                max_fld_sqrd = fld_sqrd
                max_fld = i
        return math.sqrt(max_fld_sqrd), max_fld
class Field:
    """ a single field point

    Attributes:
        x: x field component in absolute units
        y: y field component in absolute units
        vux: +x vignetting factor
        vuy: +y vignetting factor
        vlx: -x vignetting factor
        vly: -y vignetting factor
        wt: field weight
        aim_pt: x, y chief ray coords on the paraxial entrance pupil plane
        chief_ray: ray package for the ray from the field point throught the
                   center of the aperture stop, traced in the central
                   wavelength
        ref_sphere: a tuple containing (image_pt, ref_dir, ref_sphere_radius)
    """

    def __init__(self, x=0., y=0., wt=1.):
        self.x = x
        self.y = y
        self.vux = 0.0
        self.vuy = 0.0
        self.vlx = 0.0
        self.vly = 0.0
        self.wt = wt
        self.aim_pt = None
        self.chief_ray = None
        self.ref_sphere = None

    def __json_encode__(self):
        """Return a dict of attributes with derived ray data stripped.

        Bug fix: the original unconditionally deleted 'pupil_rays', a key
        that __init__ never sets, so serialization raised KeyError.
        pop() with a default removes each key only when present.
        """
        attrs = dict(vars(self))
        attrs.pop('chief_ray', None)
        attrs.pop('ref_sphere', None)
        attrs.pop('pupil_rays', None)
        return attrs

    def __str__(self):
        return "{}, {}".format(self.x, self.y)

    def __repr__(self):
        return "Field(x={}, y={}, wt={})".format(self.x, self.y, self.wt)

    def update(self):
        """Invalidate derived ray data after the field point changes."""
        self.chief_ray = None
        self.ref_sphere = None

    def apply_vignetting(self, pupil):
        """Scale relative pupil coords by the per-direction vignetting factors."""
        vig_pupil = pupil[:]
        if pupil[0] < 0.0:
            if self.vlx != 0.0:
                vig_pupil[0] *= (1.0 - self.vlx)
        else:
            if self.vux != 0.0:
                vig_pupil[0] *= (1.0 - self.vux)
        if pupil[1] < 0.0:
            if self.vly != 0.0:
                vig_pupil[1] *= (1.0 - self.vly)
        else:
            if self.vuy != 0.0:
                vig_pupil[1] *= (1.0 - self.vuy)
        return vig_pupil
class FocusRange:
    """Focus range specification.

    Attributes:
        focus_shift: focus shift (z displacement) from nominal image interface
        defocus_range: +/- half the total focal range, from the focus_shift
                       position
    """

    def __init__(self, focus_shift=0.0, defocus_range=0.0):
        self.focus_shift = focus_shift
        self.defocus_range = defocus_range

    def __repr__(self):
        return (f"FocusRange(focus_shift={self.focus_shift}, "
                f"defocus_range={self.defocus_range})")

    def set_from_specsheet(self, ss):
        """Do nothing."""
        pass

    def update(self):
        """Do nothing."""
        pass

    def get_focus(self, fr=0.0):
        """Return the focus position for a focus range parameter.

        Args:
            fr (float): focus range parameter, -1.0 to 1.0

        Returns:
            focus position for input focus range parameter
        """
        offset = fr * self.defocus_range
        return self.focus_shift + offset
| StarcoderdataPython |
1652502 | <gh_stars>1-10
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from unittest.case import TestCase
from fancontrol.sense import temperatures
class NoneTests(TestCase):
    """get_temps() must pass None entries through untouched and return a
    float for every real (chip, feature) pair."""

    def _assert_pattern(self, chips, features, pattern):
        # pattern: one char per entry — 'n' expects None, 'f' expects float.
        temps = temperatures.get_temps(chips=chips, features=features)
        for temp, kind in zip(temps, pattern):
            if kind == 'n':
                assert isinstance(temp, type(None))
            else:
                assert type(temp) is float

    def test_get_temps_None_only(self):
        self._assert_pattern([None], [None], 'n')

    def test_get_temps_None_multiple(self):
        self._assert_pattern([None, None], [None, None], 'nn')

    def test_get_temps_None_before(self):
        self._assert_pattern([None, 'k10temp', 'it8718'],
                             [None, 'temp1', 'temp1'], 'nff')

    def test_get_temps_None_between(self):
        self._assert_pattern(['k10temp', None, 'it8718'],
                             ['temp1', None, 'temp1'], 'fnf')

    def test_get_temps_None_after(self):
        self._assert_pattern(['k10temp', 'it8718', None],
                             ['temp1', 'temp1', None], 'ffn')

    def test_get_temps_chip_before(self):
        self._assert_pattern(['k10temp', None, None],
                             ['temp1', None, None], 'fnn')

    def test_get_temps_chip_between(self):
        self._assert_pattern([None, 'k10temp', None],
                             [None, 'temp1', None], 'nfn')

    def test_get_temps_chip_after(self):
        self._assert_pattern([None, None, 'k10temp'],
                             [None, None, 'temp1'], 'nnf')
class SensorTests(TestCase):
    """get_temps() must return one float per requested (chip, feature) pair."""

    @staticmethod
    def _temps_for(chips, features):
        return temperatures.get_temps(chips=chips, features=features)

    def test_get_temps_one_chip_multiple_sensors(self):
        temps = self._temps_for(['it8718', 'it8718'], ['temp1', 'temp2'])
        for temp in temps:
            assert type(temp) is float

    def test_get_temps_multiple_chips_one_sensor_each(self):
        temps = self._temps_for(['k10temp', 'it8718'], ['temp1', 'temp1'])
        for temp in temps:
            assert type(temp) is float

    def test_get_temps_one_chip_one_sensor_multiple_fans(self):
        temps = self._temps_for(['k10temp', 'k10temp'], ['temp1', 'temp1'])
        for temp in temps:
            assert type(temp) is float
        # The same sensor queried twice must yield the same reading.
        assert temps[0] == temps[1]
| StarcoderdataPython |
3206408 | <gh_stars>1-10
import requests
class Member:
    """A model representing a member of the club.

    Attributes:
        first: The member's first name.
        last: The member's last name.
        email: The member's email address.
    """

    def __init__(self, row):
        """Creates a member model from a row in the Google Sheet.

        Args:
            row: The spreadsheet row for this member: [email, first, last].
        """
        self.email = row[0]
        self.first = row[1]
        self.last = row[2]

    def __repr__(self):
        """Returns a string usable in a to/cc/bcc field of an email,
        e.g. "Jane Doe <jane@example.com>".
        """
        return f"{self.first} {self.last} <{self.email}>"
def request_members(config):
    """Requests all members from the Google Sheet.

    Args:
        config: The config model that was generated from config.json.

    Returns:
        A list of all members in the Google Sheet, or None when the API key,
        spreadsheet id, or response data is missing/invalid.
    """
    # 1000 is arbitrary since there doesn't seem to be a "last row" selector.
    #
    # Choosing a value that is greater than the number of rows in your
    # spreadsheet causes no issues, so just pick a bigger number if necessary.
    range_ = "A1:C1000"
    api_key = config.keys.google_api_key
    # Idiom fix: the original compared with `== None` (PEP 8 E711); a falsy
    # check covers both None and the empty string.
    if not api_key:
        print("Your API key was entered incorrectly in config.json. You can generate a key at https://console.developers.google.com/apis/credentials")
        return None
    spreadsheet_id = config.keys.spreadsheet_id
    if not spreadsheet_id:
        print("Your spreadsheet id was entered incorrectly in config.json. You can find your spreadsheet id in your Google Sheet's URL, e.g. https://docs.google.com/spreadsheets/u/1/d/{SPREADSHEET_ID_IS_HERE}/edit")
        return None
    values = (spreadsheet_id, range_, api_key)
    r = requests.get("https://sheets.googleapis.com/v4/spreadsheets/%s/values/%s?key=%s" % values)
    json = r.json()
    # Error responses carry no "values" key.
    if "values" not in json:
        print("There was an issue retrieving data from your Google Sheet. Make sure that your spreadsheet_id is correct in config.json.")
        return None
    members_json = json["values"]
    # Remove the first row since this just has column names
    members_json.pop(0)
    return [Member(member_data) for member_data in members_json]
| StarcoderdataPython |
1651664 | from oscar.agent.commander.base_commander import BaseCommander
class QueueCommander(BaseCommander):
    """Commander that hands each turn to its subordinates in round-robin order."""

    def __init__(self, subordinates):
        super().__init__(subordinates)
        self.__next_agent = 0

    def choose_subordinate(self, obs):
        """
        Round robin distribution
        :return: The chosen
        """
        chosen = self._subordinates[self.__next_agent]
        self.__next_agent = (self.__next_agent + 1) % len(self._subordinates)
        return chosen
| StarcoderdataPython |
3355974 | <reponame>Honcharov12/appscale
""" Handles operations related to instance registration. """
import json
import logging
import random
from kazoo.exceptions import NodeExistsError, NoNodeError
from tornado import gen
from tornado.httpclient import AsyncHTTPClient
from appscale.admin.instance_manager.constants import VERSION_REGISTRATION_NODE
from appscale.admin.instance_manager.instance import Instance
from appscale.common import appscale_info
from appscale.common.constants import VERSION_PATH_SEPARATOR
from appscale.hermes.constants import HERMES_PORT
logger = logging.getLogger('appscale-instance-manager')


class RoutingClient(object):
    """ Handles operations related to instance registration. """

    def __init__(self, zk_client, private_ip, secret):
        """ Creates a new RoutingClient.

        Args:
            zk_client: A kazoo.client.KazooClient object.
            private_ip: A string specifying the current machine's private IP address.
            secret: A string specifying the deployment secret.
        """
        self._private_ip = private_ip
        self._secret = secret
        self._zk_client = zk_client

    @gen.coroutine
    def get_failed_instances(self):
        """ Fetches a list of failed instances on this machine according to HAProxy.

        Returns:
            A set of tuples specifying the version key and port of failed instances.
        """
        # Any load balancer can answer; pick one at random.
        load_balancer = random.choice(appscale_info.get_load_balancer_ips())
        payload = {'include_lists': {
            'proxy': ['name', 'servers'],
            'proxy.server': ['private_ip', 'port', 'status']}
        }
        headers = {'AppScale-Secret': self._secret}
        url = 'http://{}:{}/stats/local/proxies'.format(load_balancer, HERMES_PORT)
        client = AsyncHTTPClient()
        # allow_nonstandard_methods lets a body be sent with this request.
        response = yield client.fetch(url, headers=headers, body=json.dumps(payload),
                                      allow_nonstandard_methods=True)
        proxy_stats = json.loads(response.body)['proxies_stats']
        # Proxies for app versions are named "gae_<version_key>".
        routed_versions = [server for server in proxy_stats
                           if server['name'].startswith('gae_')]
        failed_instances = set()
        for version in routed_versions:
            version_key = version['name'][len('gae_'):]
            for server in version['servers']:
                # Only consider servers on this machine that HAProxy marks DOWN.
                if server['private_ip'] != self._private_ip:
                    continue
                if not server['status'].startswith('DOWN'):
                    continue
                failed_instances.add((version_key, server['port']))
        raise gen.Return(failed_instances)

    def register_instance(self, instance):
        """ Adds a registration entry for an instance.

        Args:
            instance: An Instance.
        """
        instance_entry = ':'.join([self._private_ip, str(instance.port)])
        instance_node = '/'.join([VERSION_REGISTRATION_NODE, instance.version_key,
                                  instance_entry])
        # Create the node, or update its revision if it already exists.
        try:
            self._zk_client.create(instance_node, instance.revision.encode('utf-8'))
        except NodeExistsError:
            self._zk_client.set(instance_node, instance.revision.encode('utf-8'))

    def unregister_instance(self, instance):
        """ Removes a registration entry for an instance.

        Args:
            instance: An Instance.
        """
        instance_entry = ':'.join([self._private_ip, str(instance.port)])
        instance_node = '/'.join([VERSION_REGISTRATION_NODE, instance.version_key,
                                  instance_entry])
        # Deleting an already-absent node is not an error.
        try:
            self._zk_client.delete(instance_node)
        except NoNodeError:
            pass

    def declare_instance_nodes(self, running_instances):
        """ Removes dead ZooKeeper instance entries and adds running ones.

        Args:
            running_instances: An iterable of Instances.
        """
        # NOTE(review): the set differences below require running_instances
        # to be a set despite the "iterable" docstring — confirm callers.
        registered_instances = set()
        for version_key in self._zk_client.get_children(VERSION_REGISTRATION_NODE):
            version_node = '/'.join([VERSION_REGISTRATION_NODE, version_key])
            for instance_entry in self._zk_client.get_children(version_node):
                # Entries are "<ip>:<port>"; skip entries for other machines.
                machine_ip = instance_entry.split(':')[0]
                if machine_ip != self._private_ip:
                    continue
                port = int(instance_entry.split(':')[-1])
                instance_node = '/'.join([version_node, instance_entry])
                revision = self._zk_client.get(instance_node)[0]
                revision_key = VERSION_PATH_SEPARATOR.join([version_key, revision])
                registered_instances.add(Instance(revision_key, port))

        # Remove outdated nodes.
        for instance in registered_instances - running_instances:
            self.unregister_instance(instance)

        # Add nodes for running instances.
        for instance in running_instances - registered_instances:
            self.register_instance(instance)
| StarcoderdataPython |
1719624 | <filename>tests/test_scoring/test_scoring_functions.py<gh_stars>0
from scoring.scoring_functions import PreferenceScoring, RatioCharacteristicConfigurationPenalty, WeightedFeaturePenalty, ReduceScoring
from scoring.value_functions import ValueToValueFunction
from model.configuration_model import ConfigurationModel
from model.preferences_model import Preferences
from scoring.list_functions import Min, Average
from scoring.preferences_functions import FlattenPreferencesToListFunction
from model.product_structure_model import ProductStructureModel
# ---- Shared fixtures for the scoring-function tests below ----

# Ratings from two users over the characteristic codes defined in
# `product_structure`: user0 rates A1/A2/B1, user1 rates A1/B2.
preferences = Preferences({
    'preferences': [
        {
            'user': "user0",
            'ratings': [{
                'code': 'A1',
                'value': 0
            }, {
                'code': 'A2',
                'value': 1
            }, {
                'code': 'B1',
                'value': 0.5
            }
            ]
        },
        {
            'user': "user1",
            'ratings': [{
                'code': 'A1',
                'value': 1
            }, {
                'code': 'B2',
                'value': 1
            }
            ]
        }
    ]
})

# The configuration treated as "current" in the tests: A2 and B2 selected.
currentConfiguration = ConfigurationModel({
    'configuration': ['A2', 'B2'],
    'variables': []
})

# The candidate configuration whose score is computed in each test.
toRate = ConfigurationModel({
    'configuration': ['A1', 'B2'],
    'variables': []
})

# Two FEATURE parents (A, B), each with two CHARACTERISTIC children.
product_structure = ProductStructureModel({
    'ProductStructure': [
        {
            'elementId': 'A',
            'name': 'parent_element A',
            'type': "FEATURE",
            'additionalData': [],
            'children': [
                {
                    'elementId': 'A1',
                    'name': 'child A1',
                    'children': [],
                    'additionalData': [],
                    'type': "CHARACTERISTIC"
                },
                {
                    'elementId': 'A2',
                    'name': 'child A2',
                    'children': [],
                    'additionalData': [],
                    'type': "CHARACTERISTIC"
                }
            ],
        }, {
            'elementId': 'B',
            'name': 'parent_element B',
            'type': "FEATURE",
            'additionalData': [],
            'children': [
                {
                    'elementId': 'B1',
                    'name': 'child B1',
                    'children': [],
                    'additionalData': [],
                    'type': "CHARACTERISTIC"
                },
                {
                    'elementId': 'B2',
                    'name': 'child B2',
                    'children': [],
                    'additionalData': [],
                    'type': "CHARACTERISTIC"
                }
            ],
        },
    ]
})
class TestRatioCharacteristicConfigurationPenalty:
    """Tests for RatioCharacteristicConfigurationPenalty."""

    def test_simple_example(self):
        scorer = RatioCharacteristicConfigurationPenalty(
            product_structure, [ValueToValueFunction()])
        assert scorer.calc_score(currentConfiguration, preferences, toRate) == 0.5
class TestWeightedFeaturePenalty:
    """Tests for WeightedFeaturePenalty."""

    def test_simple_example(self):
        scorer = WeightedFeaturePenalty(product_structure, Min(), Average())
        assert scorer.calc_score(currentConfiguration, preferences, toRate) == 0.375
class TestReduceScoring:
    """Tests for ReduceScoring."""

    def test_combined(self):
        combined = ReduceScoring([
            RatioCharacteristicConfigurationPenalty(
                product_structure, [ValueToValueFunction()]),
            WeightedFeaturePenalty(product_structure, Min(), Average()),
        ])
        assert combined.calc_score(currentConfiguration, preferences, toRate) == 0.875

    def test_none(self):
        empty = ReduceScoring([])
        assert empty.calc_score(currentConfiguration, preferences, toRate) == 0
class TestPreferenceScoring:
    """Tests for PreferenceScoring."""

    def test_simple_example(self):
        scorer = PreferenceScoring(FlattenPreferencesToListFunction(), Min())
        assert scorer.calc_score(currentConfiguration, preferences, toRate) == 0
| StarcoderdataPython |
4841587 | # Sierpinski triangle.
# Run the Module (or type F5).
from turtle import *
def sierpinski(length, level):
    """Recursively draw a filled red Sierpinski triangle with the turtle.

    Args:
        length: side length of the current triangle.
        level: remaining recursion depth; 0 draws nothing.
    """
    speed(0)  # Fastest speed.
    if level == 0:
        return
    begin_fill()  # Fill shape.
    color("red")
    for _ in range(3):
        # Draw one half-size sub-triangle, then advance along this edge.
        sierpinski(length / 2, level - 1)
        forward(length)
        left(120)  # Turn 120 degrees toward the next edge.
    end_fill()
| StarcoderdataPython |
1738289 | <reponame>Zeppelinen-DevOps/ansible-selvpc-modules<gh_stars>10-100
from ansible.module_utils.selvpc_utils import common, wrappers
@wrappers.create_object('project')
def create_project(module, client, project_name):
    """Create a SelVPC project and report the creation."""
    created = client.projects.create(project_name)
    msg = "Project '{}' has been created".format(project_name)
    return created, True, msg
@wrappers.get_object('project')
@common.check_project_id
def get_project(module, client, project_id, project_name, show_list=False):
    """Return one project by id, or every project when show_list is set."""
    if show_list:
        return client.projects.list()
    return client.projects.show(project_id)
@wrappers.update_object
@common.check_project_id
def update_project(module, client, project_id, project_name,
                   new_project_name):
    """Rename a project unless the target name is already taken."""
    if common.get_project_by_name(client, new_project_name):
        return False, "Project with such name already exists"
    client.projects.update(project_id, new_project_name)
    return True, "Project updated"
@wrappers.delete_object
@common.check_project_id
def delete_project(module, client, project_id, project_name):
    # Delete the project identified by project_id; project_name is unused
    # here but required by the decorator-driven calling convention above.
    client.projects.delete(project_id)
| StarcoderdataPython |
1710051 | #%%
# Cell-based script ('#%%' markers): tune a k-NN classifier on MNIST with a
# grid search and pickle the fitted search object.
import numpy as np
#%%
from sklearn.datasets import fetch_openml
# Fetch the MNIST dataset (downloads on first run, cached afterwards).
mnist = fetch_openml('mnist_784', version=1)
# %%
X, y = mnist["data"], mnist["target"]
# %%
# Cast labels to small unsigned ints.
y = y.astype(np.uint8)
# %%
# Standard MNIST split: first 60000 samples for training, the rest for test.
X_train, X_test, y_train, y_test = X[:60000], X[60000:], y[:60000], y[60000:]
# %%
from sklearn.neighbors import KNeighborsClassifier
kn_clf = KNeighborsClassifier()
#kn_clf.fit(X_train, y_train)
#%%
from datetime import datetime
start = datetime.now()
from sklearn.model_selection import GridSearchCV
# Grid over neighbor count and vote weighting; 3-fold CV on all cores.
param_grid = [
    {'n_neighbors': [3, 5, 8], 'weights': ['uniform', 'distance']}
]
grid_search = GridSearchCV(kn_clf, param_grid, cv=3,
                           scoring='accuracy',
                           return_train_score=True, verbose=3, n_jobs=-1, pre_dispatch=2)
print(f'Start grid search {grid_search}')
grid_search.fit(X_train, y_train)
# %%
print(f'Completed in {datetime.now()-start} with best params: {grid_search.best_params_}')
# %%
import joblib
# Persist the fitted search (best estimator + CV results) for later reuse.
joblib.dump(grid_search, 'grid_search.pkl')
| StarcoderdataPython |
3254843 | <reponame>erleiuat/el-code<filename>src/test/langFiles/python.py<gh_stars>0
test = '123'


def bla(text):
    """Print *text* when it is truthy, otherwise print 'no'."""
    # Idiom fix: `if(text):` replaced with the plain truthiness test.
    if text:
        print(text)
    else:
        print('no')


bla(test)
1628323 | <reponame>tcprescott/zabbix-jolokia-jmx<filename>scripts/jolokia_jmx_discovery.py
#!/usr/bin/python
# Zabbix low-level-discovery helper: queries a local Jolokia agent for a JMX
# bean/key pair and prints the matching MBeans as Zabbix LLD JSON.
# NOTE(review): uses urllib2 and a `#print jmxobj_dict` Python-2 style
# comment, so this script targets Python 2 only.
import urllib2
import json
import sys
import time
#from pprint import pprint
#verify we have at least two arguments
if len(sys.argv) < 3:
    print("at least two arguments required!")
    exit(1)
#see if arg3 exists, if not then use 13337 as the default port
arg1 = sys.argv[1].replace(' ','%20')
arg2 = sys.argv[2].replace(' ','%20')
try:
    port = sys.argv[3]
except IndexError:
    port = "13337"
#argument 1 is the bean
#argument 2 is the key
url = "http://localhost:" + port + "/jolokia/read/" + arg1 + "/" + arg2
#url = "http://localhost:" + port + "/jolokia/read/" + sys.argv[1]
#grab the json
# An empty ProxyHandler disables any environment-configured HTTP proxy.
proxy_support = urllib2.ProxyHandler({})
opener = urllib2.build_opener(proxy_support)
page = opener.open(url).read()
#put in the response dictionary
resp_dict = json.loads(page)
#log what happened, this is for testing. Also let Zabbix know that the item sent was not supported.
if resp_dict['status']!=200:
    print("ZBX_NOTSUPPORTED")
    exit()
#    open("/var/log/zabbix/jolokia_jmx.log","a+").write(str(time.time()) + " " + str(resp_dict['status']) + " " + url + "\n")
#print out the requested value
#data = list()
data = list()
line = {}
# NOTE(review): `j` is incremented but never read, and `time` is only used
# by the commented-out log line above — both look like leftovers.
j = 0
for jmxobj in resp_dict['value']:
    #print(resp_dict['value'][jmxobj][arg2])
    #print(jmxobj)
    jmxobj_dict = jmxobj.split(':')
    #print jmxobj_dict
    line["{#JMXOBJ}"] = jmxobj.replace('\"','%22')
    line["{#JMXOBJ_BEAN}"] = jmxobj_dict[0]
    jmxobj_attr = jmxobj_dict[1].split(',')
    for i in range(len(jmxobj_attr)):
        jmxobj_attr_s = jmxobj_attr[i]
        attrname = jmxobj_attr_s.split('=')[0]
        attrval = jmxobj_attr_s.split('=')[1].replace('\"','%22')
        line['{#JMXOBJ_ATTR_' + attrname.upper() + '}'] = attrval
    j = j + 1
    # NOTE(review): `line` is reused across iterations without being cleared,
    # so attribute keys from earlier MBeans can leak into later entries.
    data.append(line.copy())
print(json.dumps({"data": data}))
| StarcoderdataPython |
3214590 | <reponame>kuzaku-developers/disnake<gh_stars>0
# Re-export the full contents of disnake.ui.item from this module: the
# star-import brings in the public names, and copying the submodule's
# entire __dict__ also pulls in names that `import *` would skip.
from disnake.ui.item import *
from disnake.ui.item import __dict__ as __original_dict__

locals().update(__original_dict__)
| StarcoderdataPython |
import pandas as pd
import matplotlib.pyplot as plt
#import numpy as np
#from scipy.interpolate import interp1d
from matplotlib.pyplot import figure

# Global font settings for the figure.
font = {'family': 'Times New Roman',
        'size': 28}
plt.rc('font', **font)

figure(num=None, figsize=(17, 5))

# NOTE(review): hard-coded absolute Windows path; parameterize if reused.
data = pd.read_csv('C:\\Users\\<NAME>\\Downloads\\Main - RFR Est1.csv')

# Plot MSE against the number of Random Forest estimators.
plt.plot(data['Estimators'], data['MSE'], 'r*--', label='MSE')
plt.xlabel('Estimators')
plt.ylabel('Values')
# Bug fix: 'bottom right' is not a valid matplotlib legend location (recent
# versions raise, older ones warn and fall back); the valid name is
# 'lower right'.
plt.legend(loc='lower right')
plt.xticks(data['Estimators'])
plt.grid(color='black', linestyle='-.', linewidth=2, alpha=0.3)
plt.show()
3366198 | """File generated by TLObjects' generator. All changes will be ERASED"""
from ...tl.tlobject import TLObject
from ...tl.tlobject import TLRequest
from typing import Optional, List, Union, TYPE_CHECKING
import os
import struct
from datetime import datetime
if TYPE_CHECKING:
from ...tl.types import TypeInputChannel, TypeInputPeer
class GetBroadcastStatsRequest(TLRequest):
    """Generated TL request object; invoking it returns `stats.BroadcastStats`."""
    CONSTRUCTOR_ID = 0xab42441a
    SUBCLASS_OF_ID = 0x7ff25428

    def __init__(self, channel: 'TypeInputChannel', dark: Optional[bool]=None):
        """
        :returns stats.BroadcastStats: Instance of BroadcastStats.
        """
        self.channel = channel
        self.dark = dark

    async def resolve(self, client, utils):
        # Resolve whatever the caller passed into an InputChannel before
        # serialization.
        self.channel = utils.get_input_channel(await client.get_input_entity(self.channel))

    def to_dict(self):
        return {
            '_': 'GetBroadcastStatsRequest',
            'channel': self.channel.to_dict() if isinstance(self.channel, TLObject) else self.channel,
            'dark': self.dark
        }

    def _bytes(self):
        # TL serialization: constructor id, flags (bit 0 = dark), channel.
        return b''.join((
            b'\x1aDB\xab',
            struct.pack('<I', (0 if self.dark is None or self.dark is False else 1)),
            self.channel._bytes(),
        ))

    @classmethod
    def from_reader(cls, reader):
        flags = reader.read_int()
        _dark = bool(flags & 1)
        _channel = reader.tgread_object()
        return cls(channel=_channel, dark=_dark)
class GetMegagroupStatsRequest(TLRequest):
    """Generated TL request object; invoking it returns `stats.MegagroupStats`."""
    CONSTRUCTOR_ID = 0xdcdf8607
    SUBCLASS_OF_ID = 0x5b59be8d

    def __init__(self, channel: 'TypeInputChannel', dark: Optional[bool]=None):
        """
        :returns stats.MegagroupStats: Instance of MegagroupStats.
        """
        self.channel = channel
        self.dark = dark

    async def resolve(self, client, utils):
        # Resolve the caller-supplied peer into an InputChannel before sending.
        self.channel = utils.get_input_channel(await client.get_input_entity(self.channel))

    def to_dict(self):
        return {
            '_': 'GetMegagroupStatsRequest',
            'channel': self.channel.to_dict() if isinstance(self.channel, TLObject) else self.channel,
            'dark': self.dark
        }

    def _bytes(self):
        # TL serialization: constructor id, flags (bit 0 = dark), channel.
        return b''.join((
            b'\x07\x86\xdf\xdc',
            struct.pack('<I', (0 if self.dark is None or self.dark is False else 1)),
            self.channel._bytes(),
        ))

    @classmethod
    def from_reader(cls, reader):
        flags = reader.read_int()
        _dark = bool(flags & 1)
        _channel = reader.tgread_object()
        return cls(channel=_channel, dark=_dark)
class GetMessagePublicForwardsRequest(TLRequest):
    """Generated TL request object; invoking it returns `messages.Messages`."""
    CONSTRUCTOR_ID = 0x5630281b
    SUBCLASS_OF_ID = 0xd4b40b5e

    def __init__(self, channel: 'TypeInputChannel', msg_id: int, offset_rate: int, offset_peer: 'TypeInputPeer', offset_id: int, limit: int):
        """
        :returns messages.Messages: Instance of either Messages, MessagesSlice, ChannelMessages, MessagesNotModified.
        """
        self.channel = channel
        self.msg_id = msg_id
        self.offset_rate = offset_rate
        self.offset_peer = offset_peer
        self.offset_id = offset_id
        self.limit = limit

    async def resolve(self, client, utils):
        # Resolve both peers into their Input* forms before serialization.
        self.channel = utils.get_input_channel(await client.get_input_entity(self.channel))
        self.offset_peer = utils.get_input_peer(await client.get_input_entity(self.offset_peer))

    def to_dict(self):
        return {
            '_': 'GetMessagePublicForwardsRequest',
            'channel': self.channel.to_dict() if isinstance(self.channel, TLObject) else self.channel,
            'msg_id': self.msg_id,
            'offset_rate': self.offset_rate,
            'offset_peer': self.offset_peer.to_dict() if isinstance(self.offset_peer, TLObject) else self.offset_peer,
            'offset_id': self.offset_id,
            'limit': self.limit
        }

    def _bytes(self):
        # TL serialization: constructor id followed by the fixed field order.
        return b''.join((
            b'\x1b(0V',
            self.channel._bytes(),
            struct.pack('<i', self.msg_id),
            struct.pack('<i', self.offset_rate),
            self.offset_peer._bytes(),
            struct.pack('<i', self.offset_id),
            struct.pack('<i', self.limit),
        ))

    @classmethod
    def from_reader(cls, reader):
        _channel = reader.tgread_object()
        _msg_id = reader.read_int()
        _offset_rate = reader.read_int()
        _offset_peer = reader.tgread_object()
        _offset_id = reader.read_int()
        _limit = reader.read_int()
        return cls(channel=_channel, msg_id=_msg_id, offset_rate=_offset_rate, offset_peer=_offset_peer, offset_id=_offset_id, limit=_limit)
class GetMessageStatsRequest(TLRequest):
    """Generated TL request object; invoking it returns `stats.MessageStats`."""
    CONSTRUCTOR_ID = 0xb6e0a3f5
    SUBCLASS_OF_ID = 0x9604a322

    def __init__(self, channel: 'TypeInputChannel', msg_id: int, dark: Optional[bool]=None):
        """
        :returns stats.MessageStats: Instance of MessageStats.
        """
        self.channel = channel
        self.msg_id = msg_id
        self.dark = dark

    async def resolve(self, client, utils):
        # Resolve the caller-supplied peer into an InputChannel before sending.
        self.channel = utils.get_input_channel(await client.get_input_entity(self.channel))

    def to_dict(self):
        return {
            '_': 'GetMessageStatsRequest',
            'channel': self.channel.to_dict() if isinstance(self.channel, TLObject) else self.channel,
            'msg_id': self.msg_id,
            'dark': self.dark
        }

    def _bytes(self):
        # TL serialization: constructor id, flags (bit 0 = dark), channel,
        # then msg_id.
        return b''.join((
            b'\xf5\xa3\xe0\xb6',
            struct.pack('<I', (0 if self.dark is None or self.dark is False else 1)),
            self.channel._bytes(),
            struct.pack('<i', self.msg_id),
        ))

    @classmethod
    def from_reader(cls, reader):
        flags = reader.read_int()
        _dark = bool(flags & 1)
        _channel = reader.tgread_object()
        _msg_id = reader.read_int()
        return cls(channel=_channel, msg_id=_msg_id, dark=_dark)
class LoadAsyncGraphRequest(TLRequest):
    """Generated TL request object; invoking it returns a `StatsGraph` variant."""
    CONSTRUCTOR_ID = 0x621d5fa0
    SUBCLASS_OF_ID = 0x9b903153

    def __init__(self, token: str, x: Optional[int]=None):
        """
        :returns StatsGraph: Instance of either StatsGraphAsync, StatsGraphError, StatsGraph.
        """
        self.token = token
        self.x = x

    def to_dict(self):
        return {
            '_': 'LoadAsyncGraphRequest',
            'token': self.token,
            'x': self.x
        }

    def _bytes(self):
        # TL serialization: flags bit 0 marks the presence of the optional x.
        return b''.join((
            b'\xa0_\x1db',
            struct.pack('<I', (0 if self.x is None or self.x is False else 1)),
            self.serialize_bytes(self.token),
            b'' if self.x is None or self.x is False else (struct.pack('<q', self.x)),
        ))

    @classmethod
    def from_reader(cls, reader):
        flags = reader.read_int()
        _token = reader.tgread_string()
        if flags & 1:
            _x = reader.read_long()
        else:
            _x = None
        return cls(token=_token, x=_x)
| StarcoderdataPython |
4837606 | <gh_stars>1-10
from django.db import models
from django.core.validators import (
RegexValidator,
MinLengthValidator,
MaxLengthValidator,
)
from django.contrib.auth.models import User
class Worker(User):
    """Auth User extended with a permission level and a CPF document number."""

    # Single-character permission codes stored in the database.
    PERMISSION_CHOICES = (
        ('1', 'Worker'),
        ('2', 'Admin'),
    )
    # NOTE(review): default=1 is an int while the choices use the string
    # '1' — confirm the intended default type.
    permission = models.CharField(
        default=1,
        max_length=1,
        null=False,
        blank=False,
        choices=PERMISSION_CHOICES,
        validators=[
            MinLengthValidator(1),
            MaxLengthValidator(1)
        ]
    )
    # Brazilian CPF; the regex's first alternative also matches a
    # 14-character CNPJ-style string — presumably intentional, verify.
    # NOTE(review): default='None' is the literal string "None"; combined
    # with unique=True, a second row created with the default would violate
    # the unique constraint — confirm whether this was intended.
    cpf = models.CharField(
        max_length=11,
        default='None',
        null=False,
        unique=True,
        blank=False,
        validators=[
            MinLengthValidator(11),
            MaxLengthValidator(11),
            RegexValidator(
                regex=(
                    r'([0-9]{2}[\.]?[0-9]{3}[\.]?[0-9]{3}[\/]?[0-9]{4}[-]?[0-9]{2})'
                    r'|([0-9]{3}[\.]?[0-9]{3}[\.]?[0-9]{3}[-]?[0-9]{2})'
                )
            )
        ]
    )
| StarcoderdataPython |
37198 | import unittest
import mock
import Tkinter
from cursecreator import Application
class TestNPCCreator(unittest.TestCase):
    """Tests for the curse-creator Application."""

    def setUp(self):
        # Each test gets a fresh Application bound to its own Tk root.
        tk_root = Tkinter.Tk()
        self.app = Application(tk_root)

    def test_attribute_fixer(self):
        fixer = self.app.attribute_fixer
        self.assertTrue(fixer("health", 0))
        self.assertFalse(fixer("banana", 0))


if __name__ == "__main__":
    unittest.main()
1697349 | """Delete empty keywords
Revision ID: dac430582787
Revises: <PASSWORD>
Create Date: 2020-01-30 20:08:37.311976+00:00
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
# NOTE(review): the revision strings below contain '<PASSWORD>' placeholders
# (apparently redacted); restore the real revision ids before running this
# migration.
revision = 'dac<PASSWORD>'
down_revision = '<PASSWORD>'
branch_labels = None
depends_on = None

# Minimal table definition used only by this migration, mirroring the
# columns of `keywords` that the upgrade touches (avoids importing the
# application's models).
keywords = sa.Table(
    'keywords', sa.MetaData(),
    sa.Column('id', sa.Integer, primary_key=True, nullable=False),
    sa.Column('wheel_data_id', sa.Integer, nullable=False),
    sa.Column('name', sa.Unicode(2048), nullable=False),
    sa.UniqueConstraint('wheel_data_id', 'name'),
)
def sql_strip(col):
    """Build a SQL ``TRIM(col, chars)`` expression stripping ASCII whitespace.

    This produces `TRIM(col, chars)`, which works in PostgreSQL and SQLite
    but not MySQL.
    """
    whitespace = ' \t\n\r\x0B\f'
    return sa.func.trim(col, whitespace)
# MySQL requires the SQL syntax `TRIM(chars FROM col)` (note the reversed
# order of operands), which also works in PostgreSQL but not SQLite. I
# can't figure out how to express this in SQLALchemy without using
# `text()`, and on top of that is the problem of ensuring that the correct
# syntax is emitted for whichever database type is in use.
def upgrade():
    # First delete keywords whose name is empty after trimming whitespace,
    # then strip leading/trailing whitespace from the names that remain.
    conn = op.get_bind()
    conn.execute(keywords.delete().where(sql_strip(keywords.c.name) == ''))
    conn.execute(keywords.update().values(name=sql_strip(keywords.c.name)))
def downgrade():
    # Data-only migration; the deleted/trimmed values cannot be restored.
    pass
| StarcoderdataPython |
3383037 | import pandas as pd
import unittest
import ray
from ray import tune
from ray.tune import session
def _check_json_val(fname, key, val):
with open(fname, "r") as f:
df = pd.read_json(f, typ="frame", lines=True)
return key in df.columns and (df[key].tail(n=1) == val).all()
class TrackApiTest(unittest.TestCase):
    """Regression test: the soft-deprecated tune.track.log API keeps working."""

    def tearDown(self):
        # Shut down both the tune session and the ray runtime between tests.
        session.shutdown()
        ray.shutdown()

    def testSoftDeprecation(self):
        """Checks that tune.track.log code does not break."""
        from ray.tune import track
        ray.init(num_cpus=2)

        def testme(config):
            for i in range(config["iters"]):
                track.log(iteration=i, hi="test")

        trials = tune.run(testme, config={"iters": 5}).trials
        trial_res = trials[0].last_result
        # Bug fix: the original used assertTrue(value, "test"), which only
        # checks truthiness — the second argument is the failure message,
        # not an expected value. assertEqual performs the intended check.
        self.assertEqual(trial_res["hi"], "test")
        self.assertEqual(trial_res["training_iteration"], 5)


if __name__ == "__main__":
    import pytest
    import sys
    sys.exit(pytest.main(["-v", __file__]))
| StarcoderdataPython |
174047 | <filename>shadowhawk/plugins/shell.py
import os
import re
import html
import asyncio
from time import sleep
from io import BytesIO
from pyrogram import Client, filters
from .. import config, help_dict, log_errors, public_log_errors, self_destruct
# All the processes we've started
processes = {}
def _dumb_wait(pid, timeout):
time = 0
try:
while time < timeout:
blah, idk = os.waitpid(pid, os.WNOHANG)
if not blah:
sleep(1)
time += 1
else:
return True
except ChildProcessError as e:
if e.errno == 10:
return True
# Assume it never exited
return False
return False
# Matches "<prefix>(sh|bash|shell|term|terminal) <command>" with an optional
# newline-separated stdin payload; group 1 = command, group 2 = stdin.
SHELL_REGEX = '^(?:' + '|'.join(map(re.escape, config['config']['prefixes'])) + r')(?:(?:ba)?sh|shell|term(?:inal)?)\s+(.+)(?:\n([\s\S]+))?$'


@Client.on_message(~filters.sticker & ~filters.via_bot & ~filters.edited & ~filters.forwarded & filters.me & filters.regex(SHELL_REGEX))
@log_errors
@public_log_errors
async def shell(client, message):
    # NOTE(review): the regex is re-matched here only as a guard; the groups
    # used below come from message.matches, already populated by
    # filters.regex.
    match = re.match(SHELL_REGEX, message.text.markdown)
    if not match:
        return
    command = message.matches[0].group(1)
    stdin = message.matches[0].group(2)
    # Run the command through the shell; wire up stdin only when a payload
    # was supplied.
    process = await asyncio.create_subprocess_shell(command, stdin=asyncio.subprocess.PIPE if stdin else None, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE)
    # Remember the command line and track the process so the kill/jobs
    # handlers below can find it.
    process.cmdline = command
    processes[process.pid] = process
    reply = await message.reply_text(f'Executing process {process.pid}...')
    stdout, stderr = await process.communicate(stdin.encode() if stdin else None)
    returncode = process.returncode
    text = f'<b>Exit Code:</b> <code>{returncode}</code>\n'
    if process.pid in processes:
        del processes[process.pid]
    # Normalize line endings and trim trailing whitespace from both streams.
    stdout = stdout.decode().replace('\r', '').strip('\n').rstrip()
    stderr = stderr.decode().replace('\r', '').strip('\n').rstrip()
    if stderr:
        text += f'<code>{html.escape(stderr)}</code>\n'
    if stdout:
        text += f'<code>{html.escape(stdout)}</code>'
    # send as a file if it's longer than 4096 bytes
    if len(text) > 4096:
        out = stderr.strip() + "\n" + stdout.strip()
        f = BytesIO(out.strip().encode('utf-8'))
        f.name = "output.txt"
        await reply.delete()
        await message.reply_document(f, caption=f'<b>Exit Code:</b> <code>{returncode}</code>')
    else:
        await reply.edit_text(text)
@Client.on_message(~filters.sticker & ~filters.via_bot & ~filters.edited & filters.me & filters.command(['kill', 'terminate'], prefixes=config['config']['prefixes']))
@log_errors
@public_log_errors
async def terminate(client, message):
    # NOTE(review): the bare print() calls in this handler look like
    # leftover debug output.
    print("Received kill:", message.command)
    command = message.command
    command.pop(0)  # Drop the command word itself, keeping only arguments.
    if command:
        try:
            pid = int(command[0])
        except ValueError:
            await self_destruct(message, f'<code>"{command[0]}" is not a valid process id.</code>')
            return
        process = None
        # Only allow killing processes this plugin started.
        if pid in processes:
            process = processes[pid]
        else:
            await self_destruct(message, f'<code>{pid} is not a process started by us</code>')
            return
        print("Terminating!")
        # NOTE(review): _dumb_wait busy-sleeps with time.sleep, so each call
        # below can block the event loop for up to 30 seconds inside this
        # async handler.
        process.terminate()
        if not _dumb_wait(process.pid, 30):
            # SIGTERM didn't work within 30s; escalate to SIGKILL.
            await message.edit(f"<code>Sending SIGKILL to the process.</code>")
            process.kill()
            if not _dumb_wait(process.pid, 30):
                await message.edit(f'<code>{pid} is a cockroach and cannot be killed.</code>')
            else:
                if pid in processes:
                    del processes[pid]
                await message.edit(f'<code>Child {pid} was murdered successfully! \N{hocho}</code>')
        else:
            if pid in processes:
                del processes[pid]
            await self_destruct(message, f'<code>{pid} terminated successfully!</code>')
    else:
        await self_destruct(message, "<code>You must specify a process ID to terminate</code>")
@Client.on_message(~filters.sticker & ~filters.via_bot & ~filters.edited & filters.me & filters.command(['jobs'], prefixes=config['config']['prefixes']))
@log_errors
@public_log_errors
async def jobs(client, message):
    """Edit the command message into a list of tracked background processes."""
    header = "<b>List of running background tasks:</b>\n"
    entries = [f"<b>{proc.pid}:</b> <code>{proc.cmdline}</code>\n"
               for proc in processes.values()]
    await message.edit(header + "".join(entries))
# Register this plugin's help text; the {prefix} placeholders are substituted
# by the help renderer elsewhere — TODO confirm the substitution site.
help_dict['shell'] = ('Shell',
'''{prefix}sh <i><command></i> \\n <i>[stdin]</i> - Executes <i><command></i> in shell
Aliases: {prefix}bash, {prefix}shell, {prefix}term, {prefix}terminal
{prefix}kill <i><pid></i> - Kills a process based on it's pid
Aliases: {prefix}terminate
{prefix}jobs - Lists the running background processes (if any)
''')
| StarcoderdataPython |
1648052 | <gh_stars>0
import os
from django.conf import settings
API_KEY_FILE_PATH = os.path.join(settings.BASE_DIR, 'google_api_key.txt')
def get_google_api_key():
    """Read and return the Google API key stored at API_KEY_FILE_PATH."""
    with open(API_KEY_FILE_PATH, 'r') as key_file:
        return key_file.read()
| StarcoderdataPython |
1756753 | import torch.utils.data as data
import os
import os.path
#from plyfile import PlyData, PlyElement
from Datasets.plyfile.plyfile import PlyData
import numpy as np
#import main import args as args
def load_ply(dir,file_name, with_faces=False, with_color=False):
    """Load a PLY mesh from ``dir/file_name`` with plyfile.

    Always builds the (N, 3) array of vertex positions. When ``with_faces``
    and/or ``with_color`` are set, a list ``[points, faces?, color?]`` is
    returned; when only the points were built, the bare array is returned.
    """
    path = os.path.join(dir,file_name)
    ply_data = PlyData.read(path)
    points = ply_data['vertex']
    # Column-stack x/y/z fields into an (N, 3) array.
    points = np.vstack([points['x'], points['y'], points['z']]).T
    ret_val = [points]
    if with_faces:
        faces = np.vstack(ply_data['face']['vertex_indices'])
        ret_val.append(faces)
    if with_color:
        # Stack per-channel columns into an (N, 3) RGB array.
        r = np.vstack(ply_data['vertex']['red'])
        g = np.vstack(ply_data['vertex']['green'])
        b = np.vstack(ply_data['vertex']['blue'])
        color = np.hstack((r, g, b))
        ret_val.append(color)
    if len(ret_val) == 1:  # Unwrap the list
        ret_val = ret_val[0]
    return ret_val
def npy_loader(dir, file_name):
    """Load and return the NumPy array stored at ``dir/file_name``."""
    return np.load(os.path.join(dir, file_name))
class ListDataset(data.Dataset):
    """Dataset over (input, target) file pairs listed in ``path_list``.

    Loads precomputed .npy feature vectors for the 'GAN' network and raw
    .ply point clouds for every other network.

    NOTE(review): ``__getitem__`` only ever returns the inputs (plus the
    file name when ``give_name`` is True); the loaded/co-transformed
    targets are never returned -- confirm this is intended.
    """
    def __init__(self, input_root,target_root, path_list, net_name, co_transforms = None, input_transforms = None, target_transforms = None,args=None,mode=None,give_name = False):
        self.input_root = input_root
        if net_name=='auto_encoder' : # As target root is same as input root for auto encoder
            self.target_root = input_root
        else:
            self.target_root = target_root
        self.path_list = path_list
        self.net_name = net_name
        # GAN training consumes .npy global feature vectors; everything else
        # reads .ply point clouds.
        if(self.net_name=='GAN'):
            self.loader = npy_loader
        else:
            self.loader = load_ply
        self.input_transforms = input_transforms
        self.target_transforms = target_transforms
        self.co_transforms = co_transforms
        self.args = args
        self.mode = mode
        self.give_name =give_name
    def __getitem__(self,index):
        inputs_list,targets_list = self.path_list[index]
        input_name = inputs_list[0]
        input_name = input_name[:-4]  # strip the 4-char extension (".ply"/".npy")
        target_name = targets_list[0]
        target_name = target_name[:-4]
        inputs = self.loader(self.input_root,inputs_list[0])
        targets = self.loader(self.target_root,targets_list[0])
        # Joint (co-)transforms are augmentation and run only in train mode.
        if self.mode == 'train':
            if self.co_transforms is not None:
                if self.net_name=='GAN': # No target transform on GFV
                    inputs = self.co_transforms(inputs)
                else:
                    inputs,targets = self.co_transforms(inputs,targets)
        if self.input_transforms is not None:
            inputs = self.input_transforms(inputs)
        # if self.target_transforms is not None:
        #     targets = self.target_transforms(targets)
        if(self.give_name==True):
            return inputs, input_name
        else:
            return inputs
    def __len__(self):
        return len(self.path_list)
| StarcoderdataPython |
1721751 | # -*- coding: utf-8 -*-
# Copyright (c) 2021 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tuning class."""
from typing import Optional
from lpot.ux.utils.json_serializer import JsonSerializer
DIGITS = 4
class Metric(JsonSerializer):
    """Metric represents data which is sent from Tuning and Benchmark."""

    # TODO: Split into Accuracy, Performance if necessary for Benchmark
    def __init__(self) -> None:
        """Initialize configuration Dataset class."""
        super().__init__()
        self._acc_fp32: Optional[float] = None
        self._acc_int8: Optional[float] = None
        self._perf_latency_fp32: Optional[float] = None
        self._perf_latency_int8: Optional[float] = None
        self._perf_throughput_int8: Optional[float] = None
        self._perf_throughput_fp32: Optional[float] = None
        self._tuning_time: Optional[float] = None
        self._size_fp32: Optional[float] = None
        self._size_int8: Optional[float] = None

    @property
    def acc_fp32(self) -> Optional[float]:
        """Accuracy for float32."""
        return self._acc_fp32

    @acc_fp32.setter
    def acc_fp32(self, value: str) -> None:
        """Set accuracy fp32 from value."""
        float_value = float(value)
        # Normalize percentage inputs (e.g. "84.6") to a 0-1 fraction.
        if float_value > 1:
            float_value /= 100
        self._acc_fp32 = round(float_value, DIGITS)

    @property
    def acc_int8(self) -> Optional[float]:
        """Accuracy for int8."""
        return self._acc_int8

    @acc_int8.setter
    def acc_int8(self, value: str) -> None:
        """Set accuracy int8 from value."""
        float_value = float(value)
        # Normalize percentage inputs (e.g. "84.6") to a 0-1 fraction.
        if float_value > 1:
            float_value /= 100
        self._acc_int8 = round(float_value, DIGITS)

    @property
    def perf_latency_fp32(self) -> Optional[float]:
        """Latency for fp32."""
        return self._perf_latency_fp32

    @perf_latency_fp32.setter
    def perf_latency_fp32(self, value: str) -> None:
        """Set latency for fp32 and derive throughput if not set yet."""
        self._perf_latency_fp32 = round(float(value), DIGITS)
        if not self.perf_throughput_fp32:
            self.perf_throughput_fp32 = self.calculate_throughput(
                self._perf_latency_fp32,
            )

    @property
    def perf_latency_int8(self) -> Optional[float]:
        """Latency for int8."""
        return self._perf_latency_int8

    @perf_latency_int8.setter
    def perf_latency_int8(self, value: str) -> None:
        """Set latency for int8 and derive throughput if not set yet."""
        self._perf_latency_int8 = round(float(value), DIGITS)
        if not self.perf_throughput_int8:
            self.perf_throughput_int8 = self.calculate_throughput(
                self._perf_latency_int8,
            )

    @property
    def perf_throughput_int8(self) -> Optional[float]:
        """Throughput for int8 model."""
        return self._perf_throughput_int8

    @perf_throughput_int8.setter
    def perf_throughput_int8(self, value: str) -> None:
        """Set throughput from value for int8 model."""
        self._perf_throughput_int8 = round(float(value), DIGITS)

    @property
    def perf_throughput_fp32(self) -> Optional[float]:
        """Throughput for fp32 model."""
        return self._perf_throughput_fp32

    @perf_throughput_fp32.setter
    def perf_throughput_fp32(self, value: str) -> None:
        """Set throughput from value for fp32 model."""
        self._perf_throughput_fp32 = round(float(value), DIGITS)

    def insert_data(self, attribute: str, value: str) -> None:
        """Set attribute value."""
        self.__setattr__(attribute, value)

    @staticmethod
    def calculate_throughput(value: float) -> float:
        """
        Calculate throughput based on latency.

        Right now 1000 represents number of images in dataset.
        TODO: change 1000 to the batch size when Benchmark is ready
        """
        return 1000 / value

    @property
    def size_fp32(self) -> Optional[float]:
        """Model size for float32."""
        return self._size_fp32

    @size_fp32.setter
    def size_fp32(self, value: str) -> None:
        """Set model size fp32 from value."""
        self._size_fp32 = float(value)

    @property
    def size_int8(self) -> Optional[float]:
        """Model size for int8."""
        return self._size_int8

    @size_int8.setter
    def size_int8(self, value: str) -> None:
        """Set model size int8 from value."""
        self._size_int8 = float(value)

    @property
    def tuning_time(self) -> Optional[float]:
        """Tuning time."""
        # Bug fix: the original returned self.tuning_time (the property
        # itself), recursing until RecursionError. Read the backing field.
        return self._tuning_time

    @tuning_time.setter
    def tuning_time(self, value: str) -> None:
        """Set tuning_time value."""
        # Bug fix: the original assigned to self.tuning_time, re-invoking
        # this setter recursively. Assign to the backing field instead.
        self._tuning_time = float(value)
| StarcoderdataPython |
1773572 | import re
import subprocess
import os
import opentamp
from core.internal_repr.action import Action
from core.internal_repr.plan import Plan
# When True, delete the temporary PDDL files after each FF invocation.
CLEANUP = False
# When True, post-process FF's plan to collapse consecutive MOVE_TO steps.
PATCH = True
class HLSolver(object):
    """
    HLSolver provides an interface to the chosen task planner.
    """
    def __init__(self, domain_config=None, abs_domain=None):
        # NOTE(review): the fallback passes first_ts_pre=True, which only
        # FFSolver._translate_domain accepts; subclasses that keep the base
        # _translate_domain signature must be constructed with an explicit
        # abs_domain -- confirm this is intended.
        self.abs_domain = abs_domain if abs_domain else self._translate_domain(domain_config, first_ts_pre=True)
    def _translate_domain(self, domain_config):
        """
        Translates domain configuration file to representation required for task planner.
        E.g. for an FFSolver this would return a PDDL domain file. Only called once,
        upon creation of an HLSolver object.
        """
        raise NotImplementedError("Override this.")
    def translate_problem(self, concr_prob):
        """
        Translates concrete (instantiated) problem, a Problem object, to representation required for task planner.
        E.g. for an FFSolver this would return a PDDL problem file.
        """
        raise NotImplementedError("Override this.")
    def solve(self, abs_prob, domain, concr_prob, prefix=None, debug=False):
        """
        Solves the problem and returns a Plan object.

        abs_prob: what got returned by self.translate_problem()
        domain: Domain object
        concr_prob: Problem object
        """
        raise NotImplementedError("Override this.")
class HLState(object):
    """
    Tracks the HL state so that HL state information can be added to preds dict
    attribute in the Action class. For HLSolver use only.

    Predicates are indexed by their "(type name1 ...)" string representation.
    """
    def __init__(self, init_preds):
        """Index the initial predicates by their string representation."""
        self._pred_dict = {HLState.get_rep(pred): pred for pred in init_preds}

    def get_preds(self):
        """Return the predicates currently in the state."""
        return list(self._pred_dict.values())

    def in_state(self, pred):
        """Return True if an equivalent predicate is currently in the state."""
        return HLState.get_rep(pred) in self._pred_dict

    def update(self, pred_dict_list):
        """Apply a list of predicate dicts (action effects) to the state."""
        for pred_dict in pred_dict_list:
            self.add_pred_from_dict(pred_dict)

    def add_pred_from_dict(self, pred_dict):
        """Apply a single effect: remove the predicate if negated, add it otherwise.

        Bug fix: the original tested ``pred_dict["hl_info"] is "eff"`` -- an
        identity comparison against a string literal that only works through
        CPython string interning (and raises SyntaxWarning on 3.8+). Use
        equality instead.
        """
        if pred_dict["hl_info"] == "eff":
            negated = pred_dict["negated"]
            pred = pred_dict["pred"]
            rep = HLState.get_rep(pred)
            if negated and self.in_state(pred):
                del self._pred_dict[rep]
            elif not negated and not self.in_state(pred):
                self._pred_dict[rep] = pred

    @staticmethod
    def get_rep(pred):
        """Return the canonical "(type name1 name2 ...)" string for a predicate."""
        s = "(%s "%(pred.get_type())
        for param in pred.params[:-1]:
            s += param.name + " "
        s += pred.params[-1].name + ")"
        return s
class FFSolver(HLSolver):
    """HLSolver backed by the Fast-Forward (FF) classical planner.

    Translates the domain/problem to PDDL text, shells out to the FF binary,
    and parses the resulting plan into internal_repr objects.
    """
    # Path to the FF executable, resolved against the CWD at import time.
    FF_EXEC = os.getcwd() + '/opentamp'+"/task_planners/FF-v2.3/ff"
    # Prefix for the temporary domain/problem/output files handed to FF.
    FILE_PREFIX = "temp_"
    def _parse_precondition_ts(self, pre, ts):
        # Keep only the preconditions whose "start:end" timestep annotation
        # begins at 0 (i.e. must hold at the action's first timestep),
        # dropping duplicates.
        preds = ''
        so_far = []
        ts = ts.strip().split()
        ts = [t.split(':') for t in ts]
        ts = [(int(t[0]), int(t[1])) for t in ts]
        count, inds = 0, [0]
        pre = pre[5:-1]  # strip the surrounding "(and " ... ")"
        for i, token in enumerate(pre):
            if token == "(":
                count += 1
            if token == ")":
                count -= 1
                if count == 0:
                    inds.append(i+1)
        for i in range(len(inds)):
            if ts[i][0] == 0 and ts[i][1] <= 0:
                pred = pre[inds[i]:inds[i+1]] if i+1 < len(inds) else pre[inds[i]:]
                if pred not in so_far:
                    preds += pred
                    so_far.append(pred)
        return '(and {0})'.format(preds)
    def _parse_exclude(self, preds):
        # Split a "(and (p1 ...) (p2 ...))" conjunction into its top-level
        # predicates, then expand the custom "?x - type/excl" forall syntax
        # into plain PDDL (forall ... (when (not (= ...)) ...)) forms.
        parsed = []
        preds = preds[4:-1].strip()
        count, ind = 0, 0
        for i, token in enumerate(preds):
            if token == "(":
                count += 1
            if token ==")":
                count -= 1
                if count == 0:
                    parsed.append(preds[ind:i+1].strip())
                    ind = i + 1
        for i, pred in enumerate(parsed):
            if pred.find('/') >= 0:
                new_pred = ''
                cur_pred = pred
                eq = ''
                depth = 0
                while cur_pred.find('forall') >= 0:
                    depth += 1
                    new_pred += '(forall '
                    m = re.match("\(\s*forall", cur_pred)
                    cur_pred = cur_pred[m.span()[1]:-1].strip()
                    g = re.match("\((.*?)\)(.*)", cur_pred).groups()
                    # g[0] is "?var - type/excl1/excl2", g[1] the condition.
                    v = g[0].split("/")
                    quant = '({0}) '.format(v[0].strip())
                    for e in v[1:]:
                        eq += '(not (= {0} {1}))'.format(v[0].split('-')[0].strip(), e.strip())
                    cond = g[1].strip()
                    new_pred += quant
                    if cond.find('forall') >= 0:
                        cur_pred = cond
                    else:
                        new_pred += '(when {0} {1})'.format(eq, cond)
                    eq += ''  # NOTE(review): no-op; exclusions accumulate across nesting levels
                # Close one paren per opened forall.
                for _ in range(depth):
                    new_pred += ')'
                parsed[i] = new_pred
        out = '(and '
        for step in parsed:
            out += step + ' '
        out += ')'
        return out
    def _translate_domain(self, domain_config, first_ts_pre=False):
        """
        Argument:
            domain_config: parsed domain configuration that defines the problem
                           (Dict\{String: String\})
        Return:
            translated domain in .PDDL recognizable by HLSolver (String)
        """
        dom_str = "; AUTOGENERATED. DO NOT EDIT.\n\n(define (domain robotics)\
\n(:requirements :strips :equality :typing)\n(:types\n"
        for t in domain_config["Types"].split(";"):
            dom_str += t.strip().split("(")[0].strip().replace(',', '') + " "
        dom_str += "- base_object"
        for st in domain_config.get("Subtypes", "").split(";"):
            dom_str += "\n"
            dom_str += st.strip().replace(',', '')
        dom_str += ")\n\n(:predicates\n"
        for p_defn in domain_config["Derived Predicates"].split(";"):
            p_name, p_params = list(map(str.strip, p_defn.split(",", 1)))
            p_params = [s.strip() for s in p_params.split(",")]
            dom_str += "(%s "%p_name
            for i, param in enumerate(p_params):
                dom_str += "?var%d - %s "%(i, param)
            dom_str += ")\n"
        dom_str += ")\n\n"
        for key in list(domain_config.keys()):
            if key.startswith("Action"):
                # The action value is "(params) (pre) (eff) [timesteps]";
                # index the top-level parenthesized groups.
                count, inds = 0, [0]
                for i, token in enumerate(domain_config[key]):
                    if token == "(":
                        count += 1
                    if token == ")":
                        count -= 1
                        if count == 0:
                            inds.append(i+1)
                params = domain_config[key][inds[0]:inds[1]].strip()
                pre = domain_config[key][inds[1]:inds[2]].strip()
                eff = domain_config[key][inds[2]:inds[3]].strip()
                pre = self._parse_exclude(pre)
                eff = self._parse_exclude(eff)
                if first_ts_pre:
                    ts = domain_config[key][inds[3]:].strip()
                    pre = self._parse_precondition_ts(pre, ts)
                dom_str += "(:action %s\n:parameters %s\n:precondition %s\n:effect %s\n)\n\n"%(key.split()[1], params, pre, eff)
        dom_str += ")"
        return dom_str
    def translate_problem(self, concr_prob, initial=None, goal=None):
        """
        Argument:
            concr_prob: problem that defines initial state and goal configuration.
                        (internal_repr/problem)
            initial: optional extra predicate strings enforced only at the
                     first timestep; also stored on concr_prob.initial
            goal: optional goal predicate strings overriding concr_prob.goal_preds
        Return:
            translated problem in .PDDL recognizable by HLSolver (String)
        """
        prob_str = "; AUTOGENERATED. DO NOT EDIT.\n\n(define (problem ff_prob)\n(:domain robotics)\n(:objects\n"
        for param in list(concr_prob.init_state.params.values()):
            prob_str += "%s - %s\n"%(param.name, param.get_type())
        prob_str += ")\n\n(:init\n"
        used = []
        init_state = concr_prob.init_state
        init_preds = list(init_state.preds) + list(init_state.invariants)
        for pred in init_preds:
            if initial is not None and not pred._init_include: continue
            cur_str = ''
            cur_str += "(%s "%pred.get_type()
            for param in pred.params:
                cur_str += "%s "%param.name
            cur_str += ")\n"
            if cur_str in used: continue  # skip duplicate predicate lines
            used.append(cur_str)
            prob_str += cur_str
        if initial is not None:
            initial = set(initial)
            for pred in initial:
                prob_str += pred
            concr_prob.initial = initial
        prob_str += ")\n\n(:goal\n(and "
        if goal is None:
            for pred in concr_prob.goal_preds:
                prob_str += "(%s "%pred.get_type()
                for param in pred.params:
                    prob_str += "%s "%param.name
                prob_str += ") "
        else:
            for pred in goal:
                prob_str += pred
            concr_prob.goal = goal
        prob_str += ")\n)\n)"
        return prob_str
    def run_planner(self, abs_prob, domain, prefix=None, label=''):
        # Invoke FF; when continuing from a prefix plan, renumber the new
        # steps so they follow on from the prefix before concatenating.
        plan_str = self._run_planner(self.abs_domain, abs_prob, label=label)
        if plan_str == Plan.IMPOSSIBLE:
            return plan_str
        if prefix:
            for i in range(len(plan_str)):
                step, action = plan_str[i].split(':')
                plan_str[i] = str(len(prefix) + int(step)) + ':' + action
            plan_str = prefix + plan_str
        return plan_str
    def solve(self, abs_prob, domain, concr_prob, prefix=None, label='', debug=False):
        """
        Argument:
            abs_prob: translated problem in .PDDL recognizable by HLSolver (String)
            domain: domain in which problem is defined. (internal_repr/domain)
            concr_prob: problem that defines initial state and goal configuration.
                        (internal_repr/problem)
        Return:
            Plan Object for ll_solver to optimize. (internal_repr/plan)
        """
        plan_str = self.run_planner(abs_prob, self.abs_domain, prefix=prefix, label=label)
        plan = self.get_plan(plan_str, domain, concr_prob, concr_prob.initial, debug=debug)
        # get_plan returns the IMPOSSIBLE sentinel (a string) on failure.
        if type(plan) is not str:
            plan.plan_str = plan_str
            plan.goal = concr_prob.goal
            plan.initial = concr_prob.initial
        return plan
    def get_plan(self, plan_str, domain, concr_prob, initial=None, reuse_params=None, debug=False):
        """
        Argument:
            plan_str: list of high level plan. (List(String))
            domain: domain in which problem is defined. (internal_repr/domain)
            concr_prob: problem that defines initial state and goal configuration.
                        (internal_repr/problem)
        Return:
            Plan Object for ll_solver to optimize. (internal_repr/plan)
        Note: Actions, and Parameters are created here.
        """
        if plan_str == Plan.IMPOSSIBLE:
            return plan_str
        openrave_env = concr_prob.env
        sess = concr_prob.sess
        plan_horizon = self._extract_horizon(plan_str, domain)
        if reuse_params is None:
            params = self._spawn_plan_params(concr_prob, plan_horizon)
        else:
            params = reuse_params
        actions = self._spawn_actions(plan_str, domain, params,
                                      plan_horizon, concr_prob, openrave_env,
                                      initial, debug=debug)
        plan = Plan(params, actions, plan_horizon, openrave_env, sess=sess)
        plan.start = concr_prob.start_action
        plan.prob = concr_prob
        plan.domain = domain
        plan.plan_str = plan_str
        return plan
    def _extract_horizon(self, plan_str, domain):
        """
        Argument:
            plan_str: list of high level plan. (List(String))
            domain: domain in which problem is defined. (internal_repr/domain)
        Return:
            planning horizon for the entire plan. (Integer)
        """
        hor = 1
        for action_str in plan_str:
            spl = action_str.split()
            a_name = spl[1].lower()
            ## subtract 1 b/c subsequent actions have an overlapping
            ## first and last state
            hor += domain.action_schemas[a_name].horizon - 1
        return hor
    def _spawn_plan_params(self, concr_prob, plan_horizon):
        """
        Argument:
            concr_prob: problem that defines initial state and goal configuration.
                        (internal_repr/problem)
            plan_horizon: planning horizon for the entire plan. (Integer)
        Return:
            A mapping between parameter name and parameter
            (Dict\{String: internal_repr/parameter\})
        """
        params = {}
        for p_name, p in list(concr_prob.init_state.params.items()):
            params[p_name] = p.copy(plan_horizon, True)
        return params
    def _spawn_actions(self, plan_str, domain, params,
                       plan_horizon, concr_prob, env,
                       initial=[], debug=False):
        """
        Argument:
            plan_str: list of high level plan. (List(String))
            domain: domain in which problem is defined. (internal_repr/domain)
            params: dictionary mapping name to parameter.
                    (Dict\{String: internal_repr/parameter\})
            plan_horizon: planning horizon for the entire plan. (Integer)
            concr_prob: problem that defines initial state and goal configuration.
                        (internal_repr/problem)
            env: Openrave Environment for planning (openravepy/Environment)
        Return:
            list of actions to plan over (List(internal_repr/action))
        """
        actions = []
        curr_h = 0
        hl_state = HLState(concr_prob.init_state.preds)
        for action_str in plan_str:
            spl = action_str.split()
            step = int(spl[0].split(":")[0])
            a_name, a_args = spl[1].lower(), list(map(str.lower, spl[2:]))
            a_schema = domain.action_schemas[a_name]
            var_names, expected_types = list(zip(*a_schema.params))
            # Map schema variable names -> (grounded arg name, expected type).
            bindings = dict(list(zip(var_names, list(zip(a_args, expected_types)))))
            preds = []
            init_preds = [pred.get_rep() for pred in concr_prob.init_state.preds]
            invariant_preds = [pred.get_rep() for pred in concr_prob.init_state.invariants]
            # Initial contains predicates only enforced at the first ts of the plan
            if curr_h == 0 and initial is not None:
                init_preds += initial
            for i, pred in enumerate(init_preds):
                spl = list(map(str.strip, pred.strip("() ").split()))
                p_name, p_args = spl[0], spl[1:]
                p_objs = []
                for n in p_args:
                    try:
                        p_objs.append(params[n])
                    except KeyError:
                        raise ProblemConfigException("Parameter '%s' for predicate type '%s' not defined in domain file."%(n, p_name))
                try:
                    init_pred = domain.pred_schemas[p_name].pred_class(name="initpred%d"%i,
                                                                      params=p_objs,
                                                                      expected_param_types=domain.pred_schemas[p_name].expected_params,
                                                                      env=env, debug=debug)
                    preds.append({'negated': False, 'pred': init_pred, 'hl_info': 'hl_state', 'active_timesteps': (0,0)})
                except TypeError as e:
                    # print(("type error for {}".format(pred)))
                    pass
            # Invariant predicates are enforced every timestep
            for i, pred in enumerate(invariant_preds):
                spl = list(map(str.strip, pred.strip("() ").split()))
                p_name, p_args = spl[0], spl[1:]
                p_objs = []
                for n in p_args:
                    try:
                        p_objs.append(params[n])
                    except KeyError:
                        raise ProblemConfigException("Parameter '%s' for predicate type '%s' not defined in domain file."%(n, p_name))
                try:
                    invariant_pred = domain.pred_schemas[p_name].pred_class(name="invariantpred%d"%i,
                                                                           params=p_objs,
                                                                           expected_param_types=domain.pred_schemas[p_name].expected_params,
                                                                           env=env, debug=debug)
                    ts = (curr_h, curr_h + a_schema.horizon - 1)
                    preds.append({'negated': False, 'pred': invariant_pred, 'hl_info': 'invariant', 'active_timesteps': ts})
                except TypeError as e:
                    print(("type error for {}".format(pred)))
            for p_d in a_schema.preds:
                pred_schema = domain.pred_schemas[p_d["type"]]
                arg_valuations = [[]]
                for a in p_d["args"]:
                    if a in bindings:
                        # if we have a binding, add the arg name to all valuations
                        for val in arg_valuations:
                            val.append(bindings[a])
                    else:
                        # handle universally quantified params by creating a valuation for each possibility
                        excl = [bindings[e][0] for e in bindings if e in a_schema.exclude_params[a]]
                        p_type = a_schema.universally_quantified_params[a]
                        bound_names = [bindings[key][0] for key in bindings]
                        # arg_names_of_type = [k for k, v in params.items() if v.get_type() == p_type and k not in bound_names]
                        arg_names_of_type = [k for k, v in list(params.items()) if p_type in v.get_type(True) and k not in excl]
                        arg_valuations = [val + [(name, p_type)] for name in arg_names_of_type for val in arg_valuations]
                for val in arg_valuations:
                    val, types = list(zip(*val))
                    try:
                        pred = pred_schema.pred_class("placeholder", [params[v] for v in val], pred_schema.expected_params, env=env, debug=debug)
                    except:
                        pred = pred_schema.pred_class("placeholder", [params[v] for v in val], pred_schema.expected_params, env=env)
                    ts = (p_d["active_timesteps"][0] + curr_h, p_d["active_timesteps"][1] + curr_h)
                    preds.append({"negated": p_d["negated"], "hl_info": p_d["hl_info"], "active_timesteps": ts, "pred": pred})
            # updating hl_state
            hl_state.update(preds)
            actions.append(Action(step, a_name, (curr_h, curr_h + a_schema.horizon - 1), [params[arg] for arg in a_args], preds))
            curr_h += a_schema.horizon - 1
        return actions
    def _run_planner(self, abs_domain, abs_prob, label=''):
        """
        Argument:
            abs_domain: translated domain in .PDDL recognizable by HLSolver (String)
            abs_prob: translated problem in .PDDL recognizable by HLSolver (String)
        Return:
            list of high level plan (List(String))
        Note:
            High level planner gets called here.
        """
        if not os.path.isdir('temp'):
            os.mkdir('temp')
        fprefix = 'temp/'+label+'_'+FFSolver.FILE_PREFIX
        with open("%sdom.pddl"%(fprefix), "w") as f:
            f.write(abs_domain)
        with open("%sprob.pddl"%(fprefix), "w") as f:
            f.write(abs_prob)
        with open("%sprob.output"%(fprefix), "w") as f:
            try:
                # Cap FF at 5 minutes; a timeout leaves the (partial) output file in place.
                subprocess.call([FFSolver.FF_EXEC, "-o", "%sdom.pddl"%(fprefix), "-f", "%sprob.pddl"%(fprefix)], stdout=f, timeout=300)
            except subprocess.TimeoutExpired:
                print('Error: FF solve timed out!')
        with open("%sprob.output"%(fprefix), "r") as f:
            s = f.read()
        if "goal can be simplified to FALSE" in s or "problem proven unsolvable" in s:
            plan = Plan.IMPOSSIBLE
        else:
            try:
                plan = [x for x in map(str.strip, s.split("found legal plan as follows")[1].split("time")[0].replace("step", "").split("\n")) if x]
            except:
                print('Error in filter for', s, fprefix, '\n\n', abs_prob, '\n\n')
                plan = Plan.IMPOSSIBLE
        if CLEANUP:
            subprocess.call(["rm", "-f", "%sdom.pddl"%fprefix,
                             "%sprob.pddl"%fprefix,
                             "%sprob.pddl.soln"%fprefix,
                             "%sprob.output"%fprefix])
        if PATCH and plan != Plan.IMPOSSIBLE:
            plan = self._patch_redundancy(plan)
        return plan
    def _patch_redundancy(self, plan_str):
        """
        Argument:
            plan_str: list of high level plan (List(String))
        Return:
            list of high level plan that don't have redundancy. (List(String))

        Collapses runs of consecutive MOVE_TO steps into one, then renumbers
        the remaining steps.
        """
        i = 0
        while i < len(plan_str)-1:
            if "MOVE_TO" in plan_str[i] and "MOVE_TO" in plan_str[i+1]:
                #pose = plan_str[i+1].split()[-1]
                del plan_str[i]
                #spl = plan_str[i].split()
                #spl[-1] = pose
                #plan_str[i] = " ".join(spl)
            else:
                i += 1
        #while i < len(plan_str)-1:
        #    if "MOVETO" in plan_str[i] and "MOVETO" in plan_str[i+1]:
        #        pose = plan_str[i+1].split()[-1]
        #        del plan_str[i+1]
        #        spl = plan_str[i].split()
        #        spl[-1] = pose
        #        plan_str[i] = " ".join(spl)
        #    else:
        #        i += 1
        for i in range(len(plan_str)):
            spl = plan_str[i].split(":", 1)
            plan_str[i] = "%s:%s"%(i, spl[1])
        return plan_str
    def apply_action(self, initial, action):
        # Apply an action's effects to a list of predicate-rep strings:
        # drop negated effects, append positive ones.
        new_initial = []
        preds = [pred for pred in action.preds if pred['hl_info'] == 'eff']
        pred_reps = {pred['pred'].get_rep(): pred for pred in preds}
        for pred in initial:
            if pred not in pred_reps or not pred_reps[pred]['negated']:
                new_initial.append(pred)
        for pred in pred_reps:
            if not pred_reps[pred]['negated'] and pred not in new_initial:
                new_initial.append(pred)
        return new_initial
class DummyHLSolver(HLSolver):
    """Stub HLSolver for exercising plumbing without invoking a real planner."""

    def _translate_domain(self, domain_config, first_ts_pre=False):
        # Accept the first_ts_pre keyword that HLSolver.__init__ passes;
        # the original signature raised TypeError whenever a DummyHLSolver
        # was constructed without an explicit abs_domain.
        return "translate domain"

    def translate_problem(self, concr_prob):
        return "translate problem"

    def solve(self, abs_prob, domain, concr_prob, prefix=None):
        return "solve"
| StarcoderdataPython |
3303516 | from __future__ import absolute_import, division, print_function, unicode_literals
import six
from mpl_toolkits.axisartist.axislines import *
| StarcoderdataPython |
1770152 | <gh_stars>0
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import os
import cv2
from .utils import scale_images, calculate_fid
def sample(gan, train_dataset):
    """Generate and visualize sample generated by the GAN

    Also prints the discriminator's real-probability for one real image and
    for the generated one.

    Args:
        gan (GAN): Compiled GAN model
        train_dataset (tf.data.Dataset): training data to sample from
    """
    # One latent vector and one random integer class label.
    random_latent_vectors = tf.random.truncated_normal(
        shape=(1, gan.latent_dim)
    )
    random_labels = tf.math.floor(gan.num_classes * tf.random.uniform((1, 1)))
    inputs = (random_latent_vectors, random_labels)
    gen_imgs = gan(inputs, training=False)
    real_imgs, real_labels = next(iter(train_dataset))
    real_img = real_imgs[:1, ...]
    real_label = real_labels[:1, ...]
    pred_real = gan.discriminator(real_img, real_label, training=False)
    pred_gen = gan.discriminator(gen_imgs, random_labels, training=False)
    # Discriminator outputs logits; sigmoid converts them to probabilities.
    print("Probability of real image being real:", tf.nn.sigmoid(pred_real))
    print("Probability of generated image being real:", tf.nn.sigmoid(pred_gen))
    img = gen_imgs[0]
    # Rescale from the generator's [-1, 1] output range to [0, 1] for display.
    img = (img + 1.) / 2.
    if (img.shape[-1] == 1):
        # Single-channel image: drop the channel axis and show as grayscale.
        img = np.squeeze(img, axis=-1)
        plt.title("Generated image")
        plt.imshow(img, cmap='gray')
    else:
        plt.title("Generated image")
        plt.imshow(img)
    plt.show()
def loss_curve(history):
    """Plot generator and discriminator losses per epoch.

    Args:
        history (tf.keras.callbacks.History): training history
    """
    series = (('g_loss', 'generator loss'), ('d_loss', 'discriminator loss'))
    for key, label in series:
        plt.plot(history.history[key], label=label)
    plt.xlabel('Epoch')
    plt.ylabel('Loss')
    plt.grid(axis='y')
    plt.legend(loc='lower right')
    plt.show()
def compute_fid(gan, real_images):
    """Compute FID score to evaluate GAN model performance

    Args:
        gan (GAN): GAN model
        real_images (tf.Tensor): real images from training dataset

    Returns:
        int: computed FID score
    """
    # Generate as many fake images as there are real ones so both activation
    # statistics are estimated from equally sized samples. The original
    # computed num_images but then hard-coded 16 generated samples.
    num_images = real_images.shape[0]
    latent_samples = tf.random.truncated_normal(shape=(num_images, gan.latent_dim))
    random_labels = tf.math.floor(
        gan.num_classes * tf.random.uniform(shape=[num_images, 1]))
    inputs = (latent_samples, random_labels)
    generated_images = gan(inputs, training=False)
    # resize images to the 299x299x3 input InceptionV3 expects
    images1 = scale_images(real_images, (299, 299, 3))
    images2 = scale_images(generated_images, (299, 299, 3))
    # calculate fid
    fid = calculate_fid(images1, images2)
    return fid
def plot_imgs_grid(imgs, path=None):
    """Plot a grid of generated images

    Args:
        imgs (tf.Tensor): images to plot; values in [-1, 1]
        path (str, optional): path to directory where to save images. Defaults to None.
    """
    n_imgs = imgs.shape[0]
    # Arrange the images on a roughly square grid.
    grid_side = np.sqrt(n_imgs)
    fig, axes = plt.subplots(
        int(round(grid_side)),
        int(round(grid_side)),
        figsize=(n_imgs, n_imgs)
    )
    if path:
        # Count existing files (recursively) to continue the numbering.
        file_counter = sum([len(files) for r, d, files in os.walk(path)])
    axes = np.reshape(axes, (-1,))
    for i, ax in enumerate(axes):
        img = imgs[i, ...]
        # Rescale from [-1, 1] to [0, 1] for display.
        img = (img + 1.) / 2.
        if img.shape[-1] == 1:
            img = np.squeeze(img, axis=-1)
            ax.imshow(img, cmap='gray')
        else:
            ax.imshow(img)
        if path:
            # save images
            name = 'Dog_'+str(file_counter+i)
            image_path = os.path.join(path, name)
            img = 255 * img.numpy()
            img = img.astype('uint8')
            # NOTE(review): cv2.imwrite expects BGR channel order; if img is
            # RGB the saved file's channels are swapped -- confirm intended.
            cv2.imwrite(image_path+'.jpg', img)
            print("Image saved as: "+name)
    plt.show()
def generate_and_plot_images(gan, num_images, save_path=None):
    """Generate ``num_images`` samples from the trained GAN and plot them.

    Args:
        gan (GAN): trained GAN model
        num_images (int): number of images to plot
        save_path (str, optional): path to directory where to save images. Defaults to None.
    """
    noise = tf.random.truncated_normal(shape=(num_images, gan.latent_dim))
    labels = tf.math.floor(gan.num_classes * tf.random.uniform(shape=[num_images, 1]))
    images = gan((noise, labels), training=False)
    plot_imgs_grid(images, path=save_path)
| StarcoderdataPython |
4814216 | <reponame>tabris2015/nayra_api
import datetime
import os
from flask_restful import Resource, abort, fields, marshal_with, reqparse
from werkzeug.datastructures import FileStorage
from werkzeug.utils import secure_filename
from app import app, db
from app.models import Audio, AudioCategory
# Audio file extensions the upload endpoint accepts (compared lowercased).
ALLOWED_EXTENSIONS = ["wav", "mp3"]
# flask_restful marshalling schema used to serialize Audio rows in responses.
audio_fields = {
    "id": fields.Integer,
    "name": fields.String,
    "content": fields.String,
    "category_id": fields.Integer,
    "modified": fields.DateTime
}
def audio_type(file: FileStorage):
    """reqparse type-validator for uploaded audio files.

    Raises ValueError for extension-less names, disallowed extensions, and
    filenames already present in the Audio table; returns the file otherwise.
    """
    if str.isalnum(file.filename):
        # A fully alphanumeric name contains no "." and thus no extension.
        raise ValueError("invalid filename")
    elif file.filename.split(".")[-1].lower() not in ALLOWED_EXTENSIONS:
        raise ValueError("file type not allowed")
    else:
        audio_file = Audio.query.filter_by(name=file.filename).first();
        if audio_file:
            raise ValueError("audio file already exists with id {id}".format(id=audio_file.id))
        else:
            return file
# Shared request parser for audio POST/PUT; bundle_errors collects every
# validation failure into a single error response.
audio_parser = reqparse.RequestParser(bundle_errors=True)
audio_parser.add_argument("file", type=audio_type, location="files", help="Invalid audio file: {error_msg}")
audio_parser.add_argument("content", required=True, help="Invalid content")
audio_parser.add_argument("category_id", type=int, required=True, help="Invalid category id")
class AudioRes(Resource):
    """REST resource for a single Audio row: GET/PUT/DELETE /audio/<id>."""
    def check_audio(self, audio):
        # Abort the request with 404 when the lookup returned nothing.
        if not audio:
            abort(404, message="audio not found")
    @marshal_with(audio_fields)
    def get(self, audio_id: int):
        """Return the audio record with the given id."""
        audio = Audio.query.filter_by(id=audio_id).first()
        self.check_audio(audio)
        return audio, 200
    @marshal_with(audio_fields)
    def put(self, audio_id: int):
        """Update content/category of an existing audio record (file unchanged)."""
        args = audio_parser.parse_args()
        audio = Audio.query.filter_by(id=audio_id).first()
        self.check_audio(audio)
        category = AudioCategory.query.filter_by(id=args["category_id"]).first()
        if not category:
            abort(400, message={"category_id": "audio category not found"})
        audio.content = args["content"]
        audio.category = category
        audio.modified = datetime.datetime.now()
        db.session.commit()
        return audio, 201
    def delete(self, audio_id: int):
        """Delete the audio record and its file on disk (if present)."""
        audio = Audio.query.filter_by(id=audio_id).first()
        self.check_audio(audio)
        if os.path.exists(audio.filepath):
            os.remove(audio.filepath)
        db.session.delete(audio)
        db.session.commit()
        return "", 204
class AudioListRes(Resource):
    """REST resource for the audio collection: GET (list) and POST (upload)."""
    @marshal_with(audio_fields)
    def get(self):
        """Return all audio records."""
        audios = Audio.query.all()
        return audios, 200
    @marshal_with(audio_fields)
    def post(self):
        """Validate and store an uploaded audio file plus its metadata."""
        args = audio_parser.parse_args()
        category = AudioCategory.query.filter_by(id=args["category_id"]).first()
        if not category:
            abort(400, message={"category_id": "audio category not found"})
        if not args["file"]:
            abort(400, message={"file": "audio file required"})
        file = args["file"]
        # secure_filename sanitizes the name before writing to disk.
        name = secure_filename(file.filename)
        filepath = os.path.join(app.config['AUDIOS_FOLDER'], name)
        file.save(filepath)
        audio = Audio(
            name=name,
            filepath=filepath,
            content=args["content"],
            category=category
        )
        db.session.add(audio)
        db.session.commit()
        return audio, 201
| StarcoderdataPython |
26239 | <gh_stars>0
#------------------------------------------------------------
# Dependencies
#------------------------------------------------------------
import pathlib
import os
import argparse
#------------------------------------------------------------
#
#------------------------------------------------------------
def is_video_file(in_file):
    """Return True if ``in_file`` (a pathlib.Path) has a known video extension.

    The check is case-insensitive, so ``CLIP.AVI`` is recognized as well
    (the original if/elif chain matched lowercase suffixes only).
    """
    return in_file.suffix.lower() in ('.avi', '.mp4', '.mkv', '.wmv')
def rename_video(sub_dir):
    """Move each video in ``sub_dir`` up one level, renamed after the directory.

    Files become ``<sub_dir.name><ext>`` in the parent directory; on a name
    collision a ``" <counter>"`` suffix is appended until a free name is found.
    """
    counter = 0
    for entry in sub_dir.iterdir():
        counter = counter + 1
        if is_video_file(entry):
            print(entry.name + ' is video')
            new_name_path = entry.with_name(sub_dir.name)
            new_name_path = new_name_path.with_suffix(entry.suffix)
            new_path_path = new_name_path.parents[1].joinpath(new_name_path.name)
            while new_path_path.exists():
                new_path_path = new_path_path.with_name(sub_dir.name + ' ' + str(counter) + new_path_path.suffix)
                # Bug fix: advance the counter each attempt; the original kept
                # retrying the same name forever when it already existed.
                counter = counter + 1
            print(new_path_path.parts)
            entry.rename(new_path_path)
if __name__ == "__main__":
    # CLI entry point: rename videos inside every subdirectory of the
    # directory given as the single positional argument.
    parser = argparse.ArgumentParser(description='Process path string.')
    parser.add_argument('input_directory')
    args = parser.parse_args()
    input_directory_path = pathlib.Path(args.input_directory)
    for x in input_directory_path.iterdir():
        if x.is_dir():
            rename_video(x)
def pipeline(*filters):
    """Compose ``filters`` right-to-left: ``pipeline(f, g)(x) == f(g(x))``."""
    def composed(value):
        result = value
        for step in reversed(filters):
            result = step(result)
        return result
    return composed
def limpa_texto(text):
    """Return *text* with every newline character removed."""
    return ''.join(text.split('\n'))
def troca_eh_massa(text):
    """Swap every occurrence of 'massa' for 'chatão' in *text*."""
    return 'chatão'.join(text.split('massa'))
def acha_nomes(text):
    """Wrap each known proper name found in *text* in a NOME(...) tag."""
    marked = text
    for person in ('Eduardo', 'Fausto'):
        marked = marked.replace(person, f'NOME({person})')
    return marked
def acha_verbos(text):
    """Wrap each known verb found in *text* in a VERBO(...) tag.

    Only 'é' is recognised; note the replacement is purely textual, so
    the character is tagged wherever it appears.
    """
    marked = text
    for verb in ('é',):
        marked = marked.replace(verb, f'VERBO({verb})')
    return marked
def acha_adjetivos(text):
    """Wrap each known adjective found in *text* in an ADJ(...) tag."""
    marked = text
    for adjective in ('massa', 'chatão'):
        marked = marked.replace(adjective, f'ADJ({adjective})')
    return marked
# Demo: build the text-processing pipeline and run it over a sample
# sentence, printing the input and the tagged output.
amostra = '\n\n\n\n Eduardo e Fausto são massa\n\n\n'
processar = pipeline(limpa_texto, acha_nomes, acha_verbos, acha_adjetivos, troca_eh_massa)
print(amostra)
print(processar(amostra))
| StarcoderdataPython |
1647932 | # -*- coding: utf-8 -*-
"""
Created on Sat Dec 11 00:31:17 2021
@author: jaimel
"""
import pandas as pd
# Load the Kaggle multi-label dataset, sample ten rows, and print each
# row's label vector -- first as a compact bit string, then as a list of
# its characters.
frame = pd.read_csv("/home/jolima/Documentos/multilabel-classification/multi-label-classification/dataset/kaggle_dataset.csv")
frame = frame.sample(n=10)
label_columns = ['Computer Science', 'Physics', 'Mathematics',
                 'Statistics', 'Quantitative Biology', 'Quantitative Finance']
label_matrix = frame[label_columns].values
for vector in label_matrix:
    bits = "".join(str(flag) for flag in vector)
    print(bits)
    print(list(bits))
| StarcoderdataPython |
3311527 | <reponame>diarts/aioyoutube
from functools import wraps
from aioyoutube.exeptions import (
VariableTypeError, VariableValueError
)
def search_validation(coroutine):
    """Decorator validating keyword arguments of the search API coroutine.

    Checks argument types and accepted values before awaiting *coroutine*.

    Raises:
        VariableTypeError: when an argument has the wrong type.
        VariableValueError: when an argument value is out of range or not
            in the accepted set.
    """
    @wraps(coroutine)
    async def wrapper(*args, **kwargs):
        """Decorator validate passed parameters for api method getting
        search result."""
        # Accepted values for the enum-like parameters.
        acceptable_order = (
            'date', 'rating', 'relevance', 'title', 'videoCount', 'viewCount',
        )
        acceptable_search_by = (
            'video', 'channel', 'playlist',
        )
        key = kwargs.get('key')
        text = kwargs.get('text')
        max_results = kwargs.get('max_results')
        page_token = kwargs.get('page_token')
        order = kwargs.get('order')
        published_after = kwargs.get('published_after')
        published_before = kwargs.get('published_before')
        search_by = kwargs.get('search_by')
        # NOTE: falsy values (empty string, 0) skip their check entirely,
        # and only the first failing check raises.
        if key and not isinstance(key, str):
            raise VariableTypeError(
                f'Argument "key" must be an str, current type is {type(key)}.'
            )
        elif text and not isinstance(text, str):
            raise VariableTypeError(
                f'Argument "text" must be an str, current type is {type(text)}.'
            )
        elif max_results and not isinstance(max_results, int):
            raise VariableTypeError(
                'Argument "max_results" must be an int, current type is'
                f' {type(max_results)}.'
            )
        elif max_results and not 0 <= max_results <= 50:
            raise VariableValueError(
                'Argument "max_result" must be in range from 1 to 50, '
                f'current value is {max_results}.'
            )
        elif page_token and not isinstance(page_token, str):
            raise VariableTypeError(
                'Argument "page_token" must be an str, current type is '
                f'{type(page_token)}.'
            )
        elif order and not isinstance(order, str):
            raise VariableTypeError(
                'Argument "order" must be an str, current type is '
                f'{type(order)}.'
            )
        elif order and order not in acceptable_order:
            raise VariableValueError(
                'Acceptable values for argument "order" is '
                f'{acceptable_order}, current value is {order}.'
            )
        elif published_after and not isinstance(published_after, int):
            raise VariableTypeError(
                'Argument "published_after" must be an int, current type is '
                f'{type(published_after)}.'
            )
        elif published_before and not isinstance(published_before, int):
            raise VariableTypeError(
                'Argument "published_before" must be an int, current type is '
                f'{type(published_before)}.'
            )
        elif search_by and not isinstance(search_by, str):
            raise VariableTypeError(
                'Argument "search_by" must be an str, current type is '
                f'{type(search_by)}.'
            )
        elif search_by and search_by not in acceptable_search_by:
            raise VariableValueError(
                'Acceptable values for argument "search_by" is '
                f'{acceptable_search_by}, current value is {search_by}.'
            )
        return await coroutine(*args, **kwargs)
    return wrapper
def comment_threads_validation(coroutine):
    """Decorator validating keyword arguments of the commentThreads API
    coroutine.

    Checks argument types and accepted values before awaiting *coroutine*.

    Raises:
        VariableTypeError: when an argument has the wrong type.
        VariableValueError: when an argument value is out of range or not
            in the accepted set.
    """
    @wraps(coroutine)
    async def wrapper(*args, **kwargs):
        """Decorator validate passed parameters for api method getting
        video comment_threads."""
        # Accepted values for the enum-like parameters.
        acceptable_part = ('id', 'replies', 'snippet',)
        acceptable_order = ('time', 'relevance',)
        acceptable_text_format = ('plainText', 'html',)
        key = kwargs.get('key')
        part = kwargs.get('part')
        video_id = kwargs.get('video_id')
        max_results = kwargs.get('max_results')
        page_token = kwargs.get('page_token')
        order = kwargs.get('order')
        text_format = kwargs.get('text_format')
        search_text = kwargs.get('search_text')
        # NOTE: falsy values (empty string, 0) skip their check entirely,
        # and only the first failing check raises.
        if key and not isinstance(key, str):
            raise VariableTypeError(
                f'Argument "key" must be an str, current type is {type(key)}.'
            )
        elif part and not isinstance(part, list):
            raise VariableTypeError(
                'Argument "part" must be an list, current type is'
                f' {type(part)}.'
            )
        elif part and not all(isinstance(item, str) for item in part):
            raise VariableTypeError(
                'Argument "part" must contain only str.'
            )
        elif part and not all(item in acceptable_part for item in part):
            raise VariableValueError(
                'Acceptable values for part contain parameter is '
                f'{acceptable_part}, current part contain {part}.'
            )
        elif max_results and not isinstance(max_results, int):
            raise VariableTypeError(
                'Argument "max_results" must be an int, current type is'
                f' {type(max_results)}.'
            )
        elif max_results and not 0 <= max_results <= 100:
            raise VariableValueError(
                'Argument "max_result" must be in range from 1 to 100, '
                f'current value is {max_results}.'
            )
        elif page_token and not isinstance(page_token, str):
            raise VariableTypeError(
                'Argument "page_token" must be an str, current type is '
                f'{type(page_token)}.'
            )
        elif order and not isinstance(order, str):
            raise VariableTypeError(
                'Argument "order" must be an str, current type is '
                f'{type(order)}.'
            )
        elif order and order not in acceptable_order:
            raise VariableValueError(
                'Acceptable values for argument "order" is '
                f'{acceptable_order}, current value is {order}.'
            )
        elif video_id and not isinstance(video_id, str):
            raise VariableTypeError(
                'Argument "video_id" must be an str, current type is'
                f' {type(video_id)}.'
            )
        elif text_format and not isinstance(text_format, str):
            raise VariableTypeError(
                'Argument "text_format" must be an str, current type is'
                f' {type(text_format)}.'
            )
        elif text_format and text_format not in acceptable_text_format:
            raise VariableValueError(
                # BUG FIX: this message previously referred to "order"
                # (copy-paste from the order check).
                'Acceptable values for argument "text_format" is '
                f'{acceptable_text_format}, current value is {text_format}.'
            )
        elif search_text and not isinstance(search_text, str):
            raise VariableTypeError(
                'Argument "search_text" must be an str, current type is'
                f' {type(search_text)}.'
            )
        return await coroutine(*args, **kwargs)
    return wrapper
def comments_validation(coroutine):
    """Decorator validating keyword arguments of the comments API
    coroutine.

    Checks argument types and accepted values before awaiting *coroutine*.

    Raises:
        VariableTypeError: when an argument has the wrong type.
        VariableValueError: when an argument value is out of range or not
            in the accepted set.
    """
    @wraps(coroutine)
    async def wrapper(*args, **kwargs):
        """Decorator validate passed parameters for api method getting
        comments from comment_threads."""
        # Accepted values for the enum-like parameters.
        acceptable_part = ('id', 'snippet',)
        acceptable_text_format = ('plainText', 'html',)
        key = kwargs.get('key')
        part = kwargs.get('part')
        max_results = kwargs.get('max_results')
        page_token = kwargs.get('page_token')
        parent_id = kwargs.get('parent_id')
        text_format = kwargs.get('text_format')
        # NOTE: falsy values (empty string, 0) skip their check entirely,
        # and only the first failing check raises.
        if key and not isinstance(key, str):
            raise VariableTypeError(
                f'Argument "key" must be an str, current type is {type(key)}.'
            )
        elif part and not isinstance(part, list):
            raise VariableTypeError(
                'Argument "part" must be an list, current type is'
                f' {type(part)}.'
            )
        elif part and not all(isinstance(item, str) for item in part):
            raise VariableTypeError(
                'Argument "part" must contain only str.'
            )
        elif part and not all(item in acceptable_part for item in part):
            raise VariableValueError(
                'Acceptable values for part contain parameter is '
                f'{acceptable_part}, current part contain {part}.'
            )
        elif max_results and not isinstance(max_results, int):
            raise VariableTypeError(
                'Argument "max_results" must be an int, current type is'
                f' {type(max_results)}.'
            )
        elif max_results and not 0 <= max_results <= 100:
            raise VariableValueError(
                'Argument "max_result" must be in range from 1 to 100, '
                f'current value is {max_results}.'
            )
        elif page_token and not isinstance(page_token, str):
            raise VariableTypeError(
                'Argument "page_token" must be an str, current type is '
                f'{type(page_token)}.'
            )
        elif text_format and not isinstance(text_format, str):
            raise VariableTypeError(
                'Argument "text_format" must be an str, current type is'
                f' {type(text_format)}.'
            )
        elif text_format and text_format not in acceptable_text_format:
            raise VariableValueError(
                # BUG FIX: this message previously referred to "order"
                # (copy-paste from the commentThreads validator).
                'Acceptable values for argument "text_format" is '
                f'{acceptable_text_format}, current value is {text_format}.'
            )
        elif parent_id and not isinstance(parent_id, str):
            raise VariableTypeError(
                'Argument "parent_id" must be an str, current type is'
                f' {type(parent_id)}.'
            )
        return await coroutine(*args, **kwargs)
    return wrapper
def channels_validation(coroutine):
    """Decorator validating keyword arguments of the channels API coroutine.

    Checks argument types and accepted values before awaiting *coroutine*;
    also rejects passing "channel_id" and "user_name" together.

    Raises:
        VariableTypeError: when an argument has the wrong type.
        VariableValueError: when an argument value is out of range, not in
            the accepted set, or mutually exclusive arguments are combined.
    """
    @wraps(coroutine)
    async def wrapper(*args, **kwargs):
        """Decorator validate passed parameters for api method getting
        channels data."""
        # Accepted "part" resource components for the channels endpoint.
        acceptable_part = (
            'brandingSettings', 'contentDetails', 'contentOwnerDetails', 'id',
            'localizations', 'snippet', 'statistics', 'status', 'topicDetails',
        )
        key = kwargs.get('key')
        part = kwargs.get('part')
        max_results = kwargs.get('max_results')
        channel_id = kwargs.get('channel_id')
        user_name = kwargs.get('user_name')
        # NOTE: falsy values (empty string, 0) skip their check entirely,
        # and only the first failing check raises.
        if key and not isinstance(key, str):
            raise VariableTypeError(
                f'Argument "key" must be an str, current type is {type(key)}.'
            )
        elif part and not isinstance(part, list):
            raise VariableTypeError(
                'Argument "part" must be an list, current type is'
                f' {type(part)}.'
            )
        elif part and not all(isinstance(item, str) for item in part):
            raise VariableTypeError(
                'Argument "part" must contain only str.'
            )
        elif part and not all(item in acceptable_part for item in part):
            raise VariableValueError(
                'Acceptable values for part contain parameter is '
                f'{acceptable_part}, current part contain {part}.'
            )
        elif max_results and not isinstance(max_results, int):
            raise VariableTypeError(
                'Argument "max_results" must be an int, current type is'
                f' {type(max_results)}.'
            )
        elif max_results and not 0 <= max_results <= 50:
            raise VariableValueError(
                'Argument "max_result" must be in range from 1 to 50, '
                f'current value is {max_results}.'
            )
        elif channel_id and user_name:
            # The API accepts either a channel id or a user name, not both.
            raise VariableValueError(
                'Variable "channel_id" and "user_name" is not compatible, '
                'pass only one of them.'
            )
        elif channel_id and not isinstance(channel_id, str):
            raise VariableTypeError(
                'Argument "channel_id" must be an str, current type'
                f' is {type(channel_id)}.'
            )
        elif user_name and not isinstance(user_name, str):
            raise VariableTypeError(
                'Argument "user_name" must be an str, current type'
                f' is {type(user_name)}.'
            )
        return await coroutine(*args, **kwargs)
    return wrapper
def playlist_items_validation(coroutine):
    """Decorator that type- and range-checks keyword arguments for the
    playlist-items API call before awaiting the wrapped coroutine.

    Raises VariableTypeError for a wrong type and VariableValueError for
    an out-of-range or unsupported value; falsy argument values skip
    their check.
    """
    @wraps(coroutine)
    async def wrapper(*args, **kwargs):
        """Validate playlist-items request parameters, then delegate to
        the wrapped coroutine."""
        allowed_parts = (
            'contentDetails', 'id', 'snippet', 'status',
        )
        api_key = kwargs.get('key')
        parts = kwargs.get('part')
        limit = kwargs.get('max_results')
        playlist = kwargs.get('playlist_id')
        token = kwargs.get('page_token')
        if api_key and not isinstance(api_key, str):
            raise VariableTypeError(
                f'Argument "key" must be an str, current type is {type(api_key)}.'
            )
        if parts and not isinstance(parts, list):
            raise VariableTypeError(
                'Argument "part" must be an list, current type is'
                f' {type(parts)}.'
            )
        if parts and not all(isinstance(entry, str) for entry in parts):
            raise VariableTypeError(
                'Argument "part" must contain only str.'
            )
        if parts and not all(entry in allowed_parts for entry in parts):
            raise VariableValueError(
                'Acceptable values for part contain parameter is '
                f'{allowed_parts}, current part contain {parts}.'
            )
        if limit and not isinstance(limit, int):
            raise VariableTypeError(
                'Argument "max_results" must be an int, current type is'
                f' {type(limit)}.'
            )
        if limit and not 0 <= limit <= 50:
            raise VariableValueError(
                'Argument "max_result" must be in range from 1 to 50, '
                f'current value is {limit}.'
            )
        if playlist and not isinstance(playlist, str):
            raise VariableTypeError(
                'Argument "playlist_id" must be an str, current type is'
                f' {type(playlist)}.'
            )
        if token and not isinstance(token, str):
            raise VariableTypeError(
                'Argument "page_token" must be an str, current type is '
                f'{type(token)}.'
            )
        return await coroutine(*args, **kwargs)
    return wrapper
def playlists_validation(coroutine):
    """Decorator that type- and range-checks keyword arguments for the
    playlists API call before awaiting the wrapped coroutine.

    Raises VariableTypeError for a wrong type and VariableValueError for
    an out-of-range or unsupported value; falsy argument values skip
    their check.
    """
    @wraps(coroutine)
    async def wrapper(*args, **kwargs):
        """Validate playlists request parameters, then delegate to the
        wrapped coroutine."""
        allowed_parts = (
            'contentDetails', 'id', 'snippet', 'status', 'localizations',
            'player',
        )
        api_key = kwargs.get('key')
        parts = kwargs.get('part')
        limit = kwargs.get('max_results')
        channel = kwargs.get('channel_id')
        token = kwargs.get('page_token')
        if api_key and not isinstance(api_key, str):
            raise VariableTypeError(
                f'Argument "key" must be an str, current type is {type(api_key)}.'
            )
        if parts and not isinstance(parts, list):
            raise VariableTypeError(
                'Argument "part" must be an list, current type is'
                f' {type(parts)}.'
            )
        if parts and not all(isinstance(entry, str) for entry in parts):
            raise VariableTypeError(
                'Argument "part" must contain only str.'
            )
        if parts and not all(entry in allowed_parts for entry in parts):
            raise VariableValueError(
                'Acceptable values for part contain parameter is '
                f'{allowed_parts}, current part contain {parts}.'
            )
        if limit and not isinstance(limit, int):
            raise VariableTypeError(
                'Argument "max_results" must be an int, current type is'
                f' {type(limit)}.'
            )
        if limit and not 0 <= limit <= 50:
            raise VariableValueError(
                'Argument "max_result" must be in range from 1 to 50, '
                f'current value is {limit}.'
            )
        if channel and not isinstance(channel, str):
            raise VariableTypeError(
                'Argument "channel_id" must be an str, current type is'
                f' {type(channel)}.'
            )
        if token and not isinstance(token, str):
            raise VariableTypeError(
                'Argument "page_token" must be an str, current type is '
                f'{type(token)}.'
            )
        return await coroutine(*args, **kwargs)
    return wrapper
def videos_validation(coroutine):
    """Decorator validating keyword arguments of the videos API coroutine.

    Checks argument types and accepted values before awaiting *coroutine*.

    Raises:
        VariableTypeError: when an argument has the wrong type.
        VariableValueError: when an argument value is out of range or not
            in the accepted set.
    """
    @wraps(coroutine)
    async def wrapper(*args, **kwargs):
        """Decorator validate passed parameters for api method getting
        video data."""
        # Accepted "part" resource components for the videos endpoint.
        acceptable_part = (
            'contentDetails', 'id', 'liveStreamingDetails', 'localizations',
            'player', 'recordingDetails', 'snippet', 'statistics',
            'status', 'topicDetails',
        )
        key = kwargs.get('key')
        part = kwargs.get('part')
        max_results = kwargs.get('max_results')
        video_ids = kwargs.get('video_ids')
        page_token = kwargs.get('page_token')
        # NOTE: falsy values (empty string, 0, empty list) skip their
        # check entirely, and only the first failing check raises.
        if key and not isinstance(key, str):
            raise VariableTypeError(
                f'Argument "key" must be an str, current type is {type(key)}.'
            )
        elif part and not isinstance(part, list):
            raise VariableTypeError(
                'Argument "part" must be an list, current type is'
                f' {type(part)}.'
            )
        elif part and not all(isinstance(item, str) for item in part):
            raise VariableTypeError(
                'Argument "part" must contain only str.'
            )
        elif part and not all(item in acceptable_part for item in part):
            raise VariableValueError(
                'Acceptable values for part contain parameter is '
                f'{acceptable_part}, current part contain {part}.'
            )
        elif max_results and not isinstance(max_results, int):
            raise VariableTypeError(
                'Argument "max_results" must be an int, current type is'
                f' {type(max_results)}.'
            )
        elif max_results and not 0 <= max_results <= 50:
            raise VariableValueError(
                'Argument "max_result" must be in range from 1 to 50, '
                f'current value is {max_results}.'
            )
        elif video_ids and not isinstance(video_ids, list):
            raise VariableTypeError(
                'Argument "video_ids" must be an list, current type is'
                f' {type(video_ids)}.'
            )
        elif video_ids and not all(isinstance(item, str) for item in video_ids):
            raise VariableTypeError(
                'Argument "video_ids" must contain only str.'
            )
        elif page_token and not isinstance(page_token, str):
            raise VariableTypeError(
                'Argument "page_token" must be an str, current type is '
                f'{type(page_token)}.'
            )
        return await coroutine(*args, **kwargs)
    return wrapper
| StarcoderdataPython |
1782032 | # -*- coding: utf-8 -*-
#
# Validates if 2nd level domain from PTR record points to an IP address
# that belongs to spam network.
#
import json
import yaml
import re
from cachetools import TTLCache
import socket
import time
import sys
import argparse
from goshawk.reporter import RabbitmqConsumer, WorkerPool
from goshawk.client import GoshawkClient
from goshawk.config import config, setup
##
## Globals
##
goshawk = None  # GoshawkClient instance, populated by init()
blocklist_ip = None  # set of blocklisted IP strings, populated by init()
cache_ip = TTLCache(maxsize=10000, ttl=1800)  # IPs seen recently (30 min TTL)
cache_ptr = TTLCache(maxsize=10000, ttl=1800)  # PTR names checked recently
cache_reported = set()  # domain names already reported (process lifetime)
queue_ip = list()  # work queue shared between callback() and worker threads
##
## Reporter Code
##
def check_name_reported(name):
    """Return True when *name* has already been reported.

    Consults the in-process cache first, falling back to a Goshawk API
    lookup on a miss; positive results are cached for later calls.
    """
    if name in cache_reported:
        return True
    response = goshawk.get_records(
        list_name=config['goshawk']['target_list'],
        value=name)
    reported = response['count'] > 0
    if reported:
        cache_reported.add(name)
    return reported
def report_name(d):
    """Submit domain *d* to the Goshawk target list unless it is already
    known to have been reported."""
    if check_name_reported(d):
        print("Name %s already reported." % d)
        return
    print("Reporting name %s" % d)
    reason = 'A "%s" points to reported server' % d
    goshawk.post_record(
        reporter_name=config['goshawk']['reporter'],
        list_name=config['goshawk']['target_list'],
        value=d,
        reason=reason,
    )
def check_ip(ip):
    """Resolve the PTR record for *ip* and, when its domain turns out to
    be blocklisted, scan the surrounding /24 network.

    PTR names already inspected recently (see ``cache_ptr``) are skipped.
    """
    try:
        # Only the primary name is needed; aliases/addresses are unused.
        name = socket.gethostbyaddr(ip)[0]
    except OSError:
        # No PTR record or resolver failure -- nothing to check.
        # (socket.herror/gaierror are OSError subclasses.)
        return
    if name in cache_ptr:  # idiomatic membership test (was .keys())
        return
    if check_domain(name):
        scan_24_block(ip)
    cache_ptr[name] = 1
def check_domain(dom):
    """Check whether the 2nd-level domain of *dom* resolves to a
    blocklisted IP address.

    Tries both the bare domain and its "www." variant; when either
    resolves into ``blocklist_ip``, the bare domain is reported and
    returned. Returns None otherwise.
    """
    print("Checking %s " % dom)
    # Drop empty labels (e.g. from a trailing dot in the PTR name).
    tokens = [t for t in dom.split('.') if t]
    if not tokens:
        return None
    d = '.'.join(tokens[-2:])
    for candidate in (d, "www." + d):
        try:
            # BUG FIX: resolve the loop *candidate* -- the original
            # resolved `d` both times and never tried the "www." form.
            ip = socket.gethostbyname(candidate)
        except OSError as e:
            # BUG FIX: continue to the next candidate instead of giving
            # up on the first resolution failure.
            print(e)
            continue
        if ip in blocklist_ip:
            report_name(d)
            return d
    return None
def scan_24_block(ip):
    """Reverse-resolve every address in the /24 network containing *ip*
    and run check_domain() on each PTR name found.

    Marks *ip* in ``cache_ip`` once the sweep is complete.
    """
    print("Scan block: " + ip)
    prefix = ip.rsplit('.', 1)[0]
    for host in range(256):
        candidate = prefix + "." + str(host)
        print("Checking IP:", candidate)
        try:
            ptr_name = socket.gethostbyaddr(candidate)[0]
        except Exception:
            # Address has no PTR record -- skip it quietly.
            continue
        check_domain(ptr_name)
    cache_ip[ip] = 1
# Shared shutdown flag: worker threads spin until main() flips it to False.
is_running = True
def worker(id):
    """Worker-thread loop: pop IPs off the shared queue and check them.

    Busy-waits (1 s sleep) while the queue is empty and exits once the
    module-level ``is_running`` flag is cleared.

    :param id: numeric worker identifier (currently unused).
    """
    while is_running:
        if len(queue_ip) == 0:
            time.sleep(1)
            continue
        ip = queue_ip.pop()
        check_ip(ip)
def callback(ch, method, properties, body):
    """RabbitMQ consumer callback: extract client IPs from DNSBL query
    log messages and enqueue them for the worker threads.

    Any parsing error is printed and swallowed so the consumer keeps
    running.
    """
    try:
        x = body.decode()
        if ' queries: client ' in x:
            data = json.loads(x)
            # Token 8 of the log message is the queried name -- assumes a
            # BIND-style "queries: client" log line; TODO confirm format.
            selector = data['message'].split()[7]
            if not selector.endswith(config['reporter']['dnsbl_name']):
                return
            # remove dnsbl name (plus the joining dot)
            ip = selector[:-len(config['reporter']['dnsbl_name'])-1]
            # reverse IP order (DNSBL queries use reversed octets)
            ip = ".".join(ip.split(".")[::-1])
            if ip in cache_ip.keys():
                return
            cache_ip[ip] = 1
            # Crude back-pressure: pause briefly when the queue is long.
            if len(queue_ip) > 50:
                time.sleep(1)
            print("queued items:", len(queue_ip))
            queue_ip.append(ip)
    except Exception as e:
        print(e)
def parse_argv():
    """Build the command-line parser and parse ``sys.argv``.

    Options:
        --check-ip  check a single IP address and exit (default False).
        --config    reporter config file path (default "config.yml").
    """
    cli = argparse.ArgumentParser()
    cli.add_argument('--check-ip', dest='check_ip', default=False,
                     help="Check one IP address and exit.")
    cli.add_argument('--config', dest='config', default="config.yml",
                     help="Reporter config file.")
    return cli.parse_args(sys.argv[1:])
def init():
    """Initialise module globals: the Goshawk API client and the set of
    blocklisted IPs fetched from the configured source list."""
    global goshawk, blocklist_ip
    goshawk = GoshawkClient(
        config['goshawk']['api_url'])
    blocklist_ip = set(
        goshawk.get_records_values(list_name=config['goshawk']['source_list']))
def main():
    """Entry point: parse the CLI, load config, then either check a
    single IP (--check-ip) or run the worker pool consuming DNSBL log
    messages from RabbitMQ until the consumer stops."""
    options = parse_argv()
    setup(options.config)
    print(config)
    init()
    if options.check_ip:
        # One-shot mode: check the given address and exit immediately.
        check_ip(options.check_ip)
        sys.exit(0)
    workers = WorkerPool(
        target=worker,
        threads=config['reporter']['worker_threads'])
    workers.start()
    rmqconsumer = RabbitmqConsumer(
        amqp_uri=config['rabbitmq']['uri'],
        exchange=config['rabbitmq']['exchange'],
        routing_key=config['rabbitmq']['routing_key'],
        consumer_name_prefix=config['goshawk']['reporter'])
    rmqconsumer.consume(
        callback=callback)
    # consume() blocks until shutdown; then stop the workers and wait.
    global is_running
    is_running = False
    workers.join()
| StarcoderdataPython |
184027 | from unittest import TestCase
import torch
from transformers import AutoTokenizer
import nullprompt.templatizers as templatizers
class TestEncodeLabel(TestCase):
    """Tests for templatizers._encode_label using a BERT tokenizer."""

    def setUp(self):
        self._tokenizer = AutoTokenizer.from_pretrained('bert-base-cased')

    def test_single_token(self):
        # A single label string should encode to a 1x1 tensor of its id.
        expected = torch.tensor([self._tokenizer.convert_tokens_to_ids(['the'])])
        actual = templatizers._encode_label(self._tokenizer, 'the')
        assert torch.equal(actual, expected)

    def test_multiple_tokens(self):
        # A list of label strings should encode to a row of token ids.
        expected = torch.tensor([
            self._tokenizer.convert_tokens_to_ids(['a', 'the'])
        ])
        actual = templatizers._encode_label(self._tokenizer, ['a', 'the'])
        assert torch.equal(actual, expected)

    # TODO(rloganiv): The multi-word-piece failure case no longer raises
    # (downgraded to a log warning). With the separate multi-token
    # templatizer, consider raising again and restoring tests like:
    #   with self.assertRaises(ValueError):
    #       utils.encode_label(
    #           self._tokenizer,
    #           'Supercalifragilisticexpialidocious',
    #           tokenize=True,
    #       )
class TestTriggerTemplatizer(TestCase):
    """Tests for templatizers.TriggerTemplatizer with BERT and RoBERTa
    tokenizers: label encoding, model-input keys, trigger/predict masks,
    and [P] -> [MASK] substitution."""
    def setUp(self):
        # Template with three trigger slots [T] and one predict slot [P].
        self.default_template = '[T] [T] {arbitrary} [T] {fields} [P]'
        self.default_tokenizer = AutoTokenizer.from_pretrained(
            'bert-base-cased',
            add_prefix_space=True,
            additional_special_tokens=('[T]', '[P]'),
        )
        self.default_instance = {
            'arbitrary': 'does this',
            'fields': 'work',
            'label': 'and'
        }
    def test_bert(self):
        templatizer = templatizers.TriggerTemplatizer(
            self.default_template,
            self.default_tokenizer,
        )
        model_inputs, label = templatizer(self.default_instance)
        # Label should be mapped to its token id
        expected_label = torch.tensor([self.default_tokenizer.convert_tokens_to_ids([self.default_instance['label']])])
        assert torch.equal(expected_label, label)
        # For BERT, output is expected to have the following keys
        assert 'input_ids' in model_inputs
        assert 'token_type_ids' in model_inputs
        assert 'attention_mask' in model_inputs
        # Test that the custom masks match our expectations
        # (three True positions for [T], one for [P], incl. special tokens)
        expected_trigger_mask = torch.tensor(
            [[False, True, True, False, False, True, False, False, False]]
        )
        assert torch.equal(expected_trigger_mask, model_inputs['trigger_mask'])
        expected_predict_mask = torch.tensor(
            [[False, False, False, False, False, False, False, True, False]]
        )
        assert torch.equal(expected_predict_mask, model_inputs['predict_mask'])
        # Lastly, ensure [P] is replaced by a [MASK] token
        input_ids = model_inputs['input_ids']
        predict_mask = model_inputs['predict_mask']
        predict_token_id = input_ids[predict_mask].squeeze().item()
        assert predict_token_id == self.default_tokenizer.mask_token_id
    def test_roberta(self):
        tokenizer = AutoTokenizer.from_pretrained(
            'roberta-base',
            add_prefix_space=True,
            additional_special_tokens=('[T]', '[P]'),
        )
        templatizer = templatizers.TriggerTemplatizer(
            self.default_template,
            tokenizer,
        )
        model_inputs, label = templatizer(self.default_instance)
        # Label should be mapped to its token id
        expected_label = torch.tensor([tokenizer.convert_tokens_to_ids([self.default_instance['label']])])
        assert torch.equal(expected_label, label)
        # For RoBERTa, output is expected to have the following keys
        # (no token_type_ids, unlike BERT)
        assert 'input_ids' in model_inputs
        assert 'attention_mask' in model_inputs
        # Test that the custom masks match our expectations
        expected_trigger_mask = torch.tensor(
            [[False, True, True, False, False, True, False, False, False]]
        )
        assert torch.equal(expected_trigger_mask, model_inputs['trigger_mask'])
        expected_predict_mask = torch.tensor(
            [[False, False, False, False, False, False, False, True, False]]
        )
        assert torch.equal(expected_predict_mask, model_inputs['predict_mask'])
        # Lastly, ensure [P] is replaced by a [MASK] token
        input_ids = model_inputs['input_ids']
        predict_mask = model_inputs['predict_mask']
        predict_token_id = input_ids[predict_mask].squeeze().item()
        assert predict_token_id == tokenizer.mask_token_id
class TestMultiTokenTemplatizer(TestCase):
    """Tests for templatizers.MultiTokenTemplatizer: shape agreement
    between inputs and labels, mask counts, and padding of multi-word-
    piece labels."""
    def setUp(self):
        # Template with three trigger slots [T] and one predict slot [P].
        self.default_template = '[T] [T] {arbitrary} [T] {fields} [P]'
        self.default_tokenizer = AutoTokenizer.from_pretrained(
            'bert-base-cased',
            add_prefix_space=True,
            additional_special_tokens=('[T]', '[P]'),
        )
    def test_label(self):
        templatizer = templatizers.MultiTokenTemplatizer(
            self.default_template,
            self.default_tokenizer,
            add_padding=True,
        )
        format_kwargs = {
            'arbitrary': 'ehh',
            'fields': 'whats up doc',
            'label': 'bugs bunny'
        }
        model_inputs, labels = templatizer(format_kwargs)
        input_ids = model_inputs.pop('input_ids')
        # Check that all shapes are the same
        for tensor in model_inputs.values():
            self.assertEqual(input_ids.shape, tensor.shape)
        self.assertEqual(input_ids.shape, labels.shape)
        # Check that detokenized inputs replaced [T] and [P] with the correct
        # number of masks. The expected number of predict masks is 5,
        # corresponding to:
        # ['bugs', 'b', '##un', '##ny', '<pad>']
        # and the expected number of trigger masks is 3.
        self.assertEqual(model_inputs['trigger_mask'].sum().item(), 3)
        self.assertEqual(model_inputs['predict_mask'].sum().item(), 5)
        # Total [MASK] tokens = 3 trigger + 5 predict positions.
        mask_token_id = self.default_tokenizer.mask_token_id
        num_masks = input_ids.eq(mask_token_id).sum().item()
        self.assertEqual(num_masks, 8)
| StarcoderdataPython |
3223107 | <reponame>anemesio/Pyyhon3-Exercicios
# Read an integer from the user and report its predecessor and successor.
n = int(input('Digite um valor: '))
antecessor = n - 1
sucessor = n + 1
print('Analisando o valor {}, seu antecessor é {} e seu sucessor é {}.'.format(n, antecessor, sucessor))
| StarcoderdataPython |
1781203 | <filename>sdk/AsposeEmailCloudSdk/api/contact_api.py
# coding: utf-8
# ----------------------------------------------------------------------------
# <copyright company="Aspose" file="contact_api.py">
# Copyright (c) 2018-2020 Aspose Pty Ltd. All rights reserved.
# </copyright>
# <summary>
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
# </summary>
# ----------------------------------------------------------------------------
from __future__ import absolute_import
from AsposeEmailCloudSdk.api.api_base import ApiBase
from AsposeEmailCloudSdk.models import *
class ContactApi(ApiBase):
"""
Aspose.Email Cloud API. ContactApi operations.
"""
def __init__(self, api_client):
super(ContactApi, self).__init__(api_client)
def as_file(self, request: ContactAsFileRequest) -> str:
"""Converts contact model to specified format and returns as file
:param request: Contact model and format to convert
:type request: ContactAsFileRequest
:return: str
"""
# verify the required parameter 'request' is set
if request is None:
raise ValueError("Missing the required parameter `request` when calling `as_file`")
collection_formats = {}
path = '/email/Contact/as-file'
header_params = {}
# HTTP header `Accept`
header_params['Accept'] = self._select_header_accept(
['multipart/form-data'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self._select_header_content_type(
['application/json'])
body_params = request
# Authentication setting
auth_settings = ['JWT']
http_request_object = HttpRequest(path, None, None, header_params, None, body_params, None, None, auth_settings)
return self._make_request(http_request_object, 'PUT', 'file')
def as_mapi(self, contact_dto: ContactDto) -> MapiContactDto:
"""Converts ContactDto to MapiContactDto.
:param contact_dto: Contact model to convert
:type contact_dto: ContactDto
:return: MapiContactDto
"""
# verify the required parameter 'contact_dto' is set
if contact_dto is None:
raise ValueError("Missing the required parameter `contact_dto` when calling `as_mapi`")
collection_formats = {}
path = '/email/Contact/as-mapi'
header_params = {}
# HTTP header `Accept`
header_params['Accept'] = self._select_header_accept(
['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self._select_header_content_type(
['application/json'])
body_params = contact_dto
# Authentication setting
auth_settings = ['JWT']
http_request_object = HttpRequest(path, None, None, header_params, None, body_params, None, None, auth_settings)
return self._make_request(http_request_object, 'PUT', 'MapiContactDto')
    def convert(self, request: ContactConvertRequest) -> str:
        """Converts contact document to specified format and returns as file.

        :param request: ContactConvertRequest object with parameters
        :type request: ContactConvertRequest
        :return: str
        :raises ValueError: if a required request field is missing
        """
        # verify the required parameter 'to_format' is set
        if request.to_format is None:
            raise ValueError("Missing the required parameter `to_format` when calling `convert`")
        # verify the required parameter 'from_format' is set
        if request.from_format is None:
            raise ValueError("Missing the required parameter `from_format` when calling `convert`")
        # verify the required parameter 'file' is set
        if request.file is None:
            raise ValueError("Missing the required parameter `file` when calling `convert`")
        collection_formats = {}
        path = '/email/Contact/convert'
        path_params = {}
        query_params = []
        # Generated routing: each field is substituted into the URL
        # template when a matching placeholder exists, otherwise it is
        # sent as a query parameter.
        path_parameter = '{' + self._lowercase_first_letter('toFormat') + '}'
        if path_parameter in path:
            path = path.replace(path_parameter, request.to_format if request.to_format is not None else '')
        else:
            if request.to_format is not None:
                query_params.append((self._lowercase_first_letter('toFormat'), request.to_format))
        path_parameter = '{' + self._lowercase_first_letter('fromFormat') + '}'
        if path_parameter in path:
            path = path.replace(path_parameter, request.from_format if request.from_format is not None else '')
        else:
            if request.from_format is not None:
                query_params.append((self._lowercase_first_letter('fromFormat'), request.from_format))
        form_params = []
        local_var_files = []
        # The document itself is uploaded as a multipart file field.
        if request.file is not None:
            local_var_files.append((self._lowercase_first_letter('File'), request.file))
        header_params = {}
        # HTTP header `Accept`
        header_params['Accept'] = self._select_header_accept(
            ['multipart/form-data'])
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self._select_header_content_type(
            ['multipart/form-data'])
        # Authentication setting
        auth_settings = ['JWT']
        http_request_object = HttpRequest(path, path_params, query_params, header_params, form_params, None, local_var_files,
                                          collection_formats, auth_settings)
        return self._make_request(http_request_object, 'PUT', 'file')
    def from_file(self, request: ContactFromFileRequest) -> ContactDto:
        """Converts contact document to a model representation.

        :param request: ContactFromFileRequest object with parameters
        :type request: ContactFromFileRequest
        :return: ContactDto
        :raises ValueError: if a required request field is missing
        """
        # verify the required parameter 'format' is set
        if request.format is None:
            raise ValueError("Missing the required parameter `format` when calling `from_file`")
        # verify the required parameter 'file' is set
        if request.file is None:
            raise ValueError("Missing the required parameter `file` when calling `from_file`")
        collection_formats = {}
        path = '/email/Contact/from-file'
        path_params = {}
        query_params = []
        # Generated routing: 'format' is substituted into the URL template
        # when a placeholder exists, otherwise sent as a query parameter.
        path_parameter = '{' + self._lowercase_first_letter('format') + '}'
        if path_parameter in path:
            path = path.replace(path_parameter, request.format if request.format is not None else '')
        else:
            if request.format is not None:
                query_params.append((self._lowercase_first_letter('format'), request.format))
        form_params = []
        local_var_files = []
        # The document itself is uploaded as a multipart file field.
        if request.file is not None:
            local_var_files.append((self._lowercase_first_letter('File'), request.file))
        header_params = {}
        # HTTP header `Accept`
        header_params['Accept'] = self._select_header_accept(
            ['application/json'])
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self._select_header_content_type(
            ['multipart/form-data'])
        # Authentication setting
        auth_settings = ['JWT']
        http_request_object = HttpRequest(path, path_params, query_params, header_params, form_params, None, local_var_files,
                                          collection_formats, auth_settings)
        return self._make_request(http_request_object, 'PUT', 'ContactDto')
def get(self, request: ContactGetRequest) -> ContactDto:
    """Get contact document from storage.

    :param request: ContactGetRequest object with parameters
    :type request: ContactGetRequest
    :return: ContactDto
    """
    # Fail fast on missing required parameters.
    if request.format is None:
        raise ValueError("Missing the required parameter `format` when calling `get`")
    if request.file_name is None:
        raise ValueError("Missing the required parameter `file_name` when calling `get`")
    collection_formats = {}
    path = '/email/Contact'
    path_params = {}
    query_params = []
    # Route each value into the path template when a matching placeholder
    # exists; otherwise send it as a query-string parameter.
    for camel_name, value in (('format', request.format),
                              ('fileName', request.file_name),
                              ('folder', request.folder),
                              ('storage', request.storage)):
        placeholder = '{' + self._lowercase_first_letter(camel_name) + '}'
        if placeholder in path:
            path = path.replace(placeholder, value if value is not None else '')
        elif value is not None:
            query_params.append((self._lowercase_first_letter(camel_name), value))
    form_params = []
    local_var_files = []
    header_params = {
        'Accept': self._select_header_accept(['application/json']),
        'Content-Type': self._select_header_content_type(['application/json']),
    }
    # Authentication setting
    auth_settings = ['JWT']
    http_request_object = HttpRequest(path, path_params, query_params,
                                      header_params, form_params, None,
                                      local_var_files, collection_formats,
                                      auth_settings)
    return self._make_request(http_request_object, 'GET', 'ContactDto')
def get_as_file(self, request: ContactGetAsFileRequest) -> str:
    """Converts contact document from storage to specified format and returns as file

    :param request: ContactGetAsFileRequest object with parameters
    :type request: ContactGetAsFileRequest
    :return: str
    """
    # Fail fast on missing required parameters.
    for required in ('file_name', 'to_format', 'from_format'):
        if getattr(request, required) is None:
            raise ValueError("Missing the required parameter `%s` when calling `get_as_file`" % required)
    collection_formats = {}
    path = '/email/Contact/as-file'
    path_params = {}
    query_params = []
    # Route each value into the path template when a matching placeholder
    # exists; otherwise send it as a query-string parameter.
    for camel_name, value in (('fileName', request.file_name),
                              ('toFormat', request.to_format),
                              ('fromFormat', request.from_format),
                              ('storage', request.storage),
                              ('folder', request.folder)):
        placeholder = '{' + self._lowercase_first_letter(camel_name) + '}'
        if placeholder in path:
            path = path.replace(placeholder, value if value is not None else '')
        elif value is not None:
            query_params.append((self._lowercase_first_letter(camel_name), value))
    form_params = []
    local_var_files = []
    header_params = {
        'Accept': self._select_header_accept(['multipart/form-data']),
        'Content-Type': self._select_header_content_type(['application/json']),
    }
    # Authentication setting
    auth_settings = ['JWT']
    http_request_object = HttpRequest(path, path_params, query_params,
                                      header_params, form_params, None,
                                      local_var_files, collection_formats,
                                      auth_settings)
    return self._make_request(http_request_object, 'GET', 'file')
def get_list(self, request: ContactGetListRequest) -> ContactStorageList:
    """Get contact list from storage folder.

    :param request: ContactGetListRequest object with parameters
    :type request: ContactGetListRequest
    :return: ContactStorageList
    """
    # Fail fast on the single required parameter.
    if request.format is None:
        raise ValueError("Missing the required parameter `format` when calling `get_list`")
    collection_formats = {}
    path = '/email/Contact/list'
    path_params = {}
    query_params = []
    # Route each value into the path template when a matching placeholder
    # exists; otherwise send it as a query-string parameter.
    for camel_name, value in (('format', request.format),
                              ('folder', request.folder),
                              ('storage', request.storage),
                              ('itemsPerPage', request.items_per_page),
                              ('pageNumber', request.page_number)):
        placeholder = '{' + self._lowercase_first_letter(camel_name) + '}'
        if placeholder in path:
            path = path.replace(placeholder, value if value is not None else '')
        elif value is not None:
            query_params.append((self._lowercase_first_letter(camel_name), value))
    form_params = []
    local_var_files = []
    header_params = {
        'Accept': self._select_header_accept(['application/json']),
        'Content-Type': self._select_header_content_type(['application/json']),
    }
    # Authentication setting
    auth_settings = ['JWT']
    http_request_object = HttpRequest(path, path_params, query_params,
                                      header_params, form_params, None,
                                      local_var_files, collection_formats,
                                      auth_settings)
    return self._make_request(http_request_object, 'GET', 'ContactStorageList')
def save(self, request: ContactSaveRequest):
    """Save contact to storage.

    :param request: Create/Update contact request.
    :type request: ContactSaveRequest
    :return: None
    """
    # The whole request object is the body, so it is the one required input.
    if request is None:
        raise ValueError("Missing the required parameter `request` when calling `save`")
    collection_formats = {}
    path = '/email/Contact'
    header_params = {
        'Accept': self._select_header_accept(['application/json']),
        'Content-Type': self._select_header_content_type(['application/json']),
    }
    body_params = request
    # Authentication setting
    auth_settings = ['JWT']
    http_request_object = HttpRequest(path, None, None, header_params, None,
                                      body_params, None, None, auth_settings)
    return self._make_request(http_request_object, 'PUT', None)
| StarcoderdataPython |
3376186 | """ Shim for IPython before and after the big split
"""
try:
    # IPython >= 4 ("the big split"): traitlets/nbformat/nbconvert are
    # standalone packages.
    import traitlets
    import traitlets.config as config
except ImportError:
    # IPython < 4: fall back to the modules bundled inside IPython.
    from IPython.utils import traitlets
    from IPython import config
try:
    import nbformat
except ImportError:
    from IPython import nbformat
try:
    import nbconvert
except ImportError:
    from IPython import nbconvert

# Use notebook format version 4 by default
nbf = nbformat.v4
| StarcoderdataPython |
4824278 | # Copyright (c) 2015 Advanced Micro Devices, Inc.
# All rights reserved
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: <NAME>
from __future__ import print_function
import m5
from m5.objects import *
from m5.util.convert import *
import operator, os, platform, getpass
from os import mkdir, makedirs, getpid, listdir, stat, access
from pwd import getpwuid
from os.path import join as joinpath
from os.path import isdir
from shutil import rmtree, copyfile
def hex_mask(terms):
    """Return an 8-digit hex bitmask with bit i set for every i in *terms*."""
    dec_mask = 0
    for term in terms:
        dec_mask |= 2 ** term
    return "%08x" % dec_mask
def file_append(path, contents):
    """Append str(contents) to the file named by joining the *path* components."""
    handle = open(joinpath(*path), 'a')
    try:
        handle.write(str(contents))
    finally:
        handle.close()
def replace_tree(path):
    """Recreate *path* as an empty directory.

    Any existing directory tree at *path* is deleted first.  A stray plain
    file (or dangling symlink) at *path* is removed as well, so mkdir()
    cannot fail with an "already exists" error — the original version only
    handled the directory case.
    """
    if isdir(path):
        rmtree(path)
    elif os.path.lexists(path):
        os.remove(path)
    mkdir(path)
def config_filesystem(options):
    """Build a minimal fake filesystem (/proc, /sys, /tmp) under
    <outdir>/fs that syscall-emulated applications can be redirected into
    (see redirect_paths)."""
    fsdir = joinpath(m5.options.outdir, 'fs')
    replace_tree(fsdir)
    # Set up /proc
    procdir = joinpath(fsdir, 'proc')
    mkdir(procdir)
    for i in xrange(options.num_cpus):
        # Synthesize one /proc/cpuinfo stanza per simulated CPU.
        one_cpu = 'processor : %d\n' % (i) + \
                  'vendor_id : Generic\n' + \
                  'cpu family : 0\n' + \
                  'model : 0\n' + \
                  'model name : Generic\n' + \
                  'stepping : 0\n' + \
                  'cpu MHz : %0.3d\n' \
                  % (toFrequency(options.cpu_clock)/mega) + \
                  'cache size: : %dK\n' \
                  % (toMemorySize(options.l2_size)/kibi) + \
                  'physical id : 0\n' + \
                  'siblings : %s\n' \
                  % options.num_cpus + \
                  'core id : %d\n' \
                  % i + \
                  'cpu cores : %d\n' \
                  % options.num_cpus + \
                  'fpu : yes\n' + \
                  'fpu exception : yes\n' + \
                  'cpuid level : 1\n' + \
                  'wp : yes\n' + \
                  'flags : fpu\n' + \
                  'cache alignment : %d\n' \
                  % options.cacheline_size + \
                  '\n'
        file_append((procdir, 'cpuinfo'), one_cpu)
    # /proc/stat: one aggregate line plus one all-zero line per CPU.
    file_append((procdir, 'stat'), 'cpu 0 0 0 0 0 0 0\n')
    for i in xrange(options.num_cpus):
        file_append((procdir, 'stat'), 'cpu%d 0 0 0 0 0 0 0\n' % i)
    # Set up /sys
    sysdir = joinpath(fsdir, 'sys')
    mkdir(sysdir)
    # Set up /sys/devices/system/cpu
    cpudir = joinpath(sysdir, 'devices', 'system', 'cpu')
    makedirs(cpudir)
    # Kernel-style CPU range strings, e.g. "0-3" for four CPUs.
    file_append((cpudir, 'online'), '0-%d' % (options.num_cpus-1))
    file_append((cpudir, 'possible'), '0-%d' % (options.num_cpus-1))
    # Set up /tmp
    tmpdir = joinpath(fsdir, 'tmp')
    replace_tree(tmpdir)
def register_node(cpu_list, mem, node_number):
    """Create /sys/devices/system/node/node<N> with its cpumap and meminfo."""
    nodedir = joinpath(m5.options.outdir, 'fs', 'sys', 'devices',
                       'system', 'node', 'node%d' % node_number)
    makedirs(nodedir)
    file_append((nodedir, 'cpumap'), hex_mask(cpu_list))
    file_append((nodedir, 'meminfo'),
                'Node %d MemTotal: %dkB' % (node_number,
                                            toMemorySize(str(mem))/kibi))
def register_cpu(physical_package_id, core_siblings,
                 core_id, thread_siblings):
    """Populate /sys/devices/system/cpu/cpu<core_id> (topology and an empty
    cache directory for register_cache to fill in)."""
    cpudir = joinpath(m5.options.outdir, 'fs', 'sys', 'devices', 'system',
                      'cpu', 'cpu%d' % core_id)
    for subdir in ('topology', 'cache'):
        if not isdir(joinpath(cpudir, subdir)):
            makedirs(joinpath(cpudir, subdir))
    file_append((cpudir, 'online'), '1')
    # Sibling sets are written as hex bitmasks, matching the kernel format.
    for name, value in (('physical_package_id', physical_package_id),
                        ('core_siblings', hex_mask(core_siblings)),
                        ('core_id', core_id),
                        ('thread_siblings', hex_mask(thread_siblings))):
        file_append((cpudir, 'topology', name), value)
def register_cache(level, idu_type, size, line_size, assoc, cpus):
    """Create a /sys/.../cpu<i>/cache/index<j> entry on every CPU in *cpus*
    describing one (possibly shared) cache.

    level      -- cache level (1, 2, ...)
    idu_type   -- 'Instruction', 'Data' or 'Unified'
    size       -- total cache size (memory-size string, e.g. '64kB')
    line_size  -- block size in bytes
    assoc      -- associativity
    cpus       -- list of CPU ids sharing this cache
    """
    fsdir = joinpath(m5.options.outdir, 'fs')
    for i in cpus:
        cachedir = joinpath(fsdir, 'sys', 'devices', 'system', 'cpu',
                            'cpu%d' % i, 'cache')
        # Find the first unused index<j> slot for this CPU.
        j = 0
        while isdir(joinpath(cachedir, 'index%d' % j)):
            j += 1
        indexdir = joinpath(cachedir, 'index%d' % j)
        makedirs(indexdir)
        file_append((indexdir, 'level'), level)
        file_append((indexdir, 'type'), idu_type)
        file_append((indexdir, 'size'), "%dK" % (toMemorySize(size)/kibi))
        file_append((indexdir, 'coherency_line_size'), line_size)
        # Since cache size = number of sets * associativity * block size,
        # the set count is size / (assoc * line_size).  The original code
        # computed size / assoc * line_size (left-to-right), overstating
        # number_of_sets by a factor of line_size**2.
        num_sets = toMemorySize(size) / (int(assoc) * int(line_size))
        file_append((indexdir, 'number_of_sets'), num_sets)
        file_append((indexdir, 'physical_line_partition'), '1')
        file_append((indexdir, 'shared_cpu_map'), hex_mask(cpus))
def redirect_paths(chroot):
    """Build the RedirectPath list mapping guest paths to host directories.

    /proc, /sys and /tmp are redirected into the fake filesystem under the
    gem5 output directory; everything else falls through to *chroot*.
    """
    fake_fs = "%s/fs" % m5.options.outdir
    mapping = [("/proc", fake_fs + "/proc"),
               ("/sys", fake_fs + "/sys"),
               ("/tmp", fake_fs + "/tmp"),
               ("/", "%s" % chroot)]
    return [RedirectPath(app_path=app, host_paths=[host])
            for app, host in mapping]
| StarcoderdataPython |
1773070 | from __future__ import absolute_import
import sqlalchemy, sqlalchemy.exc
from sqlalchemy import Column, Integer, String
from . import Base, get_default_session
# Maps session -> {dictionary text -> database ID}; built lazily by
# _get_dict_cache_for_session (get_dict_id rebinds a local of the same
# name to the inner per-session dict).
_dict_id = {}
# Maps session -> {dictionary text -> DictionaryItem object}
_dict_obj = {}
class DictionaryItem(Base):
    """ORM row of the ``dictionary`` table: a unique, interned text string."""

    __tablename__ = 'dictionary'

    id = Column(Integer, primary_key=True)
    text = Column(String(128), unique=True)

    def __init__(self, text):
        self.text = text

    def __repr__(self):
        return "<DictionaryItem %s>" % self.text

    def providing_class(self, handler):
        # Resolve the property class this dictionary entry names.
        from .. import properties
        return properties.providing_class(self.text, handler)
# Unique sentinel: lets callers pass default=None and still let us
# distinguish "no default supplied" from an explicit None default.
raise_exception = object()
def get_dict_id(text, default=raise_exception, session=None, allow_query=True):
    """Get a DictionaryItem id for text (possibly cached). Raises KeyError if
    no dictionary object exists for the specified text, unless a default is provided
    in which case the default value is returned instead."""
    from . import Session
    if session is None:
        session = Session()
        # NOTE(review): the cache is keyed on the *default* session here even
        # though the query below runs on the freshly created session — looks
        # intentional (both map the same table), but worth confirming.
        _dict_id = _get_dict_cache_for_session(get_default_session())
        close_session = True
    else:
        _dict_id = _get_dict_cache_for_session(session)
        close_session = False
    try:
        return _dict_id[text]
    except KeyError:
        if allow_query:
            try:
                obj = session.query(DictionaryItem).filter_by(text=text).first()
            except Exception:
                # Query failed (e.g. database error).  The original code used a
                # bare except here, which also swallowed KeyboardInterrupt and
                # SystemExit; narrow it to Exception.
                if default is raise_exception:
                    raise
                else:
                    return default
            finally:
                if close_session:
                    session.close()
        else:
            obj = None
        if obj is None:
            # Not cached and not in the database: re-raise the KeyError
            # unless the caller supplied a fallback value.
            if default is raise_exception:
                raise
            else:
                return default
        # Remember the id so future lookups skip the database entirely.
        _dict_id[text] = obj.id
        return obj.id
def get_or_create_dictionary_item(session, name):
    """This tries to get the DictionaryItem corresponding to name from
    the database. If it doesn't exist, it creates a pending
    object. Note that this must be called *while the database is
    locked under the specified session* to prevent duplicate items
    being created"""
    # Per-session object cache (distinct from the integer-id cache used
    # by get_dict_id).
    if session not in _dict_obj:
        _dict_obj[session] = {}
    # try to get it from the cache
    obj = _dict_obj[session].get(name, None)
    if obj is not None:
        return obj
    # try to get it from the db
    obj = session.query(DictionaryItem).filter_by(text=name).first()
    if obj is None:
        # try to create it
        try:
            obj = DictionaryItem(name)
            session.add(obj)
            #session.commit()
        except sqlalchemy.exc.IntegrityError:
            # Another writer inserted the same text concurrently; roll back
            # our pending insert and fetch the row they created.
            session.rollback()
            obj = session.query(DictionaryItem).filter_by(text=name).first()
            if obj is None:
                raise # can't get it from the DB, can't create it from the DB... who knows...
    _dict_obj[session][name] = obj
    return obj
def _get_dict_cache_for_session(session):
    """Return (building it on first use) the text->id cache for *session*."""
    cached = _dict_id.get(session, None)
    if cached is None:
        # First lookup for this session: load the whole table once.
        cached = {item.text: item.id for item in session.query(DictionaryItem)}
        _dict_id[session] = cached
    return cached
def get_lexicon(session):
    """Get a list of all strings known in the dictionary table"""
    return _get_dict_cache_for_session(session).keys()
138393 | <gh_stars>0
import cmdInterface

# Instantiate the command-line interface and immediately start a new project.
cmd = cmdInterface.cmdInterface()
cmd.newProject()
| StarcoderdataPython |
136749 | <reponame>ClementMaliet/playground-metrics<gh_stars>1-10
from playground_metrics.metrics_helper.mean_fbeta import MeanFBetaAtThresholds
| StarcoderdataPython |
3335320 | import json
def sanitize_json(json_dict, max_item_length=100):
    """Sanitizes json objects for safe storage in Postgres.

    :param json_dict: dict to sanitize; ``None`` passes through unchanged.
    :param max_item_length: maximum length for top-level string values;
        longer values are shortened to exactly this length, keeping the
        head and the last 5 characters around an ellipsis marker.
    :return: a new, sanitized dict (or ``None``).
    """
    if json_dict is None:
        return None
    # Round-trip through a JSON string so escaped NUL characters
    # ("\u0000"), which Postgres rejects, become the harmless literal
    # text "\x00".
    returned = json.dumps(json_dict)
    returned = returned.replace('\\u0000', '\\\\x00')
    returned = json.loads(returned)
    for key, value in list(returned.items()):
        if isinstance(value, (str, bytes)) and len(value) > max_item_length:
            remainder = 5
            # Use a marker of the same type as the value: the original code
            # concatenated a str '...' onto bytes, which raises TypeError
            # on Python 3 if a bytes value ever reaches this branch.
            marker = b'...' if isinstance(value, bytes) else '...'
            returned[key] = (value[:max_item_length - remainder - len(marker)]
                             + marker + value[-remainder:])
    return returned
| StarcoderdataPython |
1658474 | import json
import ujson
import time
import pickle
# Some performance tests to compare different approaches to serializing
# dicts in apache beam
# wrapper class for a dict. We need a unique class in dataflow to associate with
# a specific coder
class Message(dict):
    """Plain dict subclass: gives Dataflow a distinct type to which a
    specific coder can be registered."""
    pass
# Representative payload mixing short strings, a large int and floats.
d = dict(
    field_1= 'twas brillig and the slithy toves',
    astro= 123456789012345,
    lat= 0.321,
    lon= 1.234,
    field_2= 'aaa bbb ccc ddd'
)

# Pre-serialize once so the timing loops below measure deserialization only.
json_str = json.dumps(d)
pickle_str = pickle.dumps(d)

n=1000000
print("Runing perf tests with %s iterations..." % n)

start = time.time()
for i in range(n):
    d = pickle.loads(pickle_str)
print("%s pickle.loads" % (time.time() - start))

start = time.time()
for i in range(n):
    d = json.loads(json_str)
print("%s json.loads " % ( time.time() - start))

start = time.time()
for i in range(n):
    d = ujson.loads(json_str)
print("%s ujson.loads" % (time.time() - start))

# Same as above, but also measures the cost of wrapping each decoded dict
# in the Message class (as the Dataflow coder would).
start = time.time()
for i in range(n):
    d = ujson.loads(json_str)
    m = Message(d)
print("%s ujson.loads with assignment" % (time.time() - start))
| StarcoderdataPython |
1658819 | <filename>report_eb_autoscaling_alarms/asg_describe_scaling.py<gh_stars>0
# Writes an output CSV with summary of ASG Activity.
# You could use Excel afterwards on the CSV to sort descending NumActivityStatusSuccessful.
# Compare to the cloudwatch alarm history.
import boto3.session
from pathlib import Path
from datetime import datetime, timedelta
import pytz
import re
import json
import operator
from report_eb_autoscaling_alarms import eb_by_resource, aws_cache, util
# Hard cap on pagination so a misbehaving API can never loop forever.
MAX_PAGES = 10000

# Module-wide boto3 autoscaling client, created by init_client().
_asg_client = None
def init_client(profile_name, region_name):
    """Create the module-wide autoscaling client for the given AWS
    profile and region; must be called before any other helper here."""
    asg_session = boto3.session.Session(profile_name=profile_name, region_name=region_name)
    global _asg_client
    # Stored at module level so the other functions in this module share it.
    _asg_client = asg_session.client('autoscaling')
def get_scaling_activity_pages(asg_name, refresh_cache=False):
    """Return the list of paginated describe_scaling_activities responses
    for *asg_name*, served from the local cache unless refresh_cache."""
    key = 'describe_scaling_activities-' + asg_name
    if not refresh_cache and aws_cache.has_key(key):
        return aws_cache.cache_get(key)
    activity_pages = []
    next_token = None
    while len(activity_pages) < MAX_PAGES:
        kwargs = {'AutoScalingGroupName': asg_name}
        if next_token:
            kwargs['NextToken'] = next_token
        page = _asg_client.describe_scaling_activities(**kwargs)
        activity_pages.append(page)
        next_token = page.get('NextToken')
        if not next_token:
            break
    if len(activity_pages) == MAX_PAGES:
        print('WARNING: {} results truncated at {} pages'.format(key, MAX_PAGES))
    aws_cache.cache_put(key, activity_pages)
    return activity_pages
def get_asg(asg_name, refresh_cache=False):
    """Return the describe_auto_scaling_groups response for one ASG,
    served from the local cache unless refresh_cache."""
    key = 'describe_auto_scaling_groups-' + asg_name
    if not refresh_cache and aws_cache.has_key(key):
        return aws_cache.cache_get(key)
    response = _asg_client.describe_auto_scaling_groups(AutoScalingGroupNames=[asg_name])
    aws_cache.cache_put(key, response)
    return response
def calc_and_write_scaling_activity_for_beanstalk_asgs(refresh_cache=False):
    """Summarize scaling activity for every Beanstalk ASG and write the CSV.

    refresh_cache applies to the ASG and scaling-activity lookups only; the
    environment, resource and alarm data are refreshed at module start.
    """
    summary_rows = []
    for pair in lookup_beanstalk_asg_env_pairs(refresh_cache):
        group = pair['ASG']['AutoScalingGroups'][0]
        pages = get_scaling_activity_pages(group['AutoScalingGroupName'], refresh_cache)
        summary_rows.append(calc_scaling_activity_one_asg(pages, group, pair['EnvName']))
    write_scaling_activity_for_beanstalk_asgs(summary_rows)
def write_scaling_activity_for_beanstalk_asgs(summary_rows):
    """Write one CSV line per ASG summary to OUTPUT_DIR/asg_activities.csv."""
    util.ensure_path_exists(util.OUTPUT_DIR)
    output_filename = Path(util.OUTPUT_DIR + '/asg_activities.csv')
    with output_filename.open(mode='w', encoding='UTF-8') as output_file:
        write_column_headers(output_file)
        for summary_row in summary_rows:
            write_summary_row(summary_row, output_file)
    print('wrote scaling activity for {} ASGs into {}'.format(len(summary_rows),
                                                              output_filename))
def write_column_headers(output_file):
    """Write the CSV header line for the ASG activity report."""
    output_file.write(
        'ASGName,EnvName,ASGMin,ASGMax,ActivityMaxAge,NumActivity,'
        'NumActivityStatusSuccessful,NumActivityStatusFailed,'
        'NumActivityDescLaunching,NumActivityDescTerminating,'
        'NumAlarmsCauseLaunching,NumAlarmsCauseTerminating,'
        'NameAlarmsCauseLaunching,NameAlarmsCauseTerminating\n')
def write_summary_row(row, output_file):
    """Write one CSV data line; durations and alarm-name lists are quoted
    (they can contain commas)."""
    def as_is(value):
        return value
    # (column name, formatter) in output order — must match write_column_headers.
    spec = [('ASGName', as_is),
            ('EnvName', as_is),
            ('ASGMin', str),
            ('ASGMax', str),
            ('ActivityMaxAge', util.quote),
            ('NumActivity', str),
            ('NumActivityStatusSuccessful', str),
            ('NumActivityStatusFailed', str),
            ('NumActivityDescLaunching', str),
            ('NumActivityDescTerminating', str),
            ('NumAlarmsCauseLaunching', str),
            ('NumAlarmsCauseTerminating', str),
            ('NameAlarmsCauseLaunching', util.quote),
            ('NameAlarmsCauseTerminating', util.quote)]
    output_file.write(','.join(fmt(row[name]) for name, fmt in spec) + '\n')
def lookup_beanstalk_asg_env_pairs(refresh_cache):
    """Pair every Beanstalk environment name with each of its ASG
    descriptions, as a list of {'ASG': ..., 'EnvName': ...} dicts."""
    pairs = []
    for env in eb_by_resource.get_envs()['Environments']:
        env_name = env['EnvironmentName']
        env_resources = eb_by_resource.get_resources(env_name)
        for asg_resource in env_resources['EnvironmentResources']['AutoScalingGroups']:
            pairs.append({'ASG': get_asg(asg_resource['Name'], refresh_cache),
                          'EnvName': env_name})
    return pairs
# Returns a summary of the asg and its scaling activity.
def calc_scaling_activity_one_asg(activity_pages, asg, env_name):
    """Fold the paginated scaling-activity responses for one ASG into a
    single summary dict (one CSV row's worth of data)."""
    far_in_the_future = pytz.utc.localize(datetime(2999, 12, 31))
    oldest_start_time = far_in_the_future
    total_activity_count = 0
    activity_counts = {'Successful': 0, 'Failed': 0, 'Launching': 0, 'Terminating': 0}
    alarm_causes = {'Launching': {}, 'Terminating': {}}
    for activity_page in activity_pages:
        for scaling_activity in activity_page['Activities']:
            # http://docs.aws.amazon.com/AutoScaling/latest/APIReference/API_Activity.html
            total_activity_count += 1
            # Track the oldest StartTime so we can report how far back the
            # available activity history reaches.
            start_time = util.ensure_tz(scaling_activity['StartTime'])
            if start_time < oldest_start_time:
                oldest_start_time = start_time
            status_code = scaling_activity['StatusCode']
            if status_code == 'Successful' or status_code == 'Failed':
                increment_activity_count(activity_counts, status_code)
            alarm_name = extract_alarm_name(scaling_activity)
            # The Description text begins with "Launching"/"Terminating";
            # use that to classify the activity and credit its alarm.
            m = re.match('^(Launching|Terminating)', scaling_activity['Description'])
            if m:
                launch_or_term = m.group(1)
                increment_activity_count(activity_counts, launch_or_term)
                increment_alarm_causes(alarm_causes, launch_or_term, alarm_name)
    if oldest_start_time == far_in_the_future:
        # No activity seen at all: report a zero-length history window.
        activity_max_age = timedelta(0)
    else:
        now = datetime.now(oldest_start_time.tzinfo)
        activity_max_age = now - oldest_start_time
    num_alarms_launching, name_alarms_launching = summarize_alarm_causes(alarm_causes, 'Launching')
    num_alarms_terminating, name_alarms_terminating = summarize_alarm_causes(alarm_causes, 'Terminating')
    return {
        'ASGName': asg['AutoScalingGroupName'],
        'EnvName': env_name,
        'ASGMin': asg['MinSize'],
        'ASGMax': asg['MaxSize'],
        'ActivityMaxAge': activity_max_age,
        'NumActivity': total_activity_count,
        'NumActivityStatusSuccessful': activity_counts['Successful'],
        'NumActivityStatusFailed': activity_counts['Failed'],
        'NumActivityDescLaunching': activity_counts['Launching'],
        'NumActivityDescTerminating': activity_counts['Terminating'],
        'NumAlarmsCauseLaunching': num_alarms_launching,
        'NumAlarmsCauseTerminating': num_alarms_terminating,
        'NameAlarmsCauseLaunching': name_alarms_launching,
        'NameAlarmsCauseTerminating': name_alarms_terminating
    }
def extract_alarm_name(scaling_activity):
    """Return the name of the CloudWatch alarm that triggered this activity.

    Two ways to get the alarm name exist: the structured 'Details' JSON and
    the freeform 'Cause' string; we parse 'Details'.  Falls back to
    'Unknown_Alarm' when no invoking alarm is recorded.
    """
    if 'Details' in scaling_activity:
        details = json.loads(scaling_activity['Details'])
        alarms = details.get('InvokingAlarms')
        if alarms:
            return alarms[0]['AlarmName']
    return 'Unknown_Alarm'
def increment_activity_count(activity_counts, key):
    """Increment the counter for *key* in *activity_counts*, creating it at
    zero if absent (dict.get replaces the manual membership test)."""
    activity_counts[key] = activity_counts.get(key, 0) + 1
def increment_alarm_causes(alarm_causes, launch_or_term, alarm_name):
    """Count one activity caused by *alarm_name* under the 'Launching' or
    'Terminating' bucket of *alarm_causes*."""
    bucket = alarm_causes[launch_or_term]
    bucket[alarm_name] = bucket.get(alarm_name, 0) + 1
def summarize_alarm_causes(alarm_causes, launch_or_term):
    """Return (alarm count, 'name n, name n, ...') for one bucket, with the
    alarms ordered by descending activity count."""
    counts = alarm_causes[launch_or_term]
    ranked = sorted(counts.items(), key=lambda pair: pair[1], reverse=True)
    summary = ', '.join('{} {}'.format(alarm_name, n) for alarm_name, n in ranked)
    return len(ranked), summary
3311975 | ###############################################################################
# RingPotential.py: The gravitational potential of a thin, circular ring
###############################################################################
import numpy
from scipy import special
from ..util import conversion
from .Potential import Potential
class RingPotential(Potential):
"""Class that implements the potential of an infinitesimally-thin, circular ring
.. math::
\\rho(R,z) = \\frac{\\mathrm{amp}}{2\pi\,R_0}\\,\\delta(R-R_0)\\,\\delta(z)
with :math:`\\mathrm{amp} = GM` the mass of the ring.
"""
def __init__(self,amp=1.,a=0.75,normalize=False,ro=None,vo=None):
"""
NAME:
__init__
PURPOSE:
initialize a circular ring potential
INPUT:
amp - mass of the ring (default: 1); can be a Quantity with units of mass or Gxmass
a= (0.75) radius of the ring (can be Quantity)
normalize - if True, normalize such that vc(1.,0.)=1., or, if given as a number, such that the force is this fraction of the force necessary to make vc(1.,0.)=1.; note that because the force is always positive at r < a, this does not work if a > 1
ro=, vo= distance and velocity scales for translation into internal units (default from configuration file)
OUTPUT:
(none)
HISTORY:
2018-08-04 - Written - Bovy (UofT)
"""
Potential.__init__(self,amp=amp,ro=ro,vo=vo,amp_units='mass')
a= conversion.parse_length(a,ro=self._ro)
self.a= a
self.a2= self.a**2
self._amp/= 2.*numpy.pi*self.a
if normalize or \
(isinstance(normalize,(int,float)) \
and not isinstance(normalize,bool)):
if self.a > 1.:
raise ValueError('RingPotential with normalize= for a > 1 is not supported (because the force is always positive at r=1)')
self.normalize(normalize)
self.hasC= False
self.hasC_dxdv= False
def _evaluate(self,R,z,phi=0.,t=0.):
"""
NAME:
_evaluate
PURPOSE:
evaluate the potential at R,z
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
Phi(R,z)
HISTORY:
2018-08-04 - Written - Bovy (UofT)
"""
# Stable as r -> infty
m= 4.*self.a/((numpy.sqrt(R)+self.a/numpy.sqrt(R))**2+z**2/R)
return -4.*self.a/numpy.sqrt((R+self.a)**2+z**2)*special.ellipk(m)
def _Rforce(self,R,z,phi=0.,t=0.):
"""
NAME:
_Rforce
PURPOSE:
evaluate the radial force for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
the radial force
HISTORY:
2018-08-04 - Written - Bovy (UofT)
"""
m= 4.*R*self.a/((R+self.a)**2+z**2)
return -2.*self.a/R/numpy.sqrt((R+self.a)**2+z**2)\
*(m*(R**2-self.a2-z**2)/4./(1.-m)/self.a/R*special.ellipe(m)
+special.ellipk(m))
def _zforce(self,R,z,phi=0.,t=0.):
"""
NAME:
_zforce
PURPOSE:
evaluate the vertical force for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
the vertical force
HISTORY:
2018-08-04 - Written - Bovy (UofT)
"""
m= 4.*R*self.a/((R+self.a)**2+z**2)
return -4.*z*self.a/(1.-m)*((R+self.a)**2+z**2)**-1.5*special.ellipe(m)
def _R2deriv(self,R,z,phi=0.,t=0.):
"""
NAME:
_Rderiv
PURPOSE:
evaluate the second radial derivative for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
the second radial derivative
HISTORY:
2018-08-04 - Written - Bovy (UofT)
"""
Raz2= (R+self.a)**2+z**2
Raz= numpy.sqrt(Raz2)
m= 4.*R*self.a/Raz2
R2ma2mz2o4aR1m= (R**2-self.a2-z**2)/4./self.a/R/(1.-m)
return (2*R**2+self.a2+3*R*self.a+z**2)/R/Raz2*self._Rforce(R,z)\
+2.*self.a/R/Raz*(m*(R**2+self.a2+z**2)/4./(1.-m)/self.a/R**2\
*special.ellipe(m)\
+(R2ma2mz2o4aR1m/(1.-m)*special.ellipe(m)
+0.5*R2ma2mz2o4aR1m*(special.ellipe(m)-special.ellipk(m))
+0.5*(special.ellipe(m)/(1.-m)-special.ellipk(m))/m)\
*4*self.a*(self.a2+z**2-R**2)/Raz2**2)
def _z2deriv(self,R,z,phi=0.,t=0.):
"""
NAME:
_z2deriv
PURPOSE:
evaluate the second vertical derivative for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t- time
OUTPUT:
the second vertical derivative
HISTORY:
2018-08-04 - Written - Bovy (UofT)
"""
Raz2= (R+self.a)**2+z**2
m= 4.*R*self.a/Raz2
# Explicitly swapped in zforce here, so the z/z can be cancelled
# and z=0 is handled properly
return -4.*(3.*z**2/Raz2-1.
+4.*((1.+m)/(1.-m)-special.ellipk(m)/special.ellipe(m))\
*self.a*R*z**2/Raz2**2/m)\
*self.a/(1.-m)*((R+self.a)**2+z**2)**-1.5*special.ellipe(m)
    def _Rzderiv(self,R,z,phi=0.,t=0.):
        """
        NAME:
           _Rzderiv
        PURPOSE:
           evaluate the mixed R,z derivative for this potential
        INPUT:
           R - Galactocentric cylindrical radius
           z - vertical height
           phi - azimuth
           t - time
        OUTPUT:
           d2phi/dR/dz
        HISTORY:
           2018-08-04 - Written - Bovy (UofT)
        """
        # Raz2 = (R+a)^2 + z^2; m is the elliptic-integral parameter.
        Raz2= (R+self.a)**2+z**2
        m= 4.*R*self.a/Raz2
        # Differentiate _zforce w.r.t. R and factor the common _zforce term;
        # the bracket collects d/dR of the prefactor and of ellipe(m).
        return (3.*(R+self.a)/Raz2
                -2.*((1.+m)/(1.-m)-special.ellipk(m)/special.ellipe(m))\
                *self.a*(self.a2+z**2-R**2)/Raz2**2/m)*self._zforce(R,z)
| StarcoderdataPython |
# -*- coding: utf-8 -*-
# filename : scraper.py
# description : Grabs movie links
# author : LikeToAccess
# email : <EMAIL>
# date : 07-15-2021
# version : v2.0
# usage : python scraper.py
# notes :
# license : MIT
# py version : 3.8.2 (must run on 3.6 or higher)
#==============================================================================
import time
import os
import sys
from selenium import webdriver
from selenium.common.exceptions import *
from selenium.webdriver.common.by import By
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
import crop
import media
import config as cfg
from errors import NoResults
from media import log
class Scraper:
    """Selenium-based scraper for gomovies-online.cam.

    Drives a Chrome instance (with every ``.crx`` extension found in the
    ``Chrome Extensions`` directory loaded), searches for a title, walks to
    the player page, deals with the site captcha, and collects direct video
    URLs together with per-film metadata.
    """

    def __init__(self, minimize=True):
        """Launch Chrome with local extensions and a persistent profile.

        :param minimize: when True, minimize the browser window after launch
        """
        options = Options()
        path = "Chrome Extensions"
        files = os.listdir(path)
        for file in files:
            if file.endswith("crx"):
                options.add_extension(os.path.abspath(path + "/" + file))
        # options.add_argument("headless")
        user_data_dir = os.path.abspath("selenium_data")
        options.add_argument(f"user-data-dir={user_data_dir}")
        options.add_argument("--disable-gpu")
        options.add_argument("log-level=3")
        self.driver = webdriver.Chrome(executable_path=os.path.abspath(cfg.executable), options=options)
        self.first_launch = True
        self.author = "0"
        self.headers = {"user-agent": cfg.user_agent}
        if minimize: self.driver.minimize_window()

    def search(self, url, media_type=0):
        """Scrape search results and their metadata from a search-page URL.

        Falls through media types (HD movie -> CAM movie -> TV show) by
        recursing with ``media_type + 1`` until results are found; raises
        ``NoResults`` when even the TV-show pass finds nothing.

        :param url: full search URL on gomovies-online.cam
        :param media_type: 0 = movie (HD), 1 = movie (CAM), >= 2 = TV show
        :return: (result elements, {clean film name: metadata dict})
        """
        if media_type == 0:  # Movie (HD)
            element_class = "item_hd"
            description_class = "_smQamBQsETb"
        elif media_type == 1:  # Movie (CAM)
            element_class = "item_cam"
            description_class = "_smQamBQsETb"
        elif media_type >= 2:  # TV Show
            element_class = "item_series"
            description_class = "_skQummZWZxE"
        self.open_link(url)
        results, descriptions = self.get_results_from_search(
            element_class=element_class,
            decription_class=description_class
        )
        if not results:
            if media_type >= 2:  # TV Show
                raise NoResults
            media_type += 1
            return self.search(url, media_type=media_type)
        if media_type == 1: log("**INFO:** Film is in CAM quality.", silent=False)
        if not descriptions:  # this is the same as "if results and not descriptions:"
            # Retry with the movie description class in case the page mixes
            # result markup styles.
            description_class = "_smQamBQsETb"
            results, descriptions = self.get_results_from_search(
                element_class=element_class,
                decription_class=description_class
            )
        metadata = {}
        for description in descriptions:
            # Skip tiles whose attribute and visible title disagree
            # (duplicate/auxiliary markup).
            if description.get_attribute("data-filmname") != description.text: continue
            # Colons are stripped so names are safe to use as filenames/keys.
            metadata[description.text.replace(":","")] = {
                "data-filmname": description.get_attribute("data-filmname").replace(":",""),
                "data-year": description.get_attribute("data-year"),
                "data-imdb": description.get_attribute("data-imdb").split(": ")[1],
                "data-duration": description.get_attribute("data-duration"),
                "data-country": description.get_attribute("data-country"),
                "data-genre": description.get_attribute("data-genre"),
                "data-descript": description.get_attribute("data-descript"),
                "img": description.find_element_by_tag_name("img").get_attribute("src")
            }
        return results, metadata

    def get_metadata_from_video(self, url):
        """Scrape film metadata from the currently open player page.

        :param url: unused; the driver's current page is read directly
        :return: {film name: metadata dict} with a question-mark placeholder
            image when no poster is present
        """
        filmname = self.driver.find_element(
            By.XPATH, "//*[@id=\"info\"]/div[1]/div[1]/h1"
        ).text
        metadata = {}
        # Concatenate the three element groups that hold year/imdb/duration,
        # country/genre and the poster; the fixed indices below depend on
        # this ordering — NOTE(review): fragile against site markup changes.
        description = (
            self.driver.find_elements(By.CLASS_NAME, "_skQummZWZxE") + \
            self.driver.find_elements(By.CLASS_NAME, "_snsNGwwUUBn") + \
            self.driver.find_elements(
                By.XPATH, "/html/body/main/div/div/section/div[5]/div/box/div/div/div/div[3]"
            )
        )
        metadata[filmname] = {
            "data-filmname": filmname,
            "data-year": description[0].text.split("\n")[1],
            "data-imdb": description[1].text.split("\n")[1],
            "data-duration": description[3].text.split("\n")[1],
            "data-country": description[8].text.split(": ")[1],
            "data-genre": description[6].text.split(": ")[1],
            "data-descript": self.driver.find_element(
                By.CLASS_NAME, "_snmrSkaJSTK").text.split("\n")[1],
            "img": description[-1].get_attribute("src")
        }
        if not metadata[filmname]["img"]:
            metadata[filmname]["img"] = \
                "https://upload.wikimedia.org/wikipedia/commons/a/af/Question_mark.png"
        return metadata

    def wait_until_element(self, stratagy, locator, timeout=10):
        """Block until the element is present and return it.

        :param stratagy: a ``By`` locator strategy
        :param locator: locator value for the strategy
        :param timeout: seconds before ``TimeoutException`` is raised
        :return: the located WebElement
        """
        wait = WebDriverWait(self.driver, timeout)
        element = wait.until(
            EC.presence_of_element_located(
                (
                    stratagy, locator
                )
            )
        )
        return element

    def open_link(self, url):
        """Navigate to ``url``, refreshing until the adblocker has removed ads.

        On the very first navigation the adblock extension may still be
        initializing, so the page is reloaded while ad containers are found.
        """
        self.driver.get(url)
        # The following code only runs when the adblock is still initializing from the first launch
        if self.first_launch:
            # Searches for any ads on the site
            element = self.driver.find_elements(
                By.XPATH,
                "//*[@id=\"container-b530c7d909bb9eb21c76642999b355b4\"]/div[2]/div[5]/div/div[3]"
            )
            if element:  # If any ads were found, refresh the page and run the ad check again
                time.sleep(0.5)
                self.driver.refresh()
                self.open_link(url)
            self.first_launch = False

    def current_url(self):
        """Return the URL the driver is currently on."""
        return self.driver.current_url

    def close(self):
        """Close the current browser window."""
        self.driver.close()

    def get_results_from_search(self, element_class="item_hd", decription_class="_smQamBQsETb"):
        """Return (result tiles, description elements) for the current page.

        NOTE: the ``decription_class`` parameter name is misspelled but kept
        for backward compatibility with existing keyword callers.
        """
        elements = self.driver.find_elements_by_class_name(element_class)
        description = self.driver.find_elements_by_class_name(decription_class)  # _skQummZWZxE
        return elements, description

    def screenshot_captcha(self, captcha_element, filename="captcha.png"):
        """Screenshot the page and crop out just the captcha image.

        :return: result of ``crop.crop`` on the captcha region
        """
        self.driver.save_screenshot(filename)
        # self.driver.save_screenshot("full_page.png")
        location = captcha_element.location
        # Pixel offsets compensate for browser chrome around the viewport.
        location["y_off"] = 50
        location["x_off"] = 120
        return crop.crop(filename, location, cfg.executable)

    def check_captcha(self):
        """Detect the site captcha on the current page.

        :return: (image, input, submit) elements, or (None, None, None) when
            no captcha appears within 1.5 seconds
        """
        # Myles
        # Liam
        try:
            captcha_image = self.wait_until_element(
                By.XPATH,
                "//*[@id=\"checkcapchamodelyii-captcha-image\"]",
                timeout=1.5
            )
            captcha_input = self.driver.find_element(By.XPATH, "//*[@id=\"checkcapchamodelyii-captcha\"]")
            captcha_submit = self.driver.find_element(By.XPATH, "//*[@id=\"player-captcha\"]/div[3]/div/div")
        except TimeoutException:
            return None, None, None
        if captcha_image:
            print("DEBUG: Captcha!")
            log("Captcha! Solve using the command:\n```beta solve <captcha_solution>```")
        return captcha_image, captcha_input, captcha_submit

    def run_captcha_functions(self):
        """Screenshot a detected captcha and submit a solution if one exists.

        Currently never returns a truthy value: the external solve step is
        disabled (``solved_captcha = False``), so callers cannot rely on a
        result.
        """
        captcha_image, captcha_input, captcha_submit = self.check_captcha()
        if captcha_image:
            time.sleep(0.25)
            self.screenshot_captcha(captcha_image)
            # log("DEBUG--file=captcha.png")
            # solved_captcha = check_for_captcha_solve(timeout=1)
            solved_captcha = False
            if solved_captcha:
                captcha_input.send_keys(solved_captcha)
                captcha_submit.click()

    def get_download_link(self, source_url, timeout=10):
        """Resolve one or more direct video URLs from a title/episode URL.

        Movies and single episodes produce one entry; a season page expands
        to every episode link on it.

        :param source_url: film, episode, or season URL
        :param timeout: seconds to wait for the <video> element per page
        :return: list of (video URL, metadata dict, author) tuples
        """
        movie = "watch-tv-show" not in source_url
        # Link is a movie
        if movie:
            source_url = source_url.split(".html")[0] + (".html" if ".html" in source_url else "")
            if not source_url.endswith("-online-for-free.html"):
                source_url += "-online-for-free.html"
            source_url_list = [source_url]
        # Link is a TV show season
        elif not source_url.endswith(".html"):
            self.open_link(source_url)
            source_url_list = self.driver.find_elements(By.XPATH, "//*[@class=\"_sXFMWEIryHd \"]")
            for index, source_url in enumerate(source_url_list):
                source_url_list[index] = source_url.get_attribute("href")
        # Link is a TV show episode
        else:
            source_url = source_url.split(".html")[0] + ".html"
            if not source_url.endswith("-online-for-free.html"):
                source_url += "-online-for-free.html"
            source_url_list = [source_url]
        download_queue = []
        for url in source_url_list:
            if not url.endswith("-online-for-free.html"):
                continue
            self.open_link(url)
            # NOTE(review): run_captcha_functions() always returns None, so
            # this retry branch is currently dead — confirm intended behavior.
            if self.run_captcha_functions(): self.get_download_link(url, timeout)
            metadata = self.get_metadata_from_video(url)  # Works for movies and TV
            target_url = self.wait_until_element(
                By.TAG_NAME, "video", timeout
            ).get_attribute("src")
            # Pause playback so background tabs do not keep streaming.
            self.driver.execute_script(
                "videos = document.querySelectorAll(\"video\"); for(video of videos) {video.pause()}"
            )
            print(target_url)
            download_queue.append((target_url,metadata,self.author))
        return download_queue

    # '''Demitri's Holy Contribution'''
    # def get_movie(self, name):
    # 	self.driver.get_link_by_partial_text("").click()
    # 	self.driver.find_element_by_tag_name("input").text()

    def download_first_from_search(self, search_query):
        """Search for ``search_query`` and resolve links for the first hit.

        :return: list of (url, metadata, author) tuples; empty when the
            search found nothing
        """
        start_time = time.time()
        search_results, metadata = self.search(
            "https://gomovies-online.cam/search/" + \
            "-".join(search_query.split())
        )
        # Fix: previously unassigned when the search had no results, which
        # made the final return raise UnboundLocalError.
        download_queue = []
        if search_results:
            search_time_elapsed = round(time.time()-start_time,2)
            print(f"Finished scraping {len(search_results)} results in {search_time_elapsed} seconds!")
            source_url = search_results[0].get_attribute("href")
            download_queue = self.get_download_link(
                source_url + ("-online-for-free.html" if "watch-tv-show" not in source_url else "")
            )  # [(x,y,z),(x,y,z),(x,y,z),...(x,y,z)]
            print("Link found." if len(download_queue) == 1 else f"{len(download_queue)} links found.")
        else:
            print("Error: No search results found!")
        print(f"Finished all scraping in {round(time.time()-start_time,2)} seconds!")
        return download_queue  # [(url,metadata,author)]

    def run(self, search_query):
        """Return the first (url, metadata, author) tuple for a search query.

        NOTE(review): raises IndexError when the search yields no links —
        callers should be prepared for that.
        """
        download_queue = self.download_first_from_search(search_query)[0]
        return download_queue
def check_for_captcha_solve(timeout=100):
    """Poll for a ``captcha.txt`` file holding the captcha solution.

    When this module runs as a script the user is prompted directly and the
    file is written here; otherwise an external process is expected to drop
    the file. The directory is polled every 0.5 seconds.

    :param timeout: seconds to wait before giving up
    :return: the solution string, or False when none arrived in time
    """
    if __name__ == "__main__":
        media.write_file("captcha.txt", input("Solve the captcha:\n> "))
    filename = "captcha.txt"
    for half_second in range(timeout*2):
        time.sleep(0.5)
        if os.path.isfile(filename):
            solved_captcha = media.read_file(filename)[0]
            media.remove_file(filename)
            return solved_captcha
    # Fixed message typo: "withing" -> "within".
    log(f"Captcha was not solved within {timeout} seconds.\nAborting download.", silent=False)
    return False
def error(e):
    """Format the currently handled exception as a Discord code block.

    Must be called from inside an ``except`` block so ``sys.exc_info()``
    returns the active exception; otherwise ``exc_tb`` is None and this
    function itself raises.

    :param e: the caught exception instance (used for its message)
    :return: a triple-backtick string with type, file, line, and message
    """
    # Code by Confused Cottonmouth - Jan 13 2021
    exc_type, exc_obj, exc_tb = sys.exc_info()
    filename = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
    # Fix: interpolate the computed filename — it was previously computed but
    # unused, and the message printed a literal "(unknown)".
    return f"```javascript\nException type: {exc_type}\nFile name: {filename}\nLine Number: {exc_tb.tb_lineno}\nException data: {e}```"
# Manual smoke test: search for titles interactively until an empty line
# is entered, then shut the browser down.
if __name__ == "__main__":
	scraper = Scraper(minimize=False)
	while True:
		query = input("Enter a Title to search for:\n> ")
		if query:
			scraper.run(query)
		else:
			break
	scraper.close()
# The criminal I've been chasing is wearing my shoes.
| StarcoderdataPython |
"""This is a 'Hello, world' program."""
def hello():
    """Print the classic greeting to standard output."""
    greeting = "Hello, World!"
    print(greeting)
# Run the greeting only when executed as a script, not on import.
if __name__ == "__main__":
    hello()
| StarcoderdataPython |
1701016 | <gh_stars>0
from django.test import TestCase
from unittest.mock import MagicMock, patch
from handledapp.models import Invoice, Item, Carrot
from handledapp.handlers import InvoiceSignalHandler
from datetime import datetime
from cached_fields.exceptions import UnauthorisedChange
from cached_fields.handlers import CachedFieldSignalHandler
# Create your tests here.
class TestCalculationOnCreate(TestCase):
    """Verify the cached ``total`` field is computed on create and on save."""

    def setUp(self):
        # One shared item priced at 3; each test builds its own invoice.
        self.item = Item.objects.create(name="ARSE", price=3)

    def test_cacheIsInitialisedWhenModelCreated(self):
        """Cache is populated by ``objects.create`` and refreshed on save."""
        inv = Invoice.objects.create(item=self.item, quantity=99)
        self.assertEqual(inv.total, 297)
        inv.quantity = 3
        inv.save()
        self.assertEqual(inv.total, 9)

    def test_cacheIsInitialisedWhenModelInitialisedAndSaved(self):
        """Cache is populated when the instance is constructed then saved."""
        inv = Invoice(item=self.item, quantity=99)
        inv.save()
        self.assertEqual(inv.total, 297)
        inv.quantity = 3
        inv.save()
        self.assertEqual(inv.total, 9)
class TestMultipleCachedFieldsOnModel(TestCase):
    """Verify that several cached fields on one model update together."""

    def setUp(self):
        # No shared fixtures are needed for this test case.
        pass

    def test_multipleCachedFields_shouldUpdateWhenSaved(self):
        """Both cached aggregates derive from the two source values."""
        obj = Carrot.objects.create(value_one=2, value_two=11)
        self.assertEqual(obj.multiple, 22)
        self.assertEqual(obj.addition, 13)
| StarcoderdataPython |
from random import randint

# Number-guessing game: the computer picks an integer in [0, 10] and the
# player is told "higher"/"lower" until the guess is right.
pc = randint(0, 10)
joga = int(input('Tente adivinhar o número que eu escolhi: '))
cont = 1
while joga != pc:
    if joga < pc:
        joga = int(input('Mais! Tente novamente: '))
    # Fix: was a second independent ``if`` — after the first prompt read a
    # new guess, a too-high value triggered another prompt in the same pass
    # while ``cont`` advanced only once, undercounting attempts.
    # Also fixed the missing space in 'Tentenovamente'.
    elif joga > pc:
        joga = int(input('Menos! Tente novamente: '))
    cont += 1
print(f'PARABÉNS! Você tentou {cont} vezes para acertar.')
1741334 | <gh_stars>0
"""JetMET tools: CMS analysis-level jet corrections and uncertainties
These classes provide computation of CMS jet energy scale and jet energy resolution
corrections and uncertainties on columnar data.
"""
from .FactorizedJetCorrector import FactorizedJetCorrector
from .JetResolution import JetResolution
from .JetResolutionScaleFactor import JetResolutionScaleFactor
from .JetCorrectionUncertainty import JetCorrectionUncertainty
from .JetTransformer import JetTransformer
from .JECStack import JECStack
from .CorrectedJetsFactory import CorrectedJetsFactory
from .CorrectedMETFactory import CorrectedMETFactory
# Public names exported by ``import *``; keep in sync with the imports above.
__all__ = [
    'FactorizedJetCorrector',
    'JetResolution',
    'JetResolutionScaleFactor',
    'JetCorrectionUncertainty',
    'JetTransformer',
    'JECStack',
    'CorrectedJetsFactory',
    'CorrectedMETFactory'
]
| StarcoderdataPython |
def collatz_steps(n):
    """Return the number of Collatz steps needed to reach 1 from ``n``.

    One step maps n -> n/2 when n is even and n -> 3n+1 when n is odd; each
    intermediate value is printed, matching the original script's trace.
    """
    count = 0
    while n != 1:
        if n % 2 == 0:
            n = n // 2  # integer halving; n is even here
        else:
            n = 3 * n + 1
        count += 1
        print(n)
    return count


if __name__ == "__main__":
    # Fix: int() instead of eval() — eval on raw user input can execute
    # arbitrary code.
    n = int(input('Enter Number To Check Collatz Conjecture: '))
    if n in (1, 2, 4):
        # 4 -> 2 -> 1 -> 4 is the trivial cycle the conjecture terminates in.
        print("Sorry, this is an infinite loop of 1 - 2 - 4 - 2 - 1 - 4.")
    # Fix: the original pre-set count to 2 for n in {2, 4} and then counted
    # the same steps again in the loop, reporting wrong totals.
    count = collatz_steps(n)
    print("Count =", count)
# Code was created by <NAME>, 2020/01/13
# https://github.com/ezygeo-ai/machine-learning-and-geophysical-inversion/blob/master/scripts/fwd_sp.py
import numpy as np
import matplotlib.pyplot as plt
import pickle
# SP forward function
def SPfunc(x_inp, par):
    """Forward-model the self-potential (SP) anomaly of an inclined sheet.

    ``par`` holds [x0, alpha, h, K]: anomaly centre, inclination angle in
    radians, depth, and amplitude factor.  Returns a tuple of (clean SP
    values, SP values with Gaussian noise added, the noise realisation);
    the noisy curve draws fresh random noise on every call.
    """
    x0, alpha, h, k = par[0], par[1], par[2], par[3]
    # Analytic sheet response evaluated at every measurement position.
    var_sp = [
        k * (((x - x0) * np.cos(alpha) - h * np.sin(alpha))
             / (((x - x0) * (x - x0) + h * h) ** (3 / 2)))
        for x in x_inp
    ]
    # === give noise for data (Gaussian Noise)
    std_noise = 10  # = % (used as the variance of the Gaussian draw below)
    mean_noise = 0
    noise_data = np.random.normal(mean_noise, np.sqrt(std_noise), len(var_sp))
    var_sp_noise = var_sp + noise_data
    return var_sp, var_sp_noise, noise_data
# === TEST FORWARD MODELING
# Synthetic "true" model parameters for the inclined sheet.
x0 = 77.07 # m
alpha = 309.37 * (np.pi/180) # deg2rad
h = 41.81 # m
K = 94686
measure_loc = np.linspace(0, 150, 101) # Location of measurement
print('number of data: ', len(measure_loc))
par_mod = [x0, alpha, h, K] # model parameter of subsurface
get_SPData, get_SPData_noise, noise_from_maxData = SPfunc(measure_loc, par_mod) # forward modeling test
# Plot the clean response against the noise-contaminated one.
plt.figure()
plt.plot(measure_loc, get_SPData, 'b.')
plt.plot(measure_loc, get_SPData_noise, 'r*')
plt.xlim([0, 150])
plt.ylim([-10, 50])
plt.xlabel('position (m)')
plt.ylabel('SP data (mV)')
plt.legend(['ori', 'noise'])
plt.grid()
# Histogram of the noise realisation that was added to the data.
plt.figure()
plt.hist(noise_from_maxData, density=True, bins=20)
plt.ylabel('noise distribution')
# plt.show() blocks until the figure windows are closed.
plt.show()
# Persist locations and noisy data; NOTE(review): the relative ../data path
# assumes the script is run from its own directory — confirm.
with open('../data/SP_syn_data.pickle', 'wb') as f:
    pickle.dump([measure_loc, get_SPData_noise], f)
| StarcoderdataPython |
111434 | <reponame>Sahmwell/G15_Capstone
from env.SumoEnv import SumoEnv
import time
def main():
    """Construct a ``SumoEnv(1000, False)`` and reset it to its initial state."""
    env = SumoEnv(1000, False)
    env.reset()
# Allow the module to be executed directly as a script.
if __name__ == '__main__':
    main()
| StarcoderdataPython |
1676874 | <filename>architect/orms/peewee/features.py
"""
Defines features for the Peewee ORM.
"""
from peewee import CompositeKey
from ..bases import BasePartitionFeature, BaseOperationFeature
class OperationFeature(BaseOperationFeature):
    """Raw-SQL execution feature for the Peewee ORM backend."""

    def execute(self, sql, autocommit=True):
        """Run ``sql`` on the model's database and return the cursor result.

        Percent signs are doubled because peewee's cursor layer treats a
        bare ``%`` as a parameter placeholder marker.
        """
        escaped_sql = sql.replace('%', '%%')
        return self.model_cls._meta.database.execute_sql(escaped_sql, require_commit=autocommit)
class PartitionFeature(BasePartitionFeature):
    """Partition feature for the Peewee ORM: exposes model metadata to the
    partition machinery and wraps ``save`` so the target partition exists
    before each write."""
    # Model methods to be wrapped by the matching _decorate_* static methods.
    decorate = ('save',)
    @property
    def model_meta(self):
        """Return the metadata dict the partition backend needs:
        table name, primary key name(s), database dialect, and the
        partition-column value callback."""
        meta = self.model_cls._meta
        pk = meta.primary_key
        return {
            'table': meta.db_table,
            # Composite keys yield a list of field names; simple keys a string.
            'pk': list(pk.field_names) if isinstance(pk, CompositeKey) else pk.name,
            # e.g. PostgresqlDatabase -> 'postgresql'.
            'dialect': meta.database.__class__.__name__.lower().replace('database', ''),
            'column_value': self._column_value([field for field in meta.fields.keys()]),
        }
    @staticmethod
    def _decorate_save(method):
        """
        Checks if partition exists and creates it if needed before saving model instance.
        """
        def wrapper(instance, *args, **kwargs):
            partition = instance.architect.partition.get_partition()
            if not partition.exists():
                partition.create()
            method(instance, *args, **kwargs)
        return wrapper
| StarcoderdataPython |
import logging
from . import BaseAuthenticatedApiView
from rest_framework.response import Response
logger = logging.getLogger(__name__)
class StatusAuthenticatedApiView(BaseAuthenticatedApiView):
    """
    View for authenticated status check API method.
    """
    # noinspection PyMethodMayBeStatic,PyUnusedLocal
    def get(self, request):
        """Handle GET: record the health-check hit and report success.

        :param request: incoming HTTP request (authentication handled by
            the base class; otherwise unused)
        :return: empty ``Response`` with HTTP 200 status code
        """
        logger.info("Authenticated status API check called")
        # Fix: removed a leftover debug call ``logger.error("BANK!")`` that
        # emitted an error-level log on every successful status check.
        # return empty response with HTTP 200 status code
        return Response()
| StarcoderdataPython |
26295 | <reponame>heytrav/drs-project
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-03-26 01:17
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the ``domain_api`` app: account templates, contacts,
    registrants, domains, providers, TLD tables and their link tables.
    Auto-generated by Django 1.10.5 — edit with care."""
    # First migration in this app's history.
    initial = True
    # Depends on whatever user model the project configures (AUTH_USER_MODEL).
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='AccountDetail',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('first_name', models.CharField(max_length=200)),
                ('surname', models.CharField(max_length=200)),
                ('middle_name', models.CharField(blank=True, max_length=200)),
                ('email', models.CharField(max_length=200)),
                ('email2', models.CharField(blank=True, max_length=200)),
                ('email3', models.CharField(blank=True, max_length=200)),
                ('telephone', models.CharField(blank=True, max_length=200)),
                ('fax', models.CharField(blank=True, max_length=200)),
                ('company', models.CharField(blank=True, max_length=200)),
                ('house_number', models.CharField(max_length=10)),
                ('street1', models.CharField(max_length=200)),
                ('street2', models.CharField(blank=True, max_length=200)),
                ('street3', models.CharField(blank=True, max_length=200)),
                ('city', models.CharField(max_length=200)),
                ('suburb', models.CharField(blank=True, max_length=200)),
                ('state', models.CharField(blank=True, max_length=200)),
                ('postcode', models.CharField(max_length=20)),
                ('country', models.CharField(max_length=2)),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('updated', models.DateTimeField(auto_now=True)),
                ('postal_info_type', models.CharField(choices=[('int', 'international'), ('loc', 'local')], default='loc', max_length=3)),
                ('disclose_name', models.BooleanField(default=False)),
                ('disclose_company', models.BooleanField(default=False)),
                ('disclose_address', models.BooleanField(default=False)),
                ('disclose_telephone', models.BooleanField(default=False)),
                ('disclose_fax', models.BooleanField(default=False)),
                ('disclose_email', models.BooleanField(default=False)),
                ('project_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='personal_details', to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='Contact',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=200, null=True)),
                ('email', models.CharField(max_length=200, null=True)),
                ('telephone', models.CharField(blank=True, max_length=200, null=True)),
                ('fax', models.CharField(blank=True, max_length=200, null=True)),
                ('company', models.CharField(blank=True, max_length=200, null=True)),
                ('house_number', models.CharField(blank=True, max_length=10, null=True)),
                ('street1', models.CharField(max_length=200, null=True)),
                ('street2', models.CharField(blank=True, max_length=200, null=True)),
                ('street3', models.CharField(blank=True, max_length=200, null=True)),
                ('city', models.CharField(max_length=200, null=True)),
                ('suburb', models.CharField(blank=True, max_length=200, null=True)),
                ('state', models.CharField(blank=True, max_length=200, null=True)),
                ('postcode', models.CharField(max_length=20, null=True)),
                ('country', models.CharField(max_length=2, null=True)),
                ('postal_info_type', models.CharField(choices=[('int', 'international'), ('loc', 'local')], default='loc', max_length=3)),
                ('authcode', models.CharField(blank=True, max_length=100, null=True)),
                ('roid', models.CharField(blank=True, max_length=100, null=True)),
                ('disclose_name', models.BooleanField(default=False)),
                ('disclose_company', models.BooleanField(default=False)),
                ('disclose_address', models.BooleanField(default=False)),
                ('disclose_telephone', models.BooleanField(default=False)),
                ('disclose_fax', models.BooleanField(default=False)),
                ('disclose_email', models.BooleanField(default=False)),
                ('status', models.CharField(max_length=200, null=True)),
                ('registry_id', models.CharField(max_length=200, unique=True)),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('updated', models.DateTimeField(auto_now=True)),
                ('account_template', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='domain_api.AccountDetail')),
                ('project_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='contacts', to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='ContactType',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=50, unique=True)),
                ('description', models.TextField()),
            ],
        ),
        migrations.CreateModel(
            name='DefaultAccountContact',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('mandatory', models.BooleanField(default=False)),
                ('account_template', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='domain_api.AccountDetail')),
                ('contact_type', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='domain_api.ContactType')),
                ('project_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='default_account_contact', to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='DefaultAccountTemplate',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('account_template', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='domain_api.AccountDetail')),
                ('project_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='default_account', to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='DefaultContact',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('mandatory', models.BooleanField(default=False)),
                ('contact', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='domain_api.Contact')),
                ('contact_type', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='domain_api.ContactType')),
                ('project_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='default_contact', to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='DefaultRegistrant',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('project_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='default_registrant', to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='Domain',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=200, unique=True)),
            ],
        ),
        migrations.CreateModel(
            name='DomainContact',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('active', models.NullBooleanField()),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('contact', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='domain_api.Contact')),
                ('contact_type', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='domain_api.ContactType')),
            ],
        ),
        migrations.CreateModel(
            name='DomainProvider',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100, unique=True)),
                ('slug', models.CharField(max_length=100, unique=True)),
                ('description', models.TextField()),
            ],
        ),
        migrations.CreateModel(
            name='DomainRegistrant',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('active', models.NullBooleanField()),
                ('created', models.DateTimeField(auto_now_add=True)),
            ],
        ),
        migrations.CreateModel(
            name='RegisteredDomain',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('active', models.NullBooleanField()),
                ('auto_renew', models.BooleanField(default=True)),
                ('registration_period', models.IntegerField()),
                ('authcode', models.CharField(max_length=100, null=True)),
                ('roid', models.CharField(max_length=100, null=True)),
                ('status', models.CharField(max_length=200, null=True)),
                ('anniversary', models.DateTimeField(null=True)),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('updated', models.DateTimeField(auto_now=True)),
                ('domain', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='domain_api.Domain')),
            ],
        ),
        migrations.CreateModel(
            name='Registrant',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('registry_id', models.CharField(max_length=200, unique=True)),
                ('name', models.CharField(max_length=200, null=True)),
                ('email', models.CharField(max_length=200, null=True)),
                ('telephone', models.CharField(blank=True, max_length=200, null=True)),
                ('fax', models.CharField(blank=True, max_length=200, null=True)),
                ('company', models.CharField(blank=True, max_length=200, null=True)),
                ('house_number', models.CharField(blank=True, max_length=10, null=True)),
                ('street1', models.CharField(max_length=200, null=True)),
                ('street2', models.CharField(blank=True, max_length=200, null=True)),
                ('street3', models.CharField(blank=True, max_length=200, null=True)),
                ('city', models.CharField(max_length=200, null=True)),
                ('suburb', models.CharField(blank=True, max_length=200, null=True)),
                ('state', models.CharField(blank=True, max_length=200, null=True)),
                ('status', models.CharField(max_length=200, null=True)),
                ('postcode', models.CharField(max_length=20, null=True)),
                ('country', models.CharField(max_length=2, null=True)),
                ('postal_info_type', models.CharField(choices=[('int', 'international'), ('loc', 'local')], default='loc', max_length=3)),
                ('authcode', models.CharField(blank=True, max_length=100, null=True)),
                ('roid', models.CharField(blank=True, max_length=100, null=True)),
                ('disclose_name', models.BooleanField(default=False)),
                ('disclose_company', models.BooleanField(default=False)),
                ('disclose_address', models.BooleanField(default=False)),
                ('disclose_telephone', models.BooleanField(default=False)),
                ('disclose_fax', models.BooleanField(default=False)),
                ('disclose_email', models.BooleanField(default=False)),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('updated', models.DateTimeField(auto_now=True)),
                ('account_template', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='domain_api.AccountDetail')),
                ('project_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='registrants', to=settings.AUTH_USER_MODEL)),
                ('provider', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='domain_api.DomainProvider')),
            ],
        ),
        migrations.CreateModel(
            name='TopLevelDomain',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('zone', models.CharField(max_length=100, unique=True)),
                ('description', models.TextField()),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('updated', models.DateTimeField(auto_now=True)),
            ],
        ),
        migrations.CreateModel(
            name='TopLevelDomainProvider',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('anniversary_notification_period_days', models.IntegerField(default=30)),
                ('renewal_period', models.IntegerField(default=30)),
                ('grace_period_days', models.IntegerField(default=30)),
                ('provider', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='domain_api.DomainProvider')),
                ('zone', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='domain_api.TopLevelDomain')),
            ],
        ),
        # Foreign keys added after model creation to avoid forward references
        # between the CreateModel operations above.
        migrations.AddField(
            model_name='registereddomain',
            name='tld',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='domain_api.TopLevelDomain'),
        ),
        migrations.AddField(
            model_name='registereddomain',
            name='tld_provider',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='domain_api.TopLevelDomainProvider'),
        ),
        migrations.AddField(
            model_name='domainregistrant',
            name='registered_domain',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='registrant', to='domain_api.RegisteredDomain'),
        ),
        migrations.AddField(
            model_name='domainregistrant',
            name='registrant',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='domain_api.Registrant'),
        ),
        migrations.AddField(
            model_name='domaincontact',
            name='registered_domain',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='contacts', to='domain_api.RegisteredDomain'),
        ),
        migrations.AddField(
            model_name='defaultregistrant',
            name='registrant',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='domain_api.Registrant'),
        ),
        migrations.AddField(
            model_name='defaultcontact',
            name='provider',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='domain_api.DomainProvider'),
        ),
        migrations.AddField(
            model_name='defaultaccounttemplate',
            name='provider',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='domain_api.DomainProvider'),
        ),
        migrations.AddField(
            model_name='defaultaccountcontact',
            name='provider',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='domain_api.DomainProvider'),
        ),
        migrations.AddField(
            model_name='contact',
            name='provider',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='domain_api.DomainProvider'),
        ),
        # Uniqueness constraints across the link tables.
        migrations.AlterUniqueTogether(
            name='registereddomain',
            unique_together=set([('domain', 'tld', 'active')]),
        ),
        migrations.AlterUniqueTogether(
            name='domainregistrant',
            unique_together=set([('registered_domain', 'registrant', 'active')]),
        ),
        migrations.AlterUniqueTogether(
            name='domaincontact',
            unique_together=set([('registered_domain', 'contact_type', 'contact', 'active')]),
        ),
        migrations.AlterUniqueTogether(
            name='defaultregistrant',
            unique_together=set([('project_id', 'registrant')]),
        ),
        migrations.AlterUniqueTogether(
            name='defaultcontact',
            unique_together=set([('project_id', 'contact_type', 'contact', 'provider')]),
        ),
        migrations.AlterUniqueTogether(
            name='defaultaccounttemplate',
            unique_together=set([('project_id', 'provider', 'account_template')]),
        ),
        migrations.AlterUniqueTogether(
            name='defaultaccountcontact',
            unique_together=set([('project_id', 'contact_type', 'account_template', 'provider', 'mandatory')]),
        ),
    ]
| StarcoderdataPython |
"""Added columns to user_to_lesson
Revision ID: 530c0d70d57d
Revises: 2<PASSWORD>
Create Date: 2013-11-01 14:15:16.414902
"""
# revision identifiers, used by Alembic.
revision = '530c0d70d57d'
down_revision = '<PASSWORD>'  # NOTE(review): redacted placeholder — restore the real parent revision id before running
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Add nullable ``completed`` and ``recent_step_number`` columns to the
    ``user_to_lesson`` table."""
    ### commands auto generated by Alembic - please adjust! ###
    op.add_column('user_to_lesson', sa.Column('completed', sa.Boolean(), nullable=True))
    op.add_column('user_to_lesson', sa.Column('recent_step_number', sa.Integer(), nullable=True))
    ### end Alembic commands ###
def downgrade():
    """Drop the progress columns added by :func:`upgrade` (reverse order)."""
    for column_name in ('recent_step_number', 'completed'):
        op.drop_column('user_to_lesson', column_name)
| StarcoderdataPython |
3284508 |
"""
Copyright (C) 2016, Blackboard Inc.
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
Neither the name of Blackboard Inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY BLACKBOARD INC ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL BLACKBOARD INC. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Created on May 25, 2016
@author: shurrey
"""
import json
import requests
import time
import jwt
import datetime
import ssl
import sys
class SessionController():
    """Thin client for the Collaborate ``/sessions`` REST endpoints.

    Holds the target host, a bearer token, and the TLS verification flag;
    ``createSession`` stores the created session id on the instance.
    """

    def __init__(self, target_url, token, verify_certs):
        self.target_url = target_url
        self.token = token
        self.verify_certs = verify_certs

    def getSessionId(self):
        # Set by a successful createSession() call.
        return self.SESSION_ID

    def getGuestUrl(self):
        # NOTE(review): GUEST_URL is never assigned in this class — calling
        # this raises AttributeError unless it is set externally; confirm.
        return self.GUEST_URL

    def getModUrl(self):
        # NOTE(review): MODERATOR_URL is never assigned in this class either.
        return self.MODERATOR_URL

    def _json_headers(self):
        # Shared JSON request headers, including the bearer token.
        return {
            'Authorization': 'Bearer ' + self.token,
            'Content-Type': 'application/json',
            'Accept': 'application/json',
        }

    def createSession(self, payload):
        """POST *payload* to /sessions; return the new session id or None."""
        endpoint = "https://" + self.target_url + '/sessions'
        r = requests.post(endpoint, headers=self._json_headers(),
                          json=payload, verify=self.verify_certs)
        if r.status_code != 200:
            print("Sessions.createSession ERROR: " + str(r))
            return None
        res = json.loads(r.text)
        print("Session: " + json.dumps(res, indent=4, separators=(',', ': ')))
        self.SESSION_ID = res['id']
        return self.SESSION_ID

    def enrollUser(self, session_id, user, role):
        """Request a launch URL for *user* in *session_id*; None on failure."""
        payload = {
            'launchingRole': role,
            'editingPermission': 'writer',
            'user': user,
        }
        endpoint = ("https://" + self.target_url + '/sessions/' + session_id
                    + "/url")
        r = requests.post(endpoint, headers=self._json_headers(),
                          json=payload, verify=self.verify_certs)
        if r.status_code != 200:
            print("Sessions.enrollUser ERROR: " + str(r))
            return None
        res = json.loads(r.text)
        print(json.dumps(res, indent=4, separators=(',', ': ')))
        return res['url']
3291796 | <reponame>StuWares/Hacktoberfest2018
# Language: Python
# Author: heckerman100
# Fixed: the original used C-style `//` comments (invalid Python syntax)
# and an unterminated string literal.
print("Hello World")
| StarcoderdataPython |
179126 | # -*- coding:utf-8 -*-
import requests
import os
import openpyxl
from rrunner.common.handle_config import config
from rrunner.common.handle_path import DATA_DIR, CASE_DIR
def getByPath(path, obj):
    """Follow a dot-separated *path* into nested dicts.

    Returns the value at the path, or None as soon as any segment is
    missing.  (Uses ``is None`` rather than the original ``== None``.)
    """
    for segment in path.split("."):
        obj = obj.get(segment)
        if obj is None:
            break
    return obj
def dic2String(obj):
    """Render *obj* as a JSON-looking string, coercing values via str().

    Keys must already be strings; values of any type are stringified.
    """
    entries = []
    for key, value in obj.items():
        entries.append('"' + key + '":"' + str(value) + '"')
    return '{' + ','.join(entries) + '}'
def parseBody(parItem, body, raw):
    """Populate *body* (in place) from one swagger parameter definition.

    Schema-referenced models are expanded field by field (integers default
    to 0, everything else to ""); otherwise the parameter itself becomes a
    single key.
    """
    if parItem.get('schema') is not None:
        ref = getByPath('schema.$ref', parItem)
        if ref is None:
            # Array payloads keep the model reference one level deeper.
            ref = getByPath('schema.items.$ref', parItem)
        if ref is not None:
            model = ref.replace('#/definitions/', '')
            props = getByPath('definitions.' + model + '.properties', raw)
            if props is not None:
                for field_name, spec in props.items():
                    body[field_name] = 0 if spec.get('type') == 'integer' else ""
        elif parItem.get('description') is not None:
            # No resolvable model: stash the free-text description instead.
            body['_parms'] = parItem['description']
        else:
            body[parItem['name']] = ''
    else:
        body[parItem['name']] = 0 if parItem.get('type') == 'integer' else ""
def writeRow(func, ws, i):
    """Write one case record *func* into worksheet row *i*, columns A-K."""
    row = str(i)
    columns = (('A', 'case_id'), ('B', 'title'), ('C', 'interface'),
               ('D', 'content-type'), ('E', 'method'), ('F', 'url'),
               ('G', 'data'), ('H', 'expected'), ('I', 'check_sql'),
               ('J', 'result'), ('K', 'tag'))
    for col, key in columns:
        ws[col + row] = func[key]
def writeCaseClass(cName):
    """Generate the pytest class file for controller *cName*.

    Skipped when the file already exists so manual edits are preserved.
    The generated class loads its rows from the Excel workbook produced
    by writeCase().
    """
    caseName = 'test_' + cName + '_controller.py'
    dataName = 'test_' + cName + '_controller.xlsx'
    # NOTE(review): "\I" in "\InnerApi" is an invalid escape sequence that
    # Python happens to keep literal — a raw string or os.path.join segment
    # would be safer; left unchanged here.
    isExist = os.path.exists(os.path.join(CASE_DIR + "\InnerApi", caseName))
    if isExist:
        return
    f = open(os.path.join(CASE_DIR + "\InnerApi", caseName), 'w')
    # Emit the test module line by line.
    f.write("import os\n")
    f.write("import allure\n")
    f.write("import pytest\n")
    f.write("from common.handle_excel import Excel\n")
    f.write("from common.handle_path import DATA_DIR\n")
    f.write("from common.handle_config import config\n")
    f.write("from common.requtest_assert import RequestsAssert\n")
    f.write("class Test" + cName + ":\n")
    f.write('    excel = Excel(os.path.join(DATA_DIR, "{}"), "Sheet")\n'.format(dataName))
    f.write("    test_data = excel.read_excel()\n")
    f.write('    module = config.get("test_data", "module")\n')
    # When module == "0", untagged rows are filtered out of the data set.
    f.write('    if module == "0":\n')
    f.write('        for i in range(0, len(test_data) - 1):\n')
    f.write('            if None == test_data[i]["tag"]:\n')
    f.write('                del (test_data[i])\n')
    f.write('    @allure.feature("{}")\n'.format(cName))
    f.write("    @pytest.mark.parametrize('item', test_data)\n")
    f.write('    def test_' + cName + '(self, item, get_token):\n')
    f.write("        headers = get_token\n")
    f.write("        res = RequestsAssert.apiRequest(item, headers)\n")
    f.write("        write = self.excel.write_excel\n")
    f.write("        RequestsAssert.apiAssert(res, item, write)\n")
def writeCase(cName, funcs):
    """Dump the collected case rows for controller *cName* into a fresh
    workbook; skipped when the spreadsheet already exists."""
    caseName = 'test_' + cName + '_controller.xlsx'
    if os.path.exists(os.path.join(DATA_DIR, caseName)):
        return
    wb = openpyxl.Workbook()
    ws = wb.active
    for row_no, record in enumerate(funcs, start=1):
        writeRow(record, ws, row_no)
    wb.save(os.path.join(DATA_DIR, caseName))
def main(catName, rules):
    """Fetch the swagger spec and, for each controller, generate an Excel
    case file plus a pytest class file.

    *catName* limits generation to a single controller ('*' means all).
    NOTE(review): *rules* is accepted but never used in this function.
    """
    rs = requests.get(config.get("env", "swagger_url"))
    raw = rs.json()
    paths = getByPath("paths", raw)
    funcs = []
    lastCName = None
    i = 1
    keys = paths.keys()
    # keys.sort()
    keys = sorted(keys)
    for pKey in keys:
        path = pKey
        value = paths[pKey]
        # Controller name is the first path segment (e.g. /user/login -> user).
        cName = path.split('/')[1]
        if catName != '*' and cName != catName:
            continue
        # Controller changed: flush the rows collected for the previous one.
        if lastCName != cName and lastCName != None:
            writeCase(lastCName, funcs)
            writeCaseClass(lastCName)
            i = 1
            funcs = []
        lastCName = cName
        method = 'post' if value.get('post', None) != None else 'get'
        value = value[method]
        params = getByPath("parameters", value)
        desc = getByPath("summary", value)
        body = {}
        query = {}
        data = {}
        for par in params:
            if par['in'] == 'body':
                parseBody(par, body, raw)
            elif par['in'] == 'query':
                query[par['name']] = ''
        data = {'query': query, 'body': body}
        # if len(body) > 0 and len(query) > 0:
        #     data = {query: query, body: body}
        # else:
        #     data = body if len(body) > 0 else query
        if i == 1:
            # First row of every sheet is the header row.
            funcs.append({
                'case_id': 'case_id',
                'title': 'title',
                'content-type': 'content-type',
                'interface': 'interface',
                'url': 'url',
                'method': 'method',
                'data': 'data',
                'expected': 'expected',
                'check_sql': 'check_sql',
                'result': 'result',
                'tag': 'tag'
            })
        item = {
            'case_id': str(i),
            'title': desc,
            'content-type': 'union',
            'interface': path,
            'url': "/smartfactory" + path,
            'method': method,
            'data': '',
            'expected': '{\"innerCode\":"200"}',
            'check_sql': '',
            'result': '',
            'tag': ''
        }
        # NOTE(review): whatever content-type the body/query branch picks
        # here is unconditionally overwritten by the method-based if/else
        # just below — confirm whether the first assignment is still wanted.
        if len(body) > 0:
            item['content-type'] = 'data'
            if len(body) == 1 and body.get('_parms', None) != None:
                item['data'] = body['_parms']
            else:
                item['data'] = dic2String(body)
        else:
            item['content-type'] = 'params'
            item['data'] = dic2String(query)
        if method == "post":
            item['content-type'] = 'json'
        else:
            item['content-type'] = 'params'
        funcs.append(item)
        i += 1
    # Flush the final controller's rows.
    writeCase(lastCName, funcs)
    writeCaseClass(lastCName)
def parseArgs():
    """Return the static validation-rule table keyed by swagger type."""
    int_rules = {
        'min': 0,
        'max': 100
    }
    string_rules = {
        'min': 0,
        'max': 100,
        'whiteSpace': True,
        'required': True
    }
    return {'int': int_rules, 'string': string_rules}
# Script entry point — note this runs at *import* time as well; an
# `if __name__ == "__main__":` guard would be safer, but it is left
# unchanged in case callers rely on the import-time side effect.
main('*', parseArgs())
| StarcoderdataPython |
1678061 | <gh_stars>0
# time: O(len(S) * T(in))
# space: O(1)
class Solution:
    def numJewelsInStones(self, J, S):
        """Count how many characters of S appear in the jewel string J.

        :type J: str
        :type S: str
        :rtype: int

        Uses a set for O(1) membership, so the whole call is
        O(len(J) + len(S)) instead of the original O(len(J) * len(S)).
        """
        jewels = set(J)
        return sum(1 for stone in S if stone in jewels)
4835989 | from pytest_cases import parametrize
from tests.conftest import get_expected_get_headers, get_expected_post_headers
from tests.mms.conftest import (
get_inbound_mms_messages_query_parameters,
get_inbound_mms_messages_response,
get_mms_body_multipart,
get_mms_body_request,
get_mms_delivery_reports_query_parameters,
get_mms_delivery_reports_response,
get_send_mms_response,
)
# Shared fixtures for the MMS endpoint test cases, keyed by scenario name.
# Each entry bundles the mocked response body, the expected outbound request
# (headers / query string / payload), and the client method under test.
ENDPOINT_TEST_ARGUMENTS = {
    "send_mms": {
        "response_content": get_send_mms_response(),
        "endpoint": "/mms/1/single",
        "http_method": "POST",
        "expected_headers": get_expected_post_headers(
            "multipart/form-data; boundary=mockBoundary"
        ),
        "expected_query_parameters": None,
        "expected_data": get_mms_body_multipart(),
        "request_data": get_mms_body_request(),
        "method_name": "send_mms_message",
    },
    "get_mms_delivery_reports": {
        "response_content": get_mms_delivery_reports_response(),
        "endpoint": "/mms/1/reports",
        "http_method": "GET",
        "expected_headers": get_expected_get_headers(),
        "expected_query_parameters": "messageId=abc-123&limit=1",
        "expected_data": None,
        "request_data": get_mms_delivery_reports_query_parameters(),
        "method_name": "get_mms_delivery_reports",
    },
    "get_inbound_mms_messages": {
        "response_content": get_inbound_mms_messages_response(),
        "endpoint": "/mms/1/inbox/reports",
        "http_method": "GET",
        "expected_headers": get_expected_get_headers(),
        "expected_query_parameters": "limit=1",
        "expected_data": None,
        "request_data": get_inbound_mms_messages_query_parameters(),
        "method_name": "get_inbound_mms_messages",
    },
}
@parametrize(endpoint_type=ENDPOINT_TEST_ARGUMENTS.keys(), status_code=(200, 400, 500))
def case__supported_status(endpoint_type, status_code):
    """One (status, fixtures...) tuple per endpoint / supported-status pair."""
    args = ENDPOINT_TEST_ARGUMENTS[endpoint_type]
    if endpoint_type == "send_mms":
        # Rewind the shared media stream so each case reads it from the start.
        args["request_data"]["media"].seek(0)
    return (
        status_code,
        args["response_content"],
        args["endpoint"],
        args["http_method"],
        args["expected_headers"],
        args["expected_query_parameters"],
        args["expected_data"],
        args["request_data"],
        args["method_name"],
    )
@parametrize(endpoint_type=ENDPOINT_TEST_ARGUMENTS.keys())
def case__unsupported_status(endpoint_type):
    """Same fixtures as the supported cases, but with an unexpected 201."""
    args = ENDPOINT_TEST_ARGUMENTS[endpoint_type]
    if endpoint_type == "send_mms":
        # Rewind the shared media stream so each case reads it from the start.
        args["request_data"]["media"].seek(0)
    return (
        201,
        args["response_content"],
        args["endpoint"],
        args["http_method"],
        args["expected_headers"],
        args["expected_query_parameters"],
        args["expected_data"],
        args["request_data"],
        args["method_name"],
    )
| StarcoderdataPython |
from functools import wraps

from django.http.response import JsonResponse
from django.shortcuts import render
from rest_framework.response import Response
from rest_framework.views import APIView

from nitmis_admin.serializers.UserSerializer import UserSerializer
def create_user(role="Guest"):
"""
"""
def fun_wrapper(func):
def wrapper(*args, **kwargs):
serializer = UserSerializer(data=args[1].data)
#
# If the data is valid, create a new user
# and return the access token details.
if serializer.is_valid():
serializer.save(role=role)
return JsonResponse(serializer.data)
return JsonResponse({"errors": serializer.errors}, status=422)
return wrapper
return fun_wrapper
class Register(APIView):
    '''
    Parent register controller.  POST requests create
    a general Guest account.
    '''
    def get(self, request):
        '''
        Renders the base layout on GET request.  The frontend
        handles the rendering of forms.
        '''
        return render(request, 'base.html')
    @create_user()
    def post(self, request):
        '''
        Registers a new user and assigns the user a Guest role.

        The body is intentionally empty: the ``create_user`` decorator
        replaces this method entirely and produces the JSON response.
        '''
class AdminRegister(Register):
    '''
    Register controller for administrators.  Inherits GET rendering from
    :class:`Register` and overrides only the account role used on POST.
    '''
    @create_user(role="Administrator")
    def post(self, request):
        '''
        Overridden post function.  Registers the user as an administrator.

        Body intentionally empty — the decorator supplies the behaviour.
        '''
3378562 | <reponame>PDBe-KB/pdbe-kb-uniprot-variant-import
import csv
from uniprot_variant_import.constants import *
class VariationImport(object):
    """
    This object is responsible for parsing the data from a JSON file that is in
    the UniProt variant API format, and for extracting relevant information, saving it
    in multiple CSV files.

    These CSV files are used for importing data into Neo4j, and have a defined format.
    """

    def __init__(self, data):
        """
        :param data: JSON data that complies with the UniProt variant API format, see UNIPROT_API_URL for examples
        """
        self.data = data
        # Node and relationship CSV output locations (from constants).
        self.unp_variant_csv_path = UNP_VARIANT_CSV_PATH
        self.xref_csv_path = UNP_XREF_CSV_PATH
        self.evidence_csv_path = UNP_EVIDENCE_CSV_PATH
        self.association_csv_path = UNP_ASSOCIATION_CSV_PATH
        self.unp_unp_variant_csv_path = UNP_UNP_VARIANT_CSV_PATH
        self.unp_variant_xref_csv_path = UNP_VARIANT_XREF_CSV_PATH
        self.unp_variant_association_csv_path = UNP_VARIANT_ASSOCIATION_CSV_PATH
        self.unp_variant_evidence_csv_path = UNP_VARIANT_EVIDENCE_CSV_PATH
        self.unp_assoc_xref_csv_path = UNP_ASSOCIATION_XREF_CSV_PATH
        self.unp_assoc_evidence_csv_path = UNP_ASSOCIATION_EVIDENCE_CSV_PATH
        # Column orders used when writing the node CSVs.
        self.xref_keys = ['name', 'id', 'url', 'alternativeUrl']
        self.unp_variant_keys = ['type', 'description', 'alternativeSequence',
                                 'begin', 'end', 'wildType', 'polyphenPrediction',
                                 'polyphenScore', 'siftPrediction', 'siftScore',
                                 'somaticStatus', 'cytogeneticBand', 'consequenceType',
                                 'genomicLocation', 'clinicalSignificances', 'sourceType']
        self.evidence_keys = ['code', 'name', 'id', 'url', 'alternativeUrl']
        self.association_keys = ['name', 'description', 'disease']

    def run(self):
        """
        Parses the JSON and writes out the relevant pieces of information. It first handles the actual variant data,
        and then calls a number of other methods to handle the various other parts like xrefs and associations.

        :return: Int or None; the number of variant features found
        """
        if 'accession' not in self.data.keys():
            return None
        accession = self.data['accession']
        feature_count = 0
        xref_count = 0
        evidences_count = 0
        association_count = 0
        for feature in self.data['features']:
            feature_count += 1
            variant_id = 'var_%s_%s' % (accession, feature_count)
            self.save_to_rels_file(accession, variant_id, self.unp_unp_variant_csv_path)
            self.save_to_file(feature, variant_id, self.unp_variant_keys, self.unp_variant_csv_path)
            xref_count = self.read_xrefs(feature, variant_id, xref_count, self.unp_variant_xref_csv_path)
            evidences_count = self.read_evidences(evidences_count, feature, variant_id, self.unp_variant_evidence_csv_path)
            evidences_count, xref_count = self.read_associations(association_count, evidences_count, feature,
                                                                 variant_id, xref_count)
        return feature_count

    def read_associations(self, association_count, evidences_count, feature, variant_id, xref_count):
        """
        Parses the associations sub-dictionary of the data and saves information to files

        :param association_count: Int; used for generating unique identifiers
        :param evidences_count: Int; used for generating unique identifiers
        :param feature: Dict; the data that has the required information
        :param variant_id: String; used for generating unique identifiers
        :param xref_count: Int; used for generating unique identifiers
        :return: tuple of (evidences_count, xref_count) running totals
        """
        if 'association' in feature.keys():
            for association in feature['association']:
                association_count += 1
                # Generate an identifier (unique because variant_id is unique)
                association_id = 'assoc_%s_%s' % (variant_id, association_count)
                # Save the relation between the variant and the association
                self.save_to_rels_file(variant_id, association_id, self.unp_variant_association_csv_path)
                # Save all the data items from the association dictionary
                self.save_to_file(association, association_id, self.association_keys, self.association_csv_path)
                # Update the running xref / evidence counters
                xref_count = self.read_xrefs(association, association_id, xref_count, self.unp_assoc_xref_csv_path)
                evidences_count = self.read_evidences(evidences_count, association, association_id, self.unp_assoc_evidence_csv_path)
        return evidences_count, xref_count

    def read_xrefs(self, data, variant_id, xref_count, rels_path):
        """
        Parses the xref sub-dictionary of the data and saves the information to files

        :param data: Dict; sub-dictionary with the xref data
        :param variant_id: String; used for generating unique identifiers
        :param xref_count: Int; used for generating unique identifiers
        :param rels_path: String; path to the relevant relationship CSV
        :return: Int; the running count of xrefs
        """
        data_array = None
        if 'xrefs' in data.keys():
            data_array = data['xrefs']
        elif 'dbReferences' in data.keys():
            data_array = data['dbReferences']
        if not data_array:
            # Bug fix: previously returned None here, which clobbered the
            # caller's running counter and raised TypeError on the next
            # `xref_count += 1`.  Return the unchanged count instead.
            return xref_count
        for xref in data_array:
            xref_count += 1
            xref_id = 'xref_%s_%s' % (variant_id, xref_count)
            self.save_to_file(xref, xref_id, self.xref_keys, self.xref_csv_path)
            self.save_to_rels_file(variant_id, xref_id, rels_path)
        return xref_count

    def read_evidences(self, evidences_count, data, variant_id, rels_path):
        """
        Parses the evidences sub-dictionary of the data and saves the information to files

        :param evidences_count: Int; used for generating unique identifiers
        :param data: Dict; sub-dictionary with the evidences data
        :param variant_id: String; used for generating unique identifiers
        :param rels_path: String; path to the relevant relationship CSV
        :return: Int; the running count of evidences
        """
        if 'evidences' in data.keys():
            for evidence in data['evidences']:
                evidences_count += 1
                evidence_id = 'evid_%s_%s' % (variant_id, evidences_count)
                # Flatten the nested 'source' dict and carry the code along.
                flat_evidence = {}
                if 'source' in evidence.keys():
                    flat_evidence = evidence['source']
                flat_evidence['code'] = self.value_or_null(evidence, 'code')
                self.save_to_file(flat_evidence, evidence_id, self.evidence_keys, self.evidence_csv_path)
                self.save_to_rels_file(variant_id, evidence_id, rels_path)
        return evidences_count

    def save_to_file(self, data, identifier, key_list, csv_path):
        """
        Write out all the data from a dictionary to a CSV file

        :param data: Dict; data to be saved
        :param identifier: String; identifier of the data
        :param key_list: Array; a list of keys (Strings) to be used from the data dictionary
        :param csv_path: String; path to the CSV file
        :return: None
        """
        csv_file = open(csv_path, 'a')
        csv_writer = csv.writer(csv_file, dialect='excel')
        row = [identifier]
        for key in key_list:
            row.append(self.value_or_null(data, key))
        csv_writer.writerow(row)
        csv_file.close()

    def save_to_rels_file(self, x, y, csv_path):
        """
        Write out a single (x, y) relationship row to a CSV file

        :param x: String; data to write out
        :param y: String; data to write out
        :param csv_path: String; path to the CSV file
        :return: None
        """
        csv_file = open(csv_path, 'a')
        csv_writer = csv.writer(csv_file, dialect='excel')
        csv_writer.writerow((x, y))
        csv_file.close()

    def value_or_null(self, data, key):
        """
        Checks if there is data for a given key

        :param data: Dict; a sub-dictionary of the UniProt data
        :param key: String; a key
        :return: String; the stringified value or ''
        """
        if data and key in data.keys():
            return str(data[key])
        return ''
| StarcoderdataPython |
3364610 | <reponame>Ulises-Rosas/WoRMStools
#!/usr/bin/env python3
# -*- coding: utf-8 -*- #
import re
import time
from wormstools.utils import *
from wormstools.worms_core import Worms
def wid(file, win, wout):
    """Resolve a WoRMS aphiaID for every species name and write a TSV report."""
    fo = cname(win, 'aphiaID') if wout == 'input_based' else wout + '_worms_aphiaID.tsv'
    pf = wformat(file, 'general')
    msg, firstLine = msgFirstL('aphiaIDs', None)
    print(msg)
    out = open(fo, "w")
    out.write(firstLine)
    for pos, line in enumerate(file, start=1):
        raw_name = line.replace("\n", "")
        clean_name = modName(raw_name)
        if not clean_name:
            continue
        if re.findall(" sp[p\\.]{0,2}$", clean_name):
            # "Genus sp."-style placeholders cannot be matched to a record.
            aphia_id = ''
        else:
            record = Worms(clean_name.lower())
            aphia_id = record.aphiaID
            time.sleep(0.5)  # stay polite to the WoRMS API
        print(pf % (pos, raw_name))
        if not aphia_id:
            out.write('%s\t%s\t%s\n' % (raw_name, '', 'Record not found in WoRMS'))
        else:
            out.write('%s\t%s\t%s\n' % (raw_name, aphia_id, ''))
    out.close()
def wval(file, win, wout, wat):
    """Validate each species name against WoRMS (fuzzy `taxamatch`) and write
    a tab-separated report (name, validated name, observation), optionally
    appending taxonomic ranks when *wat* is given."""
    fo = wout + '_worms_val.tsv' if wout != 'input_based' else cname(win, 'val')
    pf1, pf2 = wformat(file, 'validation')
    msg, firstLine = msgFirstL('validated names', wat)
    print(msg)
    f = open(fo, "w")
    f.write(firstLine)
    # file = open(options['spps']).read().split("\n")
    for i in range(0, file.__len__()):
        # i = 1
        spps0 = file[i].replace("\n", "")
        spps = modName(spps0)
        if not spps:
            continue
        if re.findall(" sp[p\\.]{0,2}$", spps):
            # Placeholder names ("Genus sp.") are not queried.
            spps_v = ''
        else:
            wObj = Worms(spps.lower())
            spps_v = wObj.taxamatch()
            time.sleep(0.5)  # throttle requests to the WoRMS API
        OutStr = ''
        if not spps_v:
            OutStr += '%s\t%s\t%s' % (spps0, '', 'Record not found in WoRMS')
            print(pf2 % (i + 1, spps0))
        else:
            OutStr += '%s\t%s\t%s' % (spps0, spps_v, '')
            print(pf1 % (i + 1, spps0, spps_v))
        if wat is not None:
            # NOTE(review): if the very first valid entry hits the "sp."
            # branch, wObj is referenced here before ever being assigned
            # (NameError) — latent bug left for a separate fix.
            OutStr = rankStr(spps_v, wObj, wat, OutStr)
        f.write(OutStr + '\n')
    f.close()
def wsyn(file, win, wout, wat):
    """Fetch WoRMS synonyms for each species name and write a tab-separated
    report (accepted name, synonyms, observation), optionally appending
    taxonomic ranks when *wat* is given."""
    fo = wout + '_worms_syn.tsv' if wout != 'input_based' else cname(win, 'syn')
    pf = wformat(file, 'general')
    msg, firstLine = msgFirstL('synonyms', wat)
    print(msg)
    f = open(fo, "w")
    f.write(firstLine)
    for i in range(0, file.__len__()):
        # i = 4
        spps0 = file[i].replace("\n", "")
        # spps0 = "Alopias pelgicus"
        spps = modName(spps0)
        if not spps:
            continue
        if re.findall(" sp[p\\.]{0,2}$", spps):
            # Placeholder names ("Genus sp.") are not queried.
            syns = ''
        else:
            wObj = Worms(spps)
            syns = wObj.get_synonyms()
            time.sleep(0.5)  # throttle requests to the WoRMS API
        # A plain string result (including '') means no synonym list was
        # found; a non-string result is treated as a found record.
        cc = '' if isinstance(syns, str) else 'in WoRMS'
        print(pf % (i + 1, spps0))
        OutStr = ''
        if not cc:
            OutStr += '%s\t%s\t%s' % (spps0, '', 'Record not found in WoRMS')
        else:
            jsyns = ", ".join(syns)
            # get_synonyms() may resolve a deprecated name; report the
            # accepted one and note the deprecation.
            if wObj.accepted_name:
                obs = "Deprecated name: %s" % spps0
                spps0 = wObj.accepted_name
            else:
                obs = ''
            OutStr += '%s\t%s\t%s' % (spps0, jsyns, obs)
        if wat is not None:
            # NOTE(review): wObj may be unbound here if the first entry took
            # the "sp." branch — same latent NameError as in wval().
            OutStr = rankStr(cc, wObj, wat, OutStr)
        f.write(OutStr + '\n')
    f.close()
def wrank(file, win, wout, wat):
    """Fetch the taxonomic ranks listed in *wat* for each species name and
    write them as a tab-separated report."""
    print("\nAdding taxonomical ranks:\n")
    fo = wout + '_worms_ranks.tsv' if wout != 'input_based' else cname(win, 'ranks')
    pf = wformat(file, 'general')
    firstLine = "%s\tSpecies\tObs\n" % "\t".join(wat)
    f = open(fo, "w")
    f.write(firstLine)
    for i in range(0, file.__len__()):
        spps0 = file[i].replace("\n", "")
        # spps0 = "Alopias pelgicus"
        spps = modName(spps0)
        if not spps:
            continue
        if re.findall(" sp[p\\.]{0,2}$", spps):
            # Placeholder names ("Genus sp.") are not queried.
            tax_ranks = []
        else:
            wObj = Worms(spps)
            wObj.get_taxonomic_ranges()
            tax_ranks = wObj.taxonomic_ranges
        print(pf % (i + 1, spps0))
        if not tax_ranks:
            so = "%s\t%s" % (spps0, 'Record not found in WoRMS')
        else:
            # Report the accepted name when the input was deprecated.
            if wObj.accepted_name:
                obs = "deprecated name: %s" % spps0
                spps0 = wObj.accepted_name
            else:
                obs = ''
            so = "%s\t%s" % (spps0, obs)
        # NOTE(review): wObj may be unbound here if the first entry took the
        # "sp." branch — same latent NameError as in wval()/wsyn().
        f.write(rankStr(tax_ranks, wObj, wat, so) + "\n")
    f.close()
| StarcoderdataPython |
1708164 | <gh_stars>0
import sqlite3
# NOTE(review): hard-coded absolute Windows path; the connection is opened at
# import time and never closed — consider a configurable path plus an
# explicit conn.close() (or contextlib.closing).
conn = sqlite3.connect('C:/Users/User/Desktop/test1.db')
cursor = conn.cursor()
def table_creation():
    """Create the `games` table (raises if it already exists)."""
    ddl = '''CREATE TABLE games(
    ID INT,
    name TEXT,
    genre TEXT,
    year INT,
    studio TEXT
    )
    '''
    cursor.execute(ddl)
# Create the table and persist the change.  NOTE(review): CREATE TABLE has no
# "IF NOT EXISTS", so re-running this script raises sqlite3.OperationalError.
table_creation()
conn.commit()
# Leftover experiment kept verbatim: a commented-out query plus an unused
# string literal holding the fetch/print steps.
#cursor.execute('''SELECT * FROM students WHERE avg_score>9''')
'''
data = cursor.fetchall()
conn.commit()
print(data)'''
1634325 | #!python3
from datetime import datetime
from datetime import date

# Inspect today's date and count the days until a fixed Christmas date.
today = datetime.today()
print(type(today))
todaydate = date.today()
print('today:', todaydate)
print('month:', todaydate.month)
print('year:', todaydate.year)
print('day:', todaydate.day)

christmas = date(2020, 12, 25)
print('chr:', christmas)

# Fix: compare dates by value with `!=`, not identity with `is not` —
# two equal date objects are almost never the same object, so the original
# `is not` branch was effectively always taken.
if christmas != todaydate:
    print("Sorry there are still " + str((christmas - todaydate).days) + " until Christmas!")
else:
    print("Yay it's Christmas!")
| StarcoderdataPython |
153548 | <gh_stars>1-10
import json
import numpy as np
from sklearn import metrics
def purity_score(y_true, y_pred):
    """Clustering purity: the fraction of samples that fall in the majority
    ground-truth class of their assigned cluster."""
    cm = metrics.cluster.contingency_matrix(y_true, y_pred)
    return np.amax(cm, axis=0).sum() / cm.sum()
# Evaluate multi-animal pose predictions against ground truth by matching
# detections to ground-truth instances and computing per-image cluster
# purity over the individual keypoints.
result_path = 'work_dirs/res50_3mouse_512x512/result_keypoints.json'
gt_path = 'data/3mouse/annotations/val.json'
with open(result_path) as f:
    res = json.load(f)
with open(gt_path) as f:
    gt = json.load(f)
# mydict: image_id -> {'res': [detected keypoint lists], 'gt': [...]}.
mydict = {}
imgid_2_filename = {}
for img in gt['images']:
    image_id = img['id']
    filename = img['file_name']
    imgid_2_filename[image_id] = filename
for e in res:
    image_id, keypoints = e['image_id'],e['keypoints']
    if image_id not in mydict:
        mydict[image_id] = {}
    if 'res' not in mydict[image_id]:
        mydict[image_id]['res'] = []
    mydict[image_id]['res'].append(keypoints)
for ann in gt['annotations']:
    image_id = ann['image_id']
    if 'gt' not in mydict[image_id]:
        mydict[image_id]['gt'] = []
    mydict[image_id]['gt'].append(ann['keypoints'])
for img_id in mydict:
    dts = mydict[img_id]['res']
    dt_x = []
    dt_y = []
    gt_x = []
    gt_y = []
    gts = mydict[img_id]['gt']
    # Instance-level distance matrix between each detection and each GT
    # (keypoints are flat [x, y, score/visibility] triplets).
    dists = np.zeros((len(dts),len(gts)))
    for (j,gt) in enumerate(gts):
        g = np.array(gt)
        xg = g[0::3]; yg = g[1::3];
        #print(xg,yg)
        for i,dt in enumerate(dts):
            d = np.array(dt)
            xd = d[0::3]; yd = d[1::3]
            dx = xd-xg
            dy = yd-yg
            dist = np.sqrt(np.sum(dx**2+dy**2))
            dists[i,j] = dist
    # For each GT instance, the closest detection; each detection's keypoints
    # inherit that predicted instance id.
    preds_ids = np.argmin(dists,axis=0)
    preds = []
    for i,dt in enumerate(dts):
        for _ in range(len(dt)//3):
            preds.append(preds_ids[i])
    # Flatten GT keypoints, remembering which instance each point came from.
    gt_ids = []
    for (id,gt) in enumerate(gts):
        g = np.array(gt)
        xg = g[0::3]; yg = g[1::3];
        for u in xg:
            gt_x.append(u)
            gt_ids.append(id)
        for u in yg:
            gt_y.append(u)
    gt_ids = np.array(gt_ids)
    for (_,dt) in enumerate(dts):
        d = np.array(dt)
        xd = d[0::3]; yd = d[1::3]
        for u in xd:
            dt_x.append(u)
        for u in yd:
            dt_y.append(u)
    # Point-level distance matrix: every detected point vs every GT point.
    point_dists = np.zeros((len(dt_x),len(gt_x)))
    for i in range(len(dt_x)):
        for j in range(len(gt_x)):
            dx = dt_x[i] - gt_x[j]
            dy = dt_y[i] - gt_y[j]
            point_dists[i,j] = np.sqrt(dx**2+dy**2)
    #print (point_dists)
    # pointwise comparison
    gts = gt_ids[np.argmin(point_dists,axis=0)]
    preds = np.array(preds)
    print ('gts')
    print (gts)
    print ('preds')
    print (preds)
    purity = purity_score(gts,preds)
    #print (imgid_2_filename[img_id],purity)
| StarcoderdataPython |
4800247 | <filename>winguhub/group/handlers.py
from signals import grpmsg_added
from models import GroupMessage
from winguhub.notifications.models import UserNotification
from seaserv import get_group_members
def grpmsg_added_cb(sender, **kwargs):
    """Notify every group member (except the author) about a new message."""
    group_id = kwargs['group_id']
    from_email = kwargs['from_email']
    members = get_group_members(int(group_id))
    # Skip the notification fan-out entirely for large groups.
    if len(members) > 15:
        return
    for member in members:
        if member.user_name == from_email:
            continue
        try:
            UserNotification.objects.get(to_user=member.user_name,
                                         msg_type='group_msg',
                                         detail=group_id)
        except UserNotification.DoesNotExist:
            # No pending notification for this member yet — create one.
            note = UserNotification(to_user=member.user_name,
                                    msg_type='group_msg',
                                    detail=group_id)
            note.save()
def grpmsg_reply_added_cb(sender, **kwargs):
    """Create a 'grpmsg_reply' notification for the original message author."""
    msg_id = kwargs['msg_id']
    reply_from_email = kwargs['from_email'] # this value may be used in future
    try:
        group_msg = GroupMessage.objects.get(id=msg_id)
    except GroupMessage.DoesNotExist:
        # Fix: bail out instead of falling through with `group_msg` unbound,
        # which previously raised NameError on the lookup below.
        return
    try:
        UserNotification.objects.get(to_user=group_msg.from_email,
                                     msg_type='grpmsg_reply',
                                     detail=msg_id)
    except UserNotification.DoesNotExist:
        n = UserNotification(to_user=group_msg.from_email,
                             msg_type='grpmsg_reply',
                             detail=msg_id)
        n.save()
| StarcoderdataPython |
3367970 | <reponame>CFD-UTSA/Turbulence-stars
# Licensed under an MIT open source license - see LICENSE
from __future__ import print_function, absolute_import, division
'''
Test functions for Cramer
'''
import numpy.testing as npt
import os
from ..statistics import Cramer_Distance
from ._testing_data import \
dataset1, dataset2, computed_data, computed_distances
def test_cramer():
    """Cramer distance on the full datasets matches the precomputed values."""
    tester = \
        Cramer_Distance(dataset1["cube"],
                        dataset2["cube"],
                        noise_value1=0.1,
                        noise_value2=0.1).distance_metric(normalize=False,
                                                          verbose=True,
                                                          save_name='test.png')
    # Remove the plot artefact directly instead of shelling out to `rm`
    # via os.system — portable across platforms and fails loudly if the
    # figure was never written.
    os.remove("test.png")
    npt.assert_allclose(tester.data_matrix1,
                        computed_data["cramer_val"])
    npt.assert_almost_equal(tester.distance,
                            computed_distances['cramer_distance'])
def test_cramer_spatial_diff():
    """The distance is symmetric even when cube spatial sizes differ."""
    trimmed = dataset1["cube"][0][:, :26, :26]

    forward = Cramer_Distance(trimmed, dataset2["cube"])
    forward.distance_metric(normalize=False)

    backward = Cramer_Distance(dataset2["cube"], trimmed)
    backward.distance_metric(normalize=False)

    npt.assert_almost_equal(forward.distance, backward.distance)
| StarcoderdataPython |
93800 | <gh_stars>0
"""Package npcs."""
from codemaster.models.actors.npcs.bats import (
BatBlue,
BatLilac,
BatRed,
BatBlack,
)
from codemaster.models.actors.npcs.skulls import (
SkullGreen,
SkullBlue,
SkullYellow,
SkullRed,
)
from codemaster.models.actors.npcs.ghosts import (
GhostGreen,
GhostBlue,
GhostYellow,
GhostRed,
)
from codemaster.models.actors.npcs.vampires import (
VampireMale,
VampireFemale,
)
from codemaster.models.actors.npcs.demons import (
DemonMale,
)
from codemaster.models.actors.npcs.wolfmen import (
WolfManMale,
)
from codemaster.models.actors.npcs.terminator_eyes import (
TerminatorEyeGreen,
TerminatorEyeBlue,
TerminatorEyeYellow,
TerminatorEyeRed,
)
from codemaster.models.actors.npcs.snakes import (
SnakeGreen,
SnakeBlue,
SnakeYellow,
SnakeRed,
)
| StarcoderdataPython |
1679402 | <filename>examples/tutorial_parallel/atomic_out.py
### Model
from pypdevs.DEVS import *
class TrafficLightWithOutput(AtomicDEVS):
    """Autonomous traffic light that also reports each upcoming colour."""

    # Next colour for each state, and how long each colour is held.
    _NEXT = {"red": "green", "yellow": "red", "green": "yellow"}
    _DELAY = {"red": 60, "yellow": 3, "green": 57}

    def __init__(self):
        AtomicDEVS.__init__(self, "Light")
        self.state = "green"
        self.observe = self.addOutPort("observer")

    def intTransition(self):
        """Advance to the next colour in the cycle."""
        return self._NEXT[self.state]

    def timeAdvance(self):
        """Time to remain in the current colour."""
        return self._DELAY[self.state]

    def outputFnc(self):
        """Emit the colour the light is about to switch to."""
        return {self.observe: [self._NEXT[self.state]]}
### Experiment
from pypdevs.simulator import Simulator

# Wire up the model, enable verbose tracing, and run for 500 time units.
model = TrafficLightWithOutput()
sim = Simulator(model)
sim.setVerbose()
sim.setTerminationTime(500)
sim.simulate()
1729788 | <reponame>petrpavlu/storepass
# Copyright (C) 2019-2020 <NAME> <<EMAIL>>
# SPDX-License-Identifier: MIT
"""Textual views suitable for the console."""
import storepass.model
class ListView(storepass.model.ModelVisitor):
    """View that produces a tree with one-line about each visited entry."""

    def visit_root(self, root):
        """Process the database root."""
        # Nothing to print for the root itself.

    def _get_current_indent(self):
        """Obtain indentation matching the current tree depth."""
        return " " * (len(self._path) - 1)

    def visit_folder(self, folder):
        """Print one-line information about a folder entry."""
        indent = self._get_current_indent()
        if folder.description is None:
            description = ""
        else:
            description = f": {folder.description}"
        print(f"{indent}+ {folder.name}{description}")

    def visit_account(self, account):
        """Print one-line information about an account entry."""
        indent = self._get_current_indent()
        # Prefer the hostname, then the URL, as the bracketed address.
        address = None
        if storepass.model.HOSTNAME_FIELD in account.entry_fields:
            address = account.properties[storepass.model.HOSTNAME_FIELD]
        elif storepass.model.URL_FIELD in account.entry_fields:
            address = account.properties[storepass.model.URL_FIELD]
        address = "" if address is None else f" [{address}]"
        description = ("" if account.description is None
                       else f": {account.description}")
        print(f"{indent}- {account.name}{address}{description}")
class DetailView(storepass.model.ModelVisitor):
    """View that shows detailed information about visited entries."""

    def visit_entry(self, entry):
        """Print detailed information about an entry."""
        # Header line: full hierarchical name plus the entry type label.
        print(f"+ {entry.get_full_name()} ({entry.entry_label})")

        # Process the entry's description.
        if entry.description is not None:
            print(f" - Description: {entry.description}")

        # Process entry-specific properties.
        for field in entry.entry_fields:
            value = entry.properties[field]
            # Unset properties are skipped rather than printed as "None".
            if value is not None:
                print(f" - {field.label}: {value}")

        # Process the entry's notes and updated timestamp.
        if entry.notes is not None:
            print(f" - Notes: {entry.notes}")
        if entry.updated is not None:
            # Rendered in the local timezone in locale format, e.g.
            # "Mon Jan  6 10:00:00 2020 CET".
            updated = entry.updated.astimezone().strftime('%c %Z')
            print(f" - Last modified: {updated}")
| StarcoderdataPython |
1726116 | import json
import string
import random
from hashlib import sha256
import logging
import requests
from proxy.request import send_request, Request
from proxy.response import Response
from resources.base import Resource
logger = logging.getLogger()
class AAFResource(Resource):
    """Proxy resource that opens endpoint sessions against an AAF service.

    NOTE(review): `IncorrectSecurityConfigurationException` is raised below but
    is not imported in this module's visible imports -- confirm it is defined
    or injected elsewhere; otherwise these paths raise NameError instead.
    """

    def __init__(self, service):
        super().__init__(service)
        # The service definition carries the AAF endpoint credentials and URL.
        self.config = service.service_definition
        if self.config is None:
            raise IncorrectSecurityConfigurationException("AAF ingegration is not configured")
        data = self.config.get("data")
        self.endpoint_id = data.get("endpoint_id")
        self.endpoint_secret = data.get("endpoint_secret")
        self.target_url = data.get("target_url")

    def get_endpoint_hash(self, endpointId, endpoint_secret, salt):
        """Return sha256(secret + sha256(endpoint_id + salt)) as a hex digest.

        Two-stage salted hash: the endpoint id is salted and hashed first, and
        that hex digest is then used to salt the endpoint secret.
        """
        salted_endpoint_id = (endpointId + salt).encode('utf-8')
        endpoint_id_hash = sha256(salted_endpoint_id).hexdigest()
        salted_endpoint_secret = (endpoint_secret + endpoint_id_hash).encode('utf-8')
        return sha256(salted_endpoint_secret).hexdigest()

    def create_endpoint(self, request):
        """Open a new session for this endpoint on the AAF server.

        Returns a JSON Response mirroring the AAF reply; raises on auth failure.
        """
        logger.debug("AAF target url: {}".format(self.target_url))
        try:
            # NOTE(review): the salt is a fixed constant; presumably it should be
            # generated per request (the unused `random`/`string` imports suggest
            # that was the intent) -- confirm against the AAF protocol.
            salt = "3a8c901ade36cd446114eb602711ccce75443c294cc3dd02e574702bb574f32f"
            # SECURITY: verify=False disables TLS certificate validation for this
            # request; acceptable only for trusted internal endpoints.
            r = requests.post("{}/api/v1/endpoints/{}/sessions".format(self.target_url, self.endpoint_id),
                              data={"salt": salt,
                                    "endpoint_secret_hash": self.get_endpoint_hash(self.endpoint_id,
                                                                                   self.endpoint_secret, salt),
                                    "session_data": {}}, timeout=30, verify=False)
            logger.debug("AAF returns: {}".format(r.text))
            logger.debug("r.status_code: {}".format(r.status_code))
            if r.status_code == 200:
                return Response(r.text, headers={'Content-type': "application/json"})
            elif r.status_code == 401:
                '''
                When the server returns a 401 it means that the client ID or
                client secret are incorrect. In this case we can give a better
                error message to help sort out the configuration issue.
                '''
                raise IncorrectSecurityConfigurationException("Unable to authenticate request")
            else:
                # Any other status is passed through to the caller verbatim.
                return Response(r.text, headers={'Content-type': "application/json"})
        except Exception as e:
            logger.exception("Failed to run AAF token checker")
            raise e
| StarcoderdataPython |
1629717 | <reponame>PyGotham/rewards
from __future__ import annotations
from django.contrib.auth import get_user_model
from django.test import Client
import pytest
# Address used by every login test below.
TEST_EMAIL = "<EMAIL>"
# pyre-ignore[16]: This is fixed by https://github.com/facebook/pyre-check/pull/256.
# Resolve the project's active user model once at import time.
User = get_user_model()
@pytest.mark.django_db
# pyre-ignore[11]: This is fixed by https://github.com/facebook/pyre-check/pull/256.
def test_login_creates_new_user(client: Client) -> None:
    """Posting an email to /login lazily creates a matching user record."""
    assert not User.objects.filter(email=TEST_EMAIL)
    client.post("/login", {"email": TEST_EMAIL})
    # get() raises DoesNotExist if the user was not created, failing the test.
    assert User.objects.get(email=TEST_EMAIL)
# pyre-ignore[11]: This is fixed by https://github.com/facebook/pyre-check/pull/256.
def test_login_requires_email(client: Client) -> None:
    """Posting without an email re-renders the form with a validation error."""
    response = client.post("/login")
    # A successful login would redirect (301/302); failure must not.
    assert response.status_code not in (301, 302)
    assert b"this field is required" in response.content.lower()
| StarcoderdataPython |
25035 | <reponame>Alex92rus/ErrorDetectionProject
def extract_to_m2(filename, annot_triples):
    """
    Extracts error detection annotations in m2 file format

    Args:
        filename: the output m2 file
        annot_triples: the annotations of form (sentence, indexes, selections)
    """
    with open(filename, 'w+') as out:
        for sentence, indexes, selections in annot_triples:
            # One sentence line, then one 'A' line per selected token.
            out.write(f'S {sentence}\n')
            for pos, index in enumerate(indexes):
                if selections[pos] != 1:
                    continue
                # Integer indexes become a one-token span [i, i+1);
                # string indexes are repeated verbatim.
                if isinstance(index, int):
                    span = f'{index} {index + 1}'
                else:
                    span = f'{index} {index}'
                out.write(f'A {span}|||IG|||IG|||REQUIRED|||-NONE-|||1\n')
            # Blank line terminates each sentence block.
            out.write('\n')
3254911 | <reponame>Korred/advent_of_code_2016
def improve(a):
    """One dragon-curve expansion step: a -> a + '0' + reverse(complement(a))."""
    flipped_reverse = a[::-1].translate(str.maketrans('01', '10'))
    return f'{a}0{flipped_reverse}'
def get_checksum(data):
    """Collapse *data* to its checksum in a single pass.

    Instead of repeatedly pairing digits, split the data into an odd number
    of equal even-sized chunks and emit '1' for each chunk with an even
    count of ones, '0' otherwise -- equivalent to the iterative pairing.
    """
    size = len(data)
    half = size // 2
    # Start from the largest even candidate below half.
    chunk = half - 2 if half % 2 == 0 else half - 1
    # Largest even chunk size that divides the data into an odd number of parts.
    while not (size % chunk == 0 and (size // chunk) % 2 != 0):
        chunk -= 2

    digits = []
    for start in range(0, size, chunk):
        ones = data[start:start + chunk].count('1')
        digits.append('1' if ones % 2 == 0 else '0')
    return ''.join(digits)
# Disc sizes for part 1 and part 2 of the puzzle.
sizes = [272, 35651584]

for disc_size in sizes:
    # Puzzle input: initial state before dragon-curve expansion.
    data = "10111011111001111"
    # Expand until the data fills the disc, then truncate to exactly disc_size.
    while len(data) < disc_size:
        data = improve(data)
    res = get_checksum(data[:disc_size])
    print("Checksum for size {}: {}".format(disc_size, res))
| StarcoderdataPython |
178194 | <gh_stars>0
""" Auteur: <NAME>
Date : Mars 2020
Projet : MOOC Python 3 - France Université Numérique
Objectif:
Écrire un programme qui, si temperature (entier lu sur input correspondant à la température maximale prévue pour aujourd’hui) est strictement supérieur à 0, teste si temperature est inférieur ou égal à 10, auquel cas il imprime le texte :
Il va faire frais
et qui, si temperature n’est pas supérieur à 0, imprime le texte :
Il va faire froid
Dans les autres cas, le programme n’imprime rien.
Consignes:
Attention, nous rappelons que votre code sera évalué en fonction de ce qu’il affiche, donc veillez à n’imprimer
que le résultat attendu. En particulier, il ne faut rien écrire à l’intérieur des appels à input (int(input())
et non int(input("Entrer un nombre : ")) par exemple), ni ajouter du texte dans ce qui est imprimé (print(res)
et non print("résultat :", res) par exemple).
Faites attention d’écrire les messages à l’identique de ce qui est demandé (majuscule au début de la ligne, une espace entre chaque mot, etc) .
Un conseil pour avoir des messages identiques est de les copier de l’énoncé pour les coller dans votre code.
Lors de l’affichage des résultats, en cas d’erreur dans certains tests, UpyLaB pourra marquer :
« Le résultat attendu était : aucun résultat ». Cela voudra bien dire qu’il ne faut rien imprimer dans ce cas.
"""
# Forecast hint from today's maximum temperature (read from stdin):
# "frais" for temperatures in (0, 10], "froid" for <= 0, nothing above 10.
temperature = int(input())
if 0 < temperature <= 10:
    print("Il va faire frais")
elif temperature <= 0:
    print("Il va faire froid")
| StarcoderdataPython |
3239679 | <reponame>YuanshengZhao/adiabaticbinary
import tensorflow as tf
# Target size for every image fed to the network.
IMG_HEIGHT = IMG_WIDTH = 64

# On-the-fly augmentation: horizontal flips plus up to 12.5% shifts in both
# axes, filling exposed pixels with the nearest edge value.
# NOTE(review): `datagen` is configured here but not used in this snippet --
# presumably applied to x_train during model fitting; confirm downstream.
datagen = tf.keras.preprocessing.image.ImageDataGenerator(dtype=float,
                                                          horizontal_flip=True,
                                                          width_shift_range=.125,
                                                          height_shift_range=.125,
                                                          fill_mode="nearest",
                                                          )


def loadData(path):
    """Load an image directory into memory as (images, labels) tensors.

    Images are resized to IMG_HEIGHT x IMG_WIDTH grayscale and scaled to [0, 1].
    """
    ds = tf.keras.preprocessing.image_dataset_from_directory(path,
                                                             image_size=(IMG_HEIGHT, IMG_WIDTH),
                                                             batch_size=100,
                                                             color_mode="grayscale",
                                                             interpolation='nearest'
                                                             )
    # load data to memory
    x_t = []
    y_t = []
    for image_batch, labels_batch in ds:
        x_t.append(image_batch)
        y_t.append(labels_batch)
    x_t = tf.concat(x_t, axis=0)
    y_t = tf.concat(y_t, axis=0)
    return tf.cast(x_t, tf.float32) / 255., y_t


x_train, y_train = loadData("Ddogs-vs-cats/train")
x_val, y_val = loadData("Ddogs-vs-cats/validation")
x_test, y_test = loadData("Ddogs-vs-cats/test")

# Center all splits with the per-pixel mean computed on the training set only.
pixel_mean = tf.reduce_mean(x_train, axis=0)
x_train = x_train - pixel_mean
x_test = x_test - pixel_mean
x_val = x_val - pixel_mean
print(x_train.shape, x_val.shape, x_test.shape)
| StarcoderdataPython |
1697975 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import os
import logging
import argparse
import random
from tqdm import tqdm, trange
import dill
from collections import defaultdict
import numpy as np
import pandas as pd
import torch
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler, Dataset
from torch.utils.data.distributed import DistributedSampler
from torch.optim import Adam
from tensorboardX import SummaryWriter
from utils import metric_report, t2n, get_n_params
from config import BertConfig
from predictive_models import GBERT_Predict_Side
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO)
logger = logging.getLogger(__name__)
class Voc(object):
    """Bidirectional vocabulary mapping words <-> consecutive integer ids."""

    def __init__(self):
        # id -> word and word -> id lookup tables, kept in sync.
        self.idx2word = {}
        self.word2idx = {}

    def add_sentence(self, sentence):
        """Register every previously unseen word in *sentence* with the next free id."""
        for token in sentence:
            if token in self.word2idx:
                continue
            new_id = len(self.word2idx)
            self.idx2word[new_id] = token
            self.word2idx[token] = new_id
class EHRTokenizer(object):
    """Runs end-to-end tokenization"""

    def __init__(self, data_dir, special_tokens=("[PAD]", "[CLS]", "[MASK]")):
        # Shared vocabulary over special tokens plus diagnosis (dx) and
        # medication (rx) codes; ids are assigned in insertion order.
        self.vocab = Voc()

        # special tokens
        self.vocab.add_sentence(special_tokens)

        # Code-specific vocabularies; both also feed the shared self.vocab.
        self.rx_voc = self.add_vocab(os.path.join(data_dir, 'rx-vocab.txt'))
        self.dx_voc = self.add_vocab(os.path.join(data_dir, 'dx-vocab.txt'))

        # code only in multi-visit data
        self.rx_voc_multi = Voc()
        self.dx_voc_multi = Voc()
        with open(os.path.join(data_dir, 'rx-vocab-multi.txt'), 'r') as fin:
            for code in fin:
                self.rx_voc_multi.add_sentence([code.rstrip('\n')])
        with open(os.path.join(data_dir, 'dx-vocab-multi.txt'), 'r') as fin:
            for code in fin:
                self.dx_voc_multi.add_sentence([code.rstrip('\n')])

    def add_vocab(self, vocab_file):
        """Merge codes from vocab_file into the shared vocab; return a code-only Voc."""
        voc = self.vocab
        specific_voc = Voc()
        with open(vocab_file, 'r') as fin:
            for code in fin:
                voc.add_sentence([code.rstrip('\n')])
                specific_voc.add_sentence([code.rstrip('\n')])
        return specific_voc

    def convert_tokens_to_ids(self, tokens):
        """Converts a sequence of tokens into ids using the vocab."""
        ids = []
        for token in tokens:
            ids.append(self.vocab.word2idx[token])
        return ids

    def convert_ids_to_tokens(self, ids):
        """Converts a sequence of ids in wordpiece tokens using the vocab."""
        tokens = []
        for i in ids:
            tokens.append(self.vocab.idx2word[i])
        return tokens
class EHRDataset(Dataset):
    """Torch dataset of multi-visit patient records with per-visit side features.

    Each item is one patient; inputs are all visits, targets are the dx/rx
    codes of every visit after the first.
    """

    def __init__(self, data_pd, tokenizer: EHRTokenizer, max_seq_len):
        self.data_pd = data_pd
        self.tokenizer = tokenizer
        self.seq_len = max_seq_len
        self.sample_counter = 0
        # Columns from index 5 onward are treated as side-information features.
        # NOTE(review): assumes the first 5 columns are ids/codes -- confirm
        # against the upstream dataframe schema.
        self.side_len = len(self.data_pd.iloc[0, 5:])
        logger.info('side len %d' % self.side_len)

        def transform_data(data):
            """
            :param data: raw data form
            :return: {subject_id, [adm, 2, codes]},
            """
            records = {}
            side_records = {}
            for subject_id in data['SUBJECT_ID'].unique():
                item_df = data[data['SUBJECT_ID'] == subject_id]
                patient = []
                sides = []
                for _, row in item_df.iterrows():
                    # One admission = [diagnosis codes, medication codes].
                    admission = [list(row['ICD9_CODE']), list(row['ATC4'])]
                    patient.append(admission)
                    sides.append(row[5:].values)
                # Patients with fewer than two visits carry no prediction target.
                if len(patient) < 2:
                    continue
                records[subject_id] = patient
                side_records[subject_id] = sides
            return records, side_records

        self.records, self.side_records = transform_data(data_pd)

    def __len__(self):
        return len(self.records)

    def __getitem__(self, item):
        # cur_id only controls the "log the first few examples" behaviour below.
        cur_id = self.sample_counter
        self.sample_counter += 1
        subject_id = list(self.records.keys())[item]

        def fill_to_max(l, seq):
            # Right-pad a code list with '[PAD]' up to length seq (in place).
            while len(l) < seq:
                l.append('[PAD]')
            return l

        """extract input and output tokens
        """
        input_tokens = []  # (2*max_len*adm)
        output_dx_tokens = []  # (adm-1, l)
        output_rx_tokens = []  # (adm-1, l)
        for idx, adm in enumerate(self.records[subject_id]):
            # Per visit: a [CLS]-prefixed dx segment then a rx segment,
            # each padded to exactly seq_len tokens.
            input_tokens.extend(
                ['[CLS]'] + fill_to_max(list(adm[0]), self.seq_len - 1))
            input_tokens.extend(
                ['[CLS]'] + fill_to_max(list(adm[1]), self.seq_len - 1))

            # output_rx_tokens.append(list(adm[1]))
            # Targets exist only for the second visit onward.
            if idx != 0:
                output_rx_tokens.append(list(adm[1]))
                output_dx_tokens.append(list(adm[0]))

        """convert tokens to id
        """
        input_ids = self.tokenizer.convert_tokens_to_ids(input_tokens)

        output_dx_labels = []  # (adm-1, dx_voc_size)
        output_rx_labels = []  # (adm-1, rx_voc_size)

        dx_voc_size = len(self.tokenizer.dx_voc_multi.word2idx)
        rx_voc_size = len(self.tokenizer.rx_voc_multi.word2idx)
        # Multi-hot target vectors over the multi-visit vocabularies.
        for tokens in output_dx_tokens:
            tmp_labels = np.zeros(dx_voc_size)
            tmp_labels[list(
                map(lambda x: self.tokenizer.dx_voc_multi.word2idx[x], tokens))] = 1
            output_dx_labels.append(tmp_labels)
        for tokens in output_rx_tokens:
            tmp_labels = np.zeros(rx_voc_size)
            tmp_labels[list(
                map(lambda x: self.tokenizer.rx_voc_multi.word2idx[x], tokens))] = 1
            output_rx_labels.append(tmp_labels)

        if cur_id < 5:
            logger.info("*** Example ***")
            logger.info("subject_id: %s" % subject_id)
            logger.info("input tokens: %s" % " ".join(
                [str(x) for x in input_tokens]))
            logger.info("input_ids: %s" %
                        " ".join([str(x) for x in input_ids]))

        assert len(input_ids) == (self.seq_len *
                                  2 * len(self.records[subject_id]))
        assert len(output_dx_labels) == (len(self.records[subject_id]) - 1)
        # assert len(output_rx_labels) == len(self.records[subject_id])-1

        """extract side
        """
        # Side features are aligned with the target visits (first visit dropped).
        sides = self.side_records[subject_id][1:]
        assert len(sides) == len(output_dx_labels)

        cur_tensors = (torch.tensor(input_ids).view(-1, self.seq_len),
                       torch.tensor(output_dx_labels, dtype=torch.float),
                       torch.tensor(output_rx_labels, dtype=torch.float),
                       torch.tensor(sides, dtype=torch.float))

        return cur_tensors
def load_dataset(args):
    """Build the tokenizer and the (train, eval, test) EHRDataset triple.

    Reads the multi-visit pickle plus side features from args.data_dir and
    splits patients by the id lists in train-id/eval-id/test-id.txt.
    """
    data_dir = args.data_dir
    max_seq_len = args.max_seq_length

    # load tokenizer
    tokenizer = EHRTokenizer(data_dir)

    # load data
    data = pd.read_pickle(os.path.join(data_dir, 'data-multi-visit.pkl'))
    # load side
    side_pd = pd.read_pickle(os.path.join(data_dir, 'data-multi-side.pkl'))
    # concat
    # Inner join: visits without side information are dropped.
    data = data.merge(side_pd, how='inner', on=['SUBJECT_ID', 'HADM_ID'])

    # load trian, eval, test data
    ids_file = [os.path.join(data_dir, 'train-id.txt'),
                os.path.join(data_dir, 'eval-id.txt'),
                os.path.join(data_dir, 'test-id.txt')]

    def load_ids(data, file_name):
        """
        :param data: multi-visit data
        :param file_name: text file with one integer SUBJECT_ID per line
        :return: raw data form restricted to those subjects
        """
        ids = []
        with open(file_name, 'r') as f:
            for line in f:
                ids.append(int(line.rstrip('\n')))
        return data[data['SUBJECT_ID'].isin(ids)].reset_index(drop=True)
    return tokenizer, tuple(map(lambda x: EHRDataset(load_ids(data, x), tokenizer, max_seq_len), ids_file))
def main():
    """Train/evaluate/test the GBERT side-information prediction model.

    Command-line driven: --do_train fits the model (saving the best checkpoint
    by eval PR-AUC), --do_test reloads that checkpoint and reports test metrics.
    """
    parser = argparse.ArgumentParser()

    # Required parameters
    parser.add_argument("--model_name", default='GBert-predict-side', type=str, required=False,
                        help="model name")
    parser.add_argument("--data_dir",
                        default='../data',
                        type=str,
                        required=False,
                        help="The input data dir.")
    parser.add_argument("--pretrain_dir", default='../saved/GBert-predict', type=str, required=False,
                        help="pretraining model dir.")
    parser.add_argument("--train_file", default='data-multi-visit.pkl', type=str, required=False,
                        help="training data file.")
    parser.add_argument("--output_dir",
                        default='../saved/',
                        type=str,
                        required=False,
                        help="The output directory where the model checkpoints will be written.")

    # Other parameters
    # NOTE(review): default=True with action='store_true' means this flag is
    # effectively always True -- confirm whether pretraining should be optional.
    parser.add_argument("--use_pretrain",
                        default=True,
                        action='store_true',
                        help="is use pretrain")
    parser.add_argument("--graph",
                        default=False,
                        action='store_true',
                        help="if use ontology embedding")
    parser.add_argument("--therhold",
                        default=0.3,
                        type=float,
                        help="therhold.")
    parser.add_argument("--max_seq_length",
                        default=55,
                        type=int,
                        help="The maximum total input sequence length after WordPiece tokenization. \n"
                             "Sequences longer than this will be truncated, and sequences shorter \n"
                             "than this will be padded.")
    parser.add_argument("--do_train",
                        default=False,
                        action='store_true',
                        help="Whether to run training.")
    parser.add_argument("--do_eval",
                        default=True,
                        action='store_true',
                        help="Whether to run on the dev set.")
    parser.add_argument("--do_test",
                        default=True,
                        action='store_true',
                        help="Whether to run on the test set.")
    parser.add_argument("--train_batch_size",
                        default=1,
                        type=int,
                        help="Total batch size for training.")
    parser.add_argument("--learning_rate",
                        default=5e-5,
                        type=float,
                        help="The initial learning rate for Adam.")
    parser.add_argument("--num_train_epochs",
                        default=40.0,
                        type=float,
                        help="Total number of training epochs to perform.")
    parser.add_argument("--no_cuda",
                        action='store_true',
                        help="Whether not to use CUDA when available")
    parser.add_argument('--seed',
                        type=int,
                        default=1203,
                        help="random seed for initialization")
    parser.add_argument("--warmup_proportion",
                        default=0.1,
                        type=float,
                        help="Proportion of training to perform linear learning rate warmup for. "
                             "E.g., 0.1 = 10%% of training.")

    args = parser.parse_args()
    args.output_dir = os.path.join(args.output_dir, args.model_name)

    # Seed every RNG in play for reproducibility.
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)

    device = torch.device("cuda" if torch.cuda.is_available()
                          and not args.no_cuda else "cpu")

    if not args.do_train and not args.do_eval:
        raise ValueError(
            "At least one of `do_train` or `do_eval` must be True.")

    # if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train:
    #     raise ValueError(
    #         "Output directory ({}) already exists and is not empty.".format(args.output_dir))
    os.makedirs(args.output_dir, exist_ok=True)

    print("Loading Dataset")
    tokenizer, (train_dataset, eval_dataset, test_dataset) = load_dataset(args)
    # Batch size is fixed at 1 patient; each item already bundles all visits.
    train_dataloader = DataLoader(train_dataset,
                                  sampler=RandomSampler(train_dataset),
                                  batch_size=1)
    eval_dataloader = DataLoader(eval_dataset,
                                 sampler=SequentialSampler(eval_dataset),
                                 batch_size=1)
    test_dataloader = DataLoader(test_dataset,
                                 sampler=SequentialSampler(test_dataset),
                                 batch_size=1)

    print('Loading Model: ' + args.model_name)
    # config = BertConfig(vocab_size_or_config_json_file=len(tokenizer.vocab.word2idx), side_len=train_dataset.side_len)
    # config.graph = args.graph
    # model = SeperateBertTransModel(config, tokenizer.dx_voc, tokenizer.rx_voc)
    if args.use_pretrain:
        logger.info("Use Pretraining model")
        model = GBERT_Predict_Side.from_pretrained(
            args.pretrain_dir, tokenizer=tokenizer, side_len=train_dataset.side_len)
    else:
        config = BertConfig(
            vocab_size_or_config_json_file=len(tokenizer.vocab.word2idx))
        config.graph = args.graph
        model = GBERT_Predict_Side(config, tokenizer, train_dataset.side_len)
    logger.info('# of model parameters: ' + str(get_n_params(model)))

    model.to(device)

    model_to_save = model.module if hasattr(
        model, 'module') else model  # Only save the model it-self
    rx_output_model_file = os.path.join(
        args.output_dir, "pytorch_model.bin")

    # Prepare optimizer
    # num_train_optimization_steps = int(
    #     len(train_dataset) / args.train_batch_size) * args.num_train_epochs
    # param_optimizer = list(model.named_parameters())
    # no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
    # optimizer_grouped_parameters = [
    #     {'params': [p for n, p in param_optimizer if not any(
    #         nd in n for nd in no_decay)], 'weight_decay': 0.01},
    #     {'params': [p for n, p in param_optimizer if any(
    #         nd in n for nd in no_decay)], 'weight_decay': 0.0}
    # ]
    # optimizer = BertAdam(optimizer_grouped_parameters,
    #                      lr=args.learning_rate,
    #                      warmup=args.warmup_proportion,
    #                      t_total=num_train_optimization_steps)
    optimizer = Adam(model.parameters(), lr=args.learning_rate)

    global_step = 0
    if args.do_train:
        writer = SummaryWriter(args.output_dir)
        logger.info("***** Running training *****")
        logger.info(" Num examples = %d", len(train_dataset))
        logger.info(" Batch size = %d", 1)

        dx_acc_best, rx_acc_best = 0, 0
        acc_name = 'prauc'
        dx_history = {'prauc': []}
        rx_history = {'prauc': []}

        for _ in trange(int(args.num_train_epochs), desc="Epoch"):
            print('')
            tr_loss = 0
            nb_tr_examples, nb_tr_steps = 0, 0
            prog_iter = tqdm(train_dataloader, leave=False, desc='Training')
            model.train()
            for _, batch in enumerate(prog_iter):
                batch = tuple(t.to(device) for t in batch)
                input_ids, dx_labels, rx_labels, input_sides = batch
                # Drop the DataLoader's singleton batch dimension.
                input_ids, dx_labels, rx_labels, input_sides = input_ids.squeeze(
                    dim=0), dx_labels.squeeze(dim=0), rx_labels.squeeze(dim=0), input_sides.squeeze(dim=0)
                loss, rx_logits = model(input_ids, dx_labels=dx_labels, rx_labels=rx_labels,
                                        epoch=global_step, input_sides=input_sides)
                loss.backward()

                tr_loss += loss.item()
                nb_tr_examples += 1
                nb_tr_steps += 1

                # Display loss
                prog_iter.set_postfix(loss='%.4f' % (tr_loss / nb_tr_steps))

                optimizer.step()
                optimizer.zero_grad()

            writer.add_scalar('train/loss', tr_loss / nb_tr_steps, global_step)
            global_step += 1

            if args.do_eval:
                print('')
                logger.info("***** Running eval *****")
                model.eval()
                rx_y_preds = []
                rx_y_trues = []
                for eval_input in tqdm(eval_dataloader, desc="Evaluating"):
                    eval_input = tuple(t.to(device) for t in eval_input)
                    input_ids, dx_labels, rx_labels, input_sides = eval_input
                    input_ids, dx_labels, rx_labels, input_sides = input_ids.squeeze(
                    ), dx_labels.squeeze(), rx_labels.squeeze(dim=0), input_sides.squeeze(dim=0)
                    with torch.no_grad():
                        loss, rx_logits = model(
                            input_ids, dx_labels=dx_labels, rx_labels=rx_labels, input_sides=input_sides)
                        rx_y_preds.append(t2n(torch.sigmoid(rx_logits)))
                        rx_y_trues.append(t2n(rx_labels))

                print('')
                rx_acc_container = metric_report(np.concatenate(rx_y_preds, axis=0), np.concatenate(rx_y_trues, axis=0),
                                                 args.therhold)
                writer.add_scalars(
                    'eval_rx', rx_acc_container, global_step)

                # Keep only the checkpoint with the best eval PR-AUC so far.
                if rx_acc_container[acc_name] > rx_acc_best:
                    rx_acc_best = rx_acc_container[acc_name]
                    # save model
                    torch.save(model_to_save.state_dict(),
                               rx_output_model_file)

        with open(os.path.join(args.output_dir, 'bert_config.json'), 'w', encoding='utf-8') as fout:
            fout.write(model.config.to_json_string())

    if args.do_test:
        logger.info("***** Running test *****")
        logger.info(" Num examples = %d", len(test_dataset))
        logger.info(" Batch size = %d", 1)

        # NOTE(review): `writer` is only bound when --do_train is set; running
        # with --do_test alone would raise NameError inside test() -- confirm
        # the intended invocation always includes training.
        def test(task=0):
            # Load a trained model that you have fine-tuned
            model_state_dict = torch.load(rx_output_model_file)
            model.load_state_dict(model_state_dict)
            model.to(device)
            model.eval()

            y_preds = []
            y_trues = []
            for test_input in tqdm(test_dataloader, desc="Testing"):
                test_input = tuple(t.to(device) for t in test_input)
                input_ids, dx_labels, rx_labels, input_sides = test_input
                input_ids, dx_labels, rx_labels, input_sides = input_ids.squeeze(
                ), dx_labels.squeeze(), rx_labels.squeeze(dim=0), input_sides.squeeze(dim=0)
                with torch.no_grad():
                    loss, rx_logits = model(
                        input_ids, dx_labels=dx_labels, rx_labels=rx_labels, input_sides=input_sides)
                    y_preds.append(t2n(torch.sigmoid(rx_logits)))
                    y_trues.append(t2n(rx_labels))

            print('')
            acc_container = metric_report(np.concatenate(y_preds, axis=0), np.concatenate(y_trues, axis=0),
                                          args.therhold)

            # save report
            writer.add_scalars('test', acc_container, 0)

            return acc_container

        test(task=0)


if __name__ == "__main__":
    main()
| StarcoderdataPython |
3355177 | <filename>.ci/ci_script.py
#!/usr/bin/env python3
import importlib.util
import os
import subprocess
import sys
import git
import github
# Try to import, but its not critical
libnames = ['bot_jokes']
for libname in libnames:
try:
lib = __import__(libname)
except Exception:
print(sys.exc_info())
else:
globals()[libname] = lib
# Directory containing this script; used to resolve sibling resource files.
PWD = os.path.dirname(os.path.abspath(__file__))
# Branch on which autogenerated-file drift triggers an automated PR.
GIT_DEFAULT_BRANCH = 'master'
# Markdown body used for automatically created pull requests.
PR_MESSAGE_BODY = os.path.join(PWD, 'pr_body.md')
def import_create_dockerfiles(location_dir):
    """Import the dockerfile generation script"""
    # Load the script as a standalone module without touching sys.path.
    script_path = os.path.join(location_dir, 'create_dockerfiles.py')
    module_spec = importlib.util.spec_from_file_location(
        'create.dockerfiles', script_path)
    module = importlib.util.module_from_spec(module_spec)
    module_spec.loader.exec_module(module)
    return module
def import_create_dockerlibrary(location_dir):
    """Import the dockerlibrary generation script"""
    # Load the script as a standalone module without touching sys.path.
    script_path = os.path.join(location_dir, 'create_dockerlibrary.py')
    module_spec = importlib.util.spec_from_file_location(
        'create.dockerlibrary', script_path)
    module = importlib.util.module_from_spec(module_spec)
    module_spec.loader.exec_module(module)
    return module
def test_diffs(diffs):
"""Check the diffs, also print them and fail the test if they exist"""
if diffs:
for diff in diffs:
print(diff)
raise ValueError('Autogenerated files are not up to date')
def test_builds(hub_tag_dir):
    """Check make build completes for the given repo tag directory.

    Args:
        hub_tag_dir: directory containing the Makefile to build (used as cwd).

    Raises:
        subprocess.CalledProcessError: if `make build` exits non-zero.
    """
    command = ['make', 'build']
    with subprocess.Popen(
            command,
            stdout=subprocess.PIPE,
            cwd=hub_tag_dir,
            bufsize=1,
            universal_newlines=True) as p:
        # Stream the build output line by line as it is produced.
        for line in p.stdout:
            print(line, end='')  # process line here
    # Check the exit status only after the context manager has exited:
    # Popen.__exit__ calls wait(), which is what populates `returncode`.
    # Checking inside the `with` block can compare the still-unset value
    # (None) against 0 and raise CalledProcessError even for a clean build.
    if p.returncode != 0:
        raise subprocess.CalledProcessError(p.returncode, p.args)
def main(argv=sys.argv[1:]):
    """Check CI context and trigger docker builds.

    Pull-request runs verify that the generated Dockerfiles are up to date and
    build any changed images; push/cron runs on the default branch commit any
    regenerated files and open an automated pull request.
    """

    # Public environment variables, available for pull requests from forks
    HUB_REPO = os.environ['HUB_REPO']
    HUB_RELEASE = os.environ['HUB_RELEASE']
    HUB_OS_NAME = os.environ['HUB_OS_NAME']
    HUB_OS_CODE_NAME = os.environ['HUB_OS_CODE_NAME']

    # Map CI-provider-specific variables onto a common set of names.
    if 'GITHUB_ACTIONS' in os.environ:
        CI_EVENT_TYPE = os.environ['GITHUB_EVENT_NAME']
        GIT_BRANCH = os.environ['GITHUB_REF']  # ?
        # GITHUB_REF looks like "refs/heads/<branch>" for branch pushes.
        github_ref_prefix = 'refs/heads/'
        if GIT_BRANCH.startswith(github_ref_prefix):
            GIT_BRANCH = GIT_BRANCH[len(github_ref_prefix):]
        GIT_UPSTREAM_REPO_SLUG = os.environ['GITHUB_REPOSITORY']
        GIT_BUILD_DIR = os.environ['GITHUB_WORKSPACE']
        GIT_PULL_REQUEST_BRANCH = os.environ['GITHUB_HEAD_REF']  # ?
        GIT_PULL_REQUEST_BASE_BRANCH = os.environ['GITHUB_BASE_REF']  # ?
    elif 'TRAVIS' in os.environ:
        CI_EVENT_TYPE = os.environ['TRAVIS_EVENT_TYPE']
        GIT_BRANCH = os.environ['TRAVIS_BRANCH']
        GIT_UPSTREAM_REPO_SLUG = os.environ['TRAVIS_REPO_SLUG']
        GIT_BUILD_DIR = os.environ['TRAVIS_BUILD_DIR']
        GIT_PULL_REQUEST_BRANCH = os.environ['TRAVIS_PULL_REQUEST_BRANCH']
        GIT_PULL_REQUEST_BASE_BRANCH = GIT_BRANCH
    # NOTE(review): on any other CI provider the CI_* / GIT_* names above are
    # unbound and the prints below raise NameError -- confirm only these two
    # providers are supported.

    print("HUB_REPO: ", HUB_REPO)
    print("HUB_RELEASE: ", HUB_RELEASE)
    print("HUB_OS_NAME: ", HUB_OS_NAME)
    print("HUB_OS_CODE_NAME: ", HUB_OS_CODE_NAME)
    print("GIT_UPSTREAM_REPO_SLUG: ", GIT_UPSTREAM_REPO_SLUG)
    print("GIT_BRANCH: ", GIT_BRANCH)
    print("GIT_PULL_REQUEST_BRANCH: ", GIT_PULL_REQUEST_BRANCH)

    # Private environment variables, not available for pull requests from forks
    GIT_USER = os.environ.get('GITHUBUSER', '')
    GIT_EMAIL = os.environ.get('GITHUBEMAIL', '')
    GIT_TOKEN = os.environ.get('GITHUBTOKEN', '')
    GIT_ORIGIN_REPO_SLUG = GIT_USER + '/' + GIT_UPSTREAM_REPO_SLUG.split('/')[1]
    # Token-authenticated push URL; keep out of any logged output.
    GIT_REMOTE_ORIGIN_TOKEN_URL = "https://{user}:{token}@github.com/{repo_slug}.git".format(
        user=GIT_USER,
        token=GIT_TOKEN,
        repo_slug=GIT_ORIGIN_REPO_SLUG
    )

    # Initialize git interfaces
    repo = git.Repo(GIT_BUILD_DIR, odbt=git.GitCmdObjectDB)

    # Expand the repo:tag directory
    hub_repo_dir = os.path.join(GIT_BUILD_DIR, HUB_REPO)
    hub_tag_dir = os.path.join(
        hub_repo_dir, HUB_RELEASE, HUB_OS_NAME, HUB_OS_CODE_NAME)
    hub_relative_path = os.path.join(
        HUB_REPO, HUB_RELEASE, HUB_OS_NAME, HUB_OS_CODE_NAME)

    # Try updating the Dockerfiles
    create_dockerfiles = import_create_dockerfiles(hub_repo_dir)
    create_dockerfiles.main(('dir', '-d' + hub_tag_dir))

    # Create diff for the current repo
    diffs = repo.index.diff(None, create_patch=True)

    # Check if this is PR or Cron job test
    if CI_EVENT_TYPE == 'pull_request':
        print("Testing Pull Request for Branch: ", GIT_PULL_REQUEST_BRANCH)
        # Test that dockerfile generation has changed nothing
        # and that all dockerfiles are up to date
        test_diffs(diffs)

        # have to invoke git diff on the command line
        # because Github Actions gives detached clone for forked repos
        pr_diff_cmd = f'git diff remotes/origin/{GIT_PULL_REQUEST_BASE_BRANCH} --name-only'
        pr_diff_return = subprocess.Popen(
            pr_diff_cmd,
            shell=True,
            stdout=subprocess.PIPE)
        file_list = pr_diff_return.stdout.read().decode().strip().split('\n')

        # If files corresponding to this distro/platform have changed, test building the images
        for file_path in file_list:
            if file_path.startswith(hub_relative_path):
                test_builds(hub_tag_dir)
    else:
        # If this is a test from CronJob or push
        print("Testing Branch: ", GIT_BRANCH)
        try:
            # Test that dockerfile generation has changed nothing
            # and that all dockerfiles are up to date
            test_diffs(diffs)
        except ValueError:
            # If there are changes, only proceed for the default branch
            if GIT_BRANCH != GIT_DEFAULT_BRANCH:
                raise
            print("GIT_BRANCH is default branch, proceeding...")

            # Initialize github interfaces
            g = github.Github(login_or_token=GIT_TOKEN)
            g_origin_repo = g.get_repo(
                full_name_or_id=GIT_ORIGIN_REPO_SLUG)
            g_upstream_repo = g.get_repo(
                full_name_or_id=GIT_UPSTREAM_REPO_SLUG)

            # Define common attributes for new PR
            pr_branch_name = hub_relative_path
            pr_head_name = GIT_USER + ':' + pr_branch_name
            pr_remote = git.remote.Remote(repo=repo, name='origin')
            pr_remote.add(repo=repo, name='upstream_pr',
                          url=GIT_REMOTE_ORIGIN_TOKEN_URL)

            # Commit changes to Dockerfiles
            repo.git.add(all=True)
            message = "Updating Dockerfiles\n" + \
                "This is an automated CI commit"
            repo.config_writer().set_value(
                "user", "name", GIT_USER).release()
            repo.config_writer().set_value(
                "user", "email", GIT_EMAIL).release()
            repo.git.commit(m=message)

            # Update the Docker Library
            manifest = os.path.join(hub_repo_dir, 'manifest.yaml')
            output = os.path.join(hub_repo_dir, HUB_REPO)
            create_dockerlibrary = import_create_dockerlibrary(
                hub_repo_dir)
            create_dockerlibrary.main((
                '--manifest', manifest,
                '--output', output))

            # Check for changes to the Docker Library
            library_diff = repo.index.diff(None, create_patch=True)
            if library_diff != []:
                # Commit changes to Docker Library
                repo.git.add(all=True)
                message = "Updating Docker Library\n" + \
                    "This is an automated CI commit"
                repo.git.commit(m=message)

            # Create new branch from current head
            pr_branch_head = repo.create_head(pr_branch_name)  # noqa

            # Check if branch exists remotely
            try:
                g_branch = g_origin_repo.get_branch(branch=pr_branch_name)  # noqa
            except github.GithubException as exception:
                if exception.data['message'] == "Branch not found":
                    pr_branch_exists = False
                else:
                    raise
            else:
                pr_branch_exists = True

            if pr_branch_exists:
                # Try force pushing if remote branch already exists
                try:
                    repo.git.push(
                        'upstream_pr', pr_branch_name + ':' + pr_branch_name, force=True)
                except Exception as inst:
                    # Scrub stderr so the token URL never reaches the logs.
                    inst.stderr = None
                    raise ValueError(
                        ("Force push to branch:{branch} failed! "
                         "Stderr omitted to protect secrets.").format(branch=pr_branch_name))
            else:
                # Otherwise try setting up the remote upsteam branch
                try:
                    repo.git.push(
                        'upstream_pr', pr_branch_name + ':' + pr_branch_name, u=True)
                except Exception as inst:
                    # Scrub stderr so the token URL never reaches the logs.
                    inst.stderr = None
                    raise ValueError(
                        ("Set-upstream push to branch:'{branch}' failed! "
                         "Stderr omitted to protect secrets.").format(branch=pr_branch_name))

            # Add some commentary for new PR
            title = "Updating {}".format(pr_branch_name)
            with open(PR_MESSAGE_BODY, 'r') as f:
                body = f.read()
            # A joke in the PR body is strictly optional.
            try:
                body += bot_jokes.get_bot_joke()
            except Exception:
                pass

            # Get github pull for upstream
            g_pulls = g_upstream_repo.get_pulls(
                state='open',
                sort='created',
                base=GIT_BRANCH,
                head=pr_head_name)

            # Check if PR already exists
            if g_pulls.get_page(0):
                raise ValueError(
                    ("Relevant PR from {pr_head_name} "
                     "is already open.").format(pr_head_name=pr_head_name))
            else:
                # Create new PR for remote banch
                g_upstream_repo.create_pull(
                    title=title,
                    body=body,
                    base=GIT_BRANCH,
                    head=pr_head_name)
                raise ValueError(
                    ("Relevant PR from {pr_head_name} "
                     "has been created.").format(pr_head_name=pr_head_name))

        # Test that the dockerfiles build
        test_builds(hub_tag_dir)


if __name__ == '__main__':
    main()
| StarcoderdataPython |
180824 | <reponame>yilin-lu/bungo-bot-DEPRECATED-
from nonebot import on_notice, NoticeSession
from nonebot.log import logger
from .utils import *
@on_notice
async def _(session: NoticeSession):
    # Catch-all notice hook: log every incoming notice event for debugging.
    logger.info('new notice: %s', session.event)
@on_notice('group_decrease')
async def _(session: NoticeSession):
    # A member left (or was removed from) the group: send the configured farewell.
    msg = await load_admin_data("group_decrease")
    await session.send(msg)
    return
@on_notice('group_increase.invite')
async def _(session: NoticeSession):
    """Greet on group increase; send a special message when the bot itself joined."""
    bot_was_invited = session.event['self_id'] == session.event['user_id']
    message_key = "add_group" if bot_was_invited else "group_increase"
    await session.send(await load_admin_data(message_key))
1731837 | # Copyright 2017 Sidewalk Labs | https://www.apache.org/licenses/LICENSE-2.0
from __future__ import (
absolute_import, division, print_function, unicode_literals
)
from collections import defaultdict, namedtuple
import numpy as np
import pandas
from doppelganger.listbalancer import (
balance_multi_cvx, discretize_multi_weights
)
from doppelganger import inputs
HIGH_PASS_THRESHOLD = .1  # Filter controls which are present in less than 10% of HHs
# These are the minimum fields needed to allocate households
DEFAULT_PERSON_FIELDS = {
    inputs.STATE,
    inputs.PUMA,
    inputs.SERIAL_NUMBER,
    inputs.AGE,
    inputs.SEX,
    inputs.PERSON_WEIGHT,
}
# Minimum household fields needed for allocation.
DEFAULT_HOUSEHOLD_FIELDS = {
    inputs.STATE,
    inputs.PUMA,
    inputs.SERIAL_NUMBER,
    inputs.NUM_PEOPLE,
    inputs.HOUSEHOLD_WEIGHT,
}
# (tract, count): how many copies of a household are placed in a given tract.
CountInformation = namedtuple('CountInformation', ['tract', 'count'])
class HouseholdAllocator(object):
@staticmethod
def from_csvs(households_csv, persons_csv):
"""Load saved household and person allocations.
Args:
households_csv (unicode): path to households file
persons_csv (unicode): path to persons file
Returns:
HouseholdAllocator: allocated persons & households_csv
"""
allocated_households = pandas.read_csv(households_csv)
allocated_persons = pandas.read_csv(persons_csv)
return HouseholdAllocator(allocated_households, allocated_persons)
@staticmethod
def from_cleaned_data(marginals, households_data, persons_data):
"""Allocate households based on the given data.
marginals (Marginals): controls to match when allocating
households_data (CleanedData): data about households. Must contain
DEFAULT_HOUSEHOLD_FIELDS.
persons_data (CleanedData): data about persons. Must contain
DEFAULT_PERSON_FIELDS.
"""
for field in DEFAULT_HOUSEHOLD_FIELDS:
assert field.name in households_data.data, \
'Missing required field {}'.format(field.name)
for field in DEFAULT_PERSON_FIELDS:
assert field.name in persons_data.data, \
'Missing required field {}'.format(field.name)
households, persons = HouseholdAllocator._format_data(
households_data.data, persons_data.data)
allocated_households, allocated_persons = \
HouseholdAllocator._allocate_households(households, persons, marginals)
return HouseholdAllocator(allocated_households, allocated_persons)
def __init__(self, allocated_households, allocated_persons):
self.allocated_households = allocated_households
self.allocated_persons = allocated_persons
self.serialno_to_counts = defaultdict(list)
for _, row in self.allocated_households.iterrows():
serialno = row[inputs.SERIAL_NUMBER.name]
tract = row[inputs.TRACT.name]
count = int(row[inputs.COUNT.name])
self.serialno_to_counts[serialno].append(CountInformation(tract, count))
def get_counts(self, serialno):
"""Return the information about weights for a given serial number.
A household is repeated for a certain number of times for each tract.
This returns a list of (tract, repeat count). The repeat count
indicates the number of times this serial number should be repeated in
this tract.
Args:
seriano (unicode): the household's serial number
Returns:
list(CountInformation): the weighted repetitions for this serialno
"""
return self.serialno_to_counts[serialno]
def write(self, household_file, person_file):
"""Write allocated households and persons to the given files
Args:
household_file (unicode): path to write households to
person_file (unicode): path to write persons to
"""
self.allocated_households.to_csv(household_file)
self.allocated_persons.to_csv(person_file)
@staticmethod
def _filter_sparse_columns(df, cols):
''' Filter out variables who are are so sparse they would break the solver.
Columns are assumed to be of an indicator type (0/1)
Args
df (pandas.DataFrame): dataframe to filter
cols (list(str)): column names
Returns
filtered column list (list(str))
'''
return df[cols]\
.loc[:, df[cols].sum()/float(len(df)) > HIGH_PASS_THRESHOLD]\
.columns.tolist()
@staticmethod
def _allocate_households(households, persons, tract_controls):
# Only take nonzero weights
households = households[households[inputs.HOUSEHOLD_WEIGHT.name] > 0]
# Initial weights from PUMS
w = households[inputs.HOUSEHOLD_WEIGHT.name].as_matrix().T
allocation_inputs = [inputs.NUM_PEOPLE, inputs.NUM_VEHICLES] # Hard-coded for now
# Prepend column name to bin name to prevent bin collision
hh_columns = []
for a_input in allocation_inputs:
subset_values = households[a_input.name].unique().tolist()
hh_columns += HouseholdAllocator._str_broadcast(a_input.name, subset_values)
hh_columns = HouseholdAllocator._filter_sparse_columns(households, hh_columns)
hh_table = households[hh_columns].as_matrix()
A = tract_controls.data[hh_columns].as_matrix()
n_tracts, n_controls = A.shape
n_samples = len(households.index.values)
# Control importance weights
# < 1 means not important (thus relaxing the constraint in the solver)
mu = np.mat([1] * n_controls)
w_extend = np.tile(w, (n_tracts, 1))
mu_extend = np.mat(np.tile(mu, (n_tracts, 1)))
B = np.mat(np.dot(np.ones((1, n_tracts)), A)[0])
# Our trade-off coefficient gamma
# Low values (~1) mean we trust our initial weights, high values
# (~10000) mean want to fit the marginals.
gamma = 100.
# Meta-balancing coefficient
meta_gamma = 100.
hh_weights = balance_multi_cvx(
hh_table, A, B, w_extend, gamma * mu_extend.T, meta_gamma
)
# We're running discretization independently for each tract
tract_ids = tract_controls.data['TRACTCE'].values
total_weights = np.zeros(hh_weights.shape)
sample_weights_int = hh_weights.astype(int)
discretized_hh_weights = discretize_multi_weights(hh_table, hh_weights)
total_weights = sample_weights_int + discretized_hh_weights
# Extend households and add the weights and ids
households_extend = pandas.concat([households] * n_tracts)
households_extend[inputs.COUNT.name] = total_weights.flatten().T
tracts = np.repeat(tract_ids, n_samples)
households_extend[inputs.TRACT.name] = tracts
return households_extend, persons
@staticmethod
def _str_broadcast(string, list1):
return ['_'.join([string, element]) for element in list1]
@staticmethod
def _format_data(households_data, persons_data):
hh_size = pandas.get_dummies(households_data[inputs.NUM_PEOPLE.name])
# Prepend column name to bin name to prevent bin collision
hh_size.columns = HouseholdAllocator\
._str_broadcast(inputs.NUM_PEOPLE.name, hh_size.columns.tolist())
hh_vehicles = pandas.get_dummies(households_data[inputs.NUM_VEHICLES.name])
hh_vehicles.columns = HouseholdAllocator\
._str_broadcast(inputs.NUM_VEHICLES.name, hh_vehicles.columns.tolist())
households_data = pandas.concat([households_data, hh_size, hh_vehicles], axis=1)
hp_ages = pandas.get_dummies(persons_data[inputs.AGE.name])
hp_ages.columns = HouseholdAllocator\
._str_broadcast(inputs.AGE.name, list(inputs.AGE.possible_values))
persons_data = pandas.concat([persons_data, hp_ages], axis=1)
persons_trimmed = persons_data[[
inputs.SERIAL_NUMBER.name
] + hp_ages.columns.tolist()
]
# Get counts we need
persons_trimmed = persons_trimmed.groupby(inputs.SERIAL_NUMBER.name).sum()
households_trimmed = households_data[[
inputs.SERIAL_NUMBER.name,
inputs.NUM_PEOPLE.name,
inputs.NUM_VEHICLES.name,
inputs.HOUSEHOLD_WEIGHT.name
] + hh_size.columns.tolist()
+ hh_vehicles.columns.tolist()
]
# Merge
households_out = pandas.merge(
households_trimmed, persons_trimmed, how='inner',
left_on=inputs.SERIAL_NUMBER.name, right_index=True, sort=True
)
persons_out = persons_data[[
inputs.SERIAL_NUMBER.name,
inputs.SEX.name,
inputs.AGE.name
]]
return households_out, persons_out
| StarcoderdataPython |
3246520 | from pathlib import Path
import numpy as np
import tensorflow as tf
from src.helpers import paths
from src.regnet import regnet
# NOTE(review): "PRETRIAN" / "WEITGHTS" are spelling typos, but the names are
# referenced throughout this module -- kept as-is for compatibility.
PRETRIAN_MODEL_PATH = paths.checkpoints.regnet().parent.with_name(
    'training.ckpt')
WEITGHTS_PATH = paths.models.regnet_tf()
# Optimizer hyperparameters read from the regnet config file.
config = paths.config.read(paths.config.regnet())
BETA1 = float(config['HYPERPARAMETERS']['BETA1'])
BETA2 = float(config['HYPERPARAMETERS']['BETA2'])
EPSILON = float(config['HYPERPARAMETERS']['EPSILON'])
LEARNING_RATE = float(config['HYPERPARAMETERS']['LEARNING_RATE'])
def load_graph(frozen_graph_filename):
    """Deserialize a frozen TensorFlow GraphDef file into a fresh tf.Graph."""
    # Read and parse the serialized protobuf from disk.
    with tf.gfile.GFile(str(frozen_graph_filename), "rb") as graph_file:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(graph_file.read())
    # Import into a brand-new graph; an empty name avoids prefixing every op.
    with tf.Graph().as_default() as graph:
        tf.import_graph_def(graph_def=graph_def, name="")
        return graph
def load_tensorflow_weights(model, checkpoint_path):
    """Copy weights/biases from the frozen regnet graph into the Keras model.

    ``checkpoint_path`` is only forwarded to the helpers; the values come from
    the frozen graph file itself.
    """
    graph = load_graph(WEITGHTS_PATH.joinpath('regnet_frozen.pb'))
    session = tf.Session(graph=graph)
    # Collect every tensor in the graph, keep only trainable weight/bias
    # tensors, and drop anything under the optimizer's `train_opt` scope.
    all_tensors = [t for op in graph.get_operations() for t in op.values()]
    trainables = [
        t for t in all_tensors
        if ('weights:0' in t.name or 'biases:0' in t.name)
        and not t.name.startswith('train_opt')
    ]
    paired = zip_weight_bias(np.array(trainables))
    for weights, biases in paired:
        assign_layer_var(model, checkpoint_path, weights, biases, session)
    assert_weights(model, paired, checkpoint_path, session)
def assert_weights(model, ckpt_vars, checkpoint_path, sess_regnet):
    """Verify each copied Keras layer matches the frozen-graph values.

    ``checkpoint_path`` is unused here; kept for signature compatibility.
    """
    for w_tensor, b_tensor in ckpt_vars:
        layer = Path(w_tensor.name).parent.name
        keras_w, keras_b = model.get_layer(layer).get_weights()
        frozen_w = sess_regnet.run(w_tensor)
        frozen_b = sess_regnet.run(b_tensor)
        assert np.array_equal(keras_w, frozen_w)
        assert np.array_equal(keras_b, frozen_b)
def zip_weight_bias(array):
    """Pair each '<layer>/weights' tensor with its '<layer>/biases' tensor.

    Returns an (n, 2) object array of [weights, biases] rows and asserts both
    tensors of each row belong to the same layer scope.
    """
    weight_vars = np.array([v for v in array if 'weights' in v.name])
    bias_vars = np.array([v for v in array if 'biases' in v.name])
    paired = np.column_stack((weight_vars, bias_vars))
    for w_var, b_var in paired:
        # Same parent scope means same layer.
        assert Path(w_var.name).parent == Path(b_var.name).parent
    return paired
def assign_layer_var(model, checkpoint_path, weight, biases, sess_regnet):
    """Evaluate one weight/bias tensor pair and install it into the matching
    Keras layer.

    ``checkpoint_path`` is unused here; kept for signature compatibility.
    """
    layer_name = Path(weight.name).parent.name
    fetched = [sess_regnet.run(weight), sess_regnet.run(biases)]
    model.get_layer(layer_name).set_weights(fetched)
def get_variable_by_name(checkpoint_path, name):
    """Read a single variable's value from a TF checkpoint by name."""
    checkpoint = str(checkpoint_path)
    return tf.train.load_variable(checkpoint, name)
if __name__ == '__main__':
    # Build and compile the Keras regnet, port the frozen TF weights into it,
    # then persist the result as HDF5.
    net = regnet.Regnet(LEARNING_RATE, BETA1, BETA2, EPSILON)
    net.model.compile(
        optimizer=net.train_opt, loss=net.model_loss, metrics=net.metrics)
    load_tensorflow_weights(net.model, PRETRIAN_MODEL_PATH)
    net.model.save('training.h5')  # creates a HDF5 file 'my_model.h5'
| StarcoderdataPython |
4813750 | <gh_stars>0
import urllib.request,json
from .models import Articles, Source
# API key and endpoint URL templates; all populated by configure_request()
# from the Flask app config at startup.
api_key = None
# Base URL for top-headline requests.
base_url = None
category_articles_url = None
search_url = None
categories_url = None
source_url = None
def configure_request(app):
    """Initialise module-level API key and endpoint URLs from the app config."""
    global api_key, base_url, categories_url, search_url, source_url, category_articles_url
    cfg = app.config
    api_key = cfg['NEWSAPP_API_KEY']
    base_url = cfg['TOPHEADLINE_API_BASE_URL']
    categories_url = cfg['CATEGORIES_API_URL']
    search_url = cfg['SEARCH_ITEM_URL']
    source_url = cfg['SOURCE_API_URL']
    category_articles_url = cfg['ARTICLE_BY_CATEGORY']
def get_news_articles(default):
    """Fetch articles matching ``default`` and convert them to Articles objects.

    Returns None when the API response contains no articles.
    """
    request_url = search_url.format(default, api_key)
    with urllib.request.urlopen(request_url) as response:
        payload = json.loads(response.read())
    parsed = None
    if payload['articles']:
        parsed = process_results(payload['articles'])
    return parsed
def search_article(article_query):
    """Search the news API for ``article_query``.

    Returns a list of Articles, or None when nothing matched.
    """
    request_url = search_url.format(article_query, api_key)
    with urllib.request.urlopen(request_url) as response:
        payload = json.loads(response.read())
    results = None
    if payload['articles']:
        results = process_results(payload['articles'])
    return results
def process_results(article_list):
    """Turn raw article dicts into Articles objects.

    Items without an image URL are skipped, matching the UI's requirement
    for a thumbnail.
    """
    processed = []
    for item in article_list:
        image = item.get('urlToImage')
        if not image:
            continue
        processed.append(Articles(
            item.get('title'), image, item.get('description'),
            item.get('author'), item.get('url'),
            item.get('publishedAt'), item.get('content')))
    return processed
def article_categories(category):
    """Fetch the news sources for ``category``.

    Returns a list of Source objects, or None when the response has none.
    """
    request_url = categories_url.format(category, api_key)
    with urllib.request.urlopen(request_url) as response:
        payload = json.loads(response.read())
    results = None
    if payload['sources']:
        results = process_category_results(payload['sources'])
    return results
def process_category_results(category_list):
    """Turn raw source dicts into Source objects."""
    sources = []
    for item in category_list:
        sources.append(Source(
            item.get('id'), item.get('name'), item.get('category'),
            item.get('description'), item.get('language'), item.get('url')))
    return sources
def article_by_source(source):
    """Fetch the articles published by a given news source.

    Returns a list of Articles, or None when the response has none.
    """
    request_url = source_url.format(source, api_key)
    with urllib.request.urlopen(request_url) as response:
        payload = json.loads(response.read())
    results = None
    if payload['articles']:
        results = process_results(payload['articles'])
    return results
def article_by_category(source):
    """Fetch articles for a given category.

    Returns a list of Articles, or None when the response has none.
    """
    request_url = category_articles_url.format(source, api_key)
    with urllib.request.urlopen(request_url) as response:
        payload = json.loads(response.read())
    results = None
    if payload['articles']:
        results = process_results(payload['articles'])
    return results
| StarcoderdataPython |
4811835 | <gh_stars>1-10
# -*- coding: utf-8 -*-
"""
Created on Fri Jul 2 09:37:08 2021
@author: r.dewinter
"""
from testFunctions.BNH import BNH
from testFunctions.CTP1 import CTP1
from testFunctions.OSY import OSY
from testFunctions.CEXP import CEXP
from testFunctions.C3DTLZ4 import C3DTLZ4
from testFunctions.TNK import TNK
from testFunctions.SRN import SRN
from testFunctions.TBTD import TBTD
from testFunctions.SRD import SRD
from testFunctions.WB import WB
from testFunctions.DBD import DBD
from testFunctions.NBP import NBP
from testFunctions.SPD import SPD
from testFunctions.CSI import CSI
from testFunctions.WP import WP
from testFunctions.BICOP1 import BICOP1
from testFunctions.BICOP2 import BICOP2
from testFunctions.TRICOP import TRICOP
from hypervolume import hypervolume
import numpy as np
from pymoo.model.problem import Problem
from pymoo.optimize import minimize
import autograd.numpy as anp
from pycheapconstr.algorithms.sansga2 import SANSGA2
from pycheapconstr.algorithms.icsansga2 import ICSANSGA2
import json
# Hypervolume results per benchmark problem, keyed by problem name, one dict
# per algorithm. NOTE(review): key styles differ ('OSY' vs 'OSYhv') -- the
# keys look inconsistent between the two dicts; confirm before relying on them.
sansga = {}
icsansga = {}
class OSY_c(Problem):
    """Pymoo wrapper for the constrained OSY benchmark (2 objectives, 6 constraints)."""

    def __init__(self):
        super().__init__(n_var=6, n_obj=2, n_constr=6)
        self.xl = anp.array([0.0, 0.0, 1.0, 0.0, 1.0, 0.0])
        self.xu = anp.array([10.0, 10.0, 5.0, 6.0, 5.0, 10.0])

    def _evaluate(self, x, out, *args, **kwargs):
        # 2-D x is a population: evaluate row by row; 1-D x is one solution.
        if len(np.shape(x)) > 1:
            evaluations = [OSY(row) for row in x]
            out["F"] = np.array([f for f, _ in evaluations])
            out["G"] = np.array([g for _, g in evaluations])
        else:
            out["F"], out["G"] = OSY(x)
class OSY_c_withcheapconstraints(OSY_c):
    """OSY variant that can evaluate only its (cheap) constraints on request."""

    def _evaluate(self, x, out, *args, only_inexpensive_constraints=False, **kwargs):
        if not only_inexpensive_constraints:
            super()._evaluate(x, out, *args, **kwargs)
            return
        # Constraint-only call: evaluate into a scratch dict, expose just "G".
        scratch = {}
        super()._evaluate(x, scratch, *args, **kwargs)
        out["G"] = scratch["G"]
# Benchmark OSY: 10 seeded SANSGA2 runs, hypervolume w.r.t. a fixed reference.
OSYhv = []
ref = np.array([0,386])
for i in range(10):
    problem = OSY_c()
    n_evals = len(problem.xl)*40
    algorithm = SANSGA2(n_offsprings=10)
    res = minimize(problem, algorithm, ('n_evals', n_evals), seed=i, verbose=True)
    OSYhv.append(hypervolume(res.F, ref))
    print(i)
print(np.mean(OSYhv))
sansga['OSY'] = OSYhv
# ICSANSGA2 runs reuse n_evals (budget = 40 * n_var) from the loop above.
OSYhv2 = []
for i in range(10):
    problem = OSY_c_withcheapconstraints()
    algorithm = ICSANSGA2()
    res = minimize(problem, algorithm, ('n_evals', n_evals), seed=i, verbose=True)
    OSYhv2.append(hypervolume(res.F, ref))
    print(i)
print(np.mean(OSYhv2))
icsansga['OSYhv'] = OSYhv2
class NBP_c(Problem):
    """Pymoo wrapper for the NBP benchmark (2 objectives, 5 constraints)."""

    def __init__(self):
        super().__init__(n_var=2, n_obj=2, n_constr=5)
        self.xl = anp.array([20.0, 10.0])
        self.xu = anp.array([250.0, 50.0])

    def _evaluate(self, x, out, *args, **kwargs):
        # 2-D x is a population: evaluate row by row; 1-D x is one solution.
        if len(np.shape(x)) > 1:
            evaluations = [NBP(row) for row in x]
            out["F"] = np.array([f for f, _ in evaluations])
            out["G"] = np.array([g for _, g in evaluations])
        else:
            out["F"], out["G"] = NBP(x)
class NBP_c_withcheapconstraints(NBP_c):
    """NBP variant that can evaluate only its (cheap) constraints on request."""

    def _evaluate(self, x, out, *args, only_inexpensive_constraints=False, **kwargs):
        if not only_inexpensive_constraints:
            super()._evaluate(x, out, *args, **kwargs)
            return
        # Constraint-only call: evaluate into a scratch dict, expose just "G".
        scratch = {}
        super()._evaluate(x, scratch, *args, **kwargs)
        out["G"] = scratch["G"]
# Benchmark NBP: 10 seeded runs per algorithm; hypervolume vs fixed reference.
NBPhv = []
ref = np.array([11150, 12500])
for i in range(10):
    problem = NBP_c()
    n_evals = len(problem.xl)*40
    algorithm = SANSGA2()
    res = minimize(problem, algorithm, ('n_evals', n_evals), seed=i, verbose=True)
    NBPhv.append(hypervolume(res.F, ref))
    print(i)
print(np.mean(NBPhv))
sansga['NBP'] = NBPhv
# ICSANSGA2 runs reuse n_evals from the loop above.
NBPhv2 = []
for i in range(10):
    problem = NBP_c_withcheapconstraints()
    algorithm = ICSANSGA2()
    res = minimize(problem, algorithm, ('n_evals', n_evals), seed=i, verbose=True)
    NBPhv2.append(hypervolume(res.F, ref))
    print(i)
print(np.mean(NBPhv2))
icsansga['NBPhv'] = NBPhv2
class BNH_c(Problem):
    """Pymoo wrapper for the BNH benchmark (2 objectives, 2 constraints)."""

    def __init__(self):
        super().__init__(n_var=2, n_obj=2, n_constr=2)
        self.xl = anp.array([0.0, 0.0])
        self.xu = anp.array([5.0, 3.0])

    def _evaluate(self, x, out, *args, **kwargs):
        # 2-D x is a population: evaluate row by row; 1-D x is one solution.
        if len(np.shape(x)) > 1:
            evaluations = [BNH(row) for row in x]
            out["F"] = np.array([f for f, _ in evaluations])
            out["G"] = np.array([g for _, g in evaluations])
        else:
            out["F"], out["G"] = BNH(x)
class BNH_c_withcheapconstraints(BNH_c):
    """BNH variant that can evaluate only its (cheap) constraints on request."""

    def _evaluate(self, x, out, *args, only_inexpensive_constraints=False, **kwargs):
        if not only_inexpensive_constraints:
            super()._evaluate(x, out, *args, **kwargs)
            return
        # Constraint-only call: evaluate into a scratch dict, expose just "G".
        scratch = {}
        super()._evaluate(x, scratch, *args, **kwargs)
        out["G"] = scratch["G"]
# Benchmark BNH: 10 seeded runs per algorithm; hypervolume vs fixed reference.
BNHhv = []
ref = np.array([140,50])
for i in range(10):
    problem = BNH_c()
    n_evals = len(problem.xl)*40
    algorithm = SANSGA2()
    res = minimize(problem, algorithm, ('n_evals', n_evals), seed=i, verbose=True)
    BNHhv.append(hypervolume(res.F, ref))
    print(i)
print(np.mean(BNHhv))
sansga['BNH'] = BNHhv
# ICSANSGA2 runs reuse n_evals from the loop above.
BNHhv2 = []
for i in range(10):
    problem = BNH_c_withcheapconstraints()
    algorithm = ICSANSGA2()
    res = minimize(problem, algorithm, ('n_evals', n_evals), seed=i, verbose=True)
    BNHhv2.append(hypervolume(res.F, ref))
    print(i)
print(np.mean(BNHhv2))
icsansga['BNHhv'] = BNHhv2
class CEXP_c(Problem):
    """Pymoo wrapper for the CEXP benchmark (2 objectives, 2 constraints)."""

    def __init__(self):
        super().__init__(n_var=2, n_obj=2, n_constr=2)
        self.xl = anp.array([0.1, 0.0])
        self.xu = anp.array([1.0, 5.0])

    def _evaluate(self, x, out, *args, **kwargs):
        # 2-D x is a population: evaluate row by row; 1-D x is one solution.
        if len(np.shape(x)) > 1:
            evaluations = [CEXP(row) for row in x]
            out["F"] = np.array([f for f, _ in evaluations])
            out["G"] = np.array([g for _, g in evaluations])
        else:
            out["F"], out["G"] = CEXP(x)
class CEXP_c_withcheapconstraints(CEXP_c):
    """CEXP variant that can evaluate only its (cheap) constraints on request."""

    def _evaluate(self, x, out, *args, only_inexpensive_constraints=False, **kwargs):
        if not only_inexpensive_constraints:
            super()._evaluate(x, out, *args, **kwargs)
            return
        # Constraint-only call: evaluate into a scratch dict, expose just "G".
        scratch = {}
        super()._evaluate(x, scratch, *args, **kwargs)
        out["G"] = scratch["G"]
# Benchmark CEXP: 10 seeded runs per algorithm; hypervolume vs fixed reference.
CEXPhv = []
ref = np.array([1,9])
for i in range(10):
    problem = CEXP_c()
    n_evals = len(problem.xl)*40
    algorithm = SANSGA2()
    res = minimize(problem, algorithm, ('n_evals', n_evals), seed=i, verbose=True)
    CEXPhv.append(hypervolume(res.F, ref))
    print(i)
print(np.mean(CEXPhv))
sansga['CEXP'] = CEXPhv
# ICSANSGA2 runs reuse n_evals from the loop above.
CEXPhv2 = []
for i in range(10):
    problem = CEXP_c_withcheapconstraints()
    algorithm = ICSANSGA2()
    res = minimize(problem, algorithm, ('n_evals', n_evals), seed=i, verbose=True)
    CEXPhv2.append(hypervolume(res.F, ref))
    print(i)
print(np.mean(CEXPhv2))
icsansga['CEXPhv'] = CEXPhv2
class SRN_c(Problem):
    """Pymoo wrapper for the SRN benchmark (2 objectives, 2 constraints)."""

    def __init__(self):
        super().__init__(n_var=2, n_obj=2, n_constr=2)
        self.xl = anp.array([-20.0, -20.0])
        self.xu = anp.array([20.0, 20.0])

    def _evaluate(self, x, out, *args, **kwargs):
        # 2-D x is a population: evaluate row by row; 1-D x is one solution.
        if len(np.shape(x)) > 1:
            evaluations = [SRN(row) for row in x]
            out["F"] = np.array([f for f, _ in evaluations])
            out["G"] = np.array([g for _, g in evaluations])
        else:
            out["F"], out["G"] = SRN(x)
class SRN_c_withcheapconstraints(SRN_c):
    """SRN variant that can evaluate only its (cheap) constraints on request."""

    def _evaluate(self, x, out, *args, only_inexpensive_constraints=False, **kwargs):
        if not only_inexpensive_constraints:
            super()._evaluate(x, out, *args, **kwargs)
            return
        # Constraint-only call: evaluate into a scratch dict, expose just "G".
        scratch = {}
        super()._evaluate(x, scratch, *args, **kwargs)
        out["G"] = scratch["G"]
# Benchmark SRN: 10 seeded runs per algorithm; hypervolume vs fixed reference.
SRNhv = []
ref = np.array([301,72])
for i in range(10):
    problem = SRN_c()
    n_evals = len(problem.xl)*40
    algorithm = SANSGA2()
    res = minimize(problem, algorithm, ('n_evals', n_evals), seed=i, verbose=True)
    SRNhv.append(hypervolume(res.F, ref))
    print(i)
print(np.mean(SRNhv))
sansga['SRN'] = SRNhv
# ICSANSGA2 runs reuse n_evals from the loop above.
SRNhv2 = []
for i in range(10):
    problem = SRN_c_withcheapconstraints()
    algorithm = ICSANSGA2()
    res = minimize(problem, algorithm, ('n_evals', n_evals), seed=i, verbose=True)
    SRNhv2.append(hypervolume(res.F, ref))
    print(i)
print(np.mean(SRNhv2))
icsansga['SRNhv'] = SRNhv2
class TNK_c(Problem):
    """Pymoo wrapper for the TNK benchmark (2 objectives, 2 constraints)."""

    def __init__(self):
        super().__init__(n_var=2, n_obj=2, n_constr=2)
        self.xl = anp.array([1e-5, 1e-5])
        self.xu = anp.array([np.pi, np.pi])

    def _evaluate(self, x, out, *args, **kwargs):
        # 2-D x is a population: evaluate row by row; 1-D x is one solution.
        if len(np.shape(x)) > 1:
            evaluations = [TNK(row) for row in x]
            out["F"] = np.array([f for f, _ in evaluations])
            out["G"] = np.array([g for _, g in evaluations])
        else:
            out["F"], out["G"] = TNK(x)
class TNK_c_withcheapconstraints(TNK_c):
    """TNK variant that can evaluate only its (cheap) constraints on request."""

    def _evaluate(self, x, out, *args, only_inexpensive_constraints=False, **kwargs):
        if not only_inexpensive_constraints:
            super()._evaluate(x, out, *args, **kwargs)
            return
        # Constraint-only call: evaluate into a scratch dict, expose just "G".
        scratch = {}
        super()._evaluate(x, scratch, *args, **kwargs)
        out["G"] = scratch["G"]
# Benchmark TNK: 10 seeded runs per algorithm; hypervolume vs fixed reference.
TNKhv = []
ref = np.array([3,3])
for i in range(10):
    problem = TNK_c()
    n_evals = len(problem.xl)*40
    algorithm = SANSGA2()
    res = minimize(problem, algorithm, ('n_evals', n_evals), seed=i, verbose=True)
    TNKhv.append(hypervolume(res.F, ref))
    print(i)
print(np.mean(TNKhv))
sansga['TNK'] = TNKhv
# ICSANSGA2 runs reuse n_evals from the loop above.
TNKhv2 = []
for i in range(10):
    problem = TNK_c_withcheapconstraints()
    algorithm = ICSANSGA2()
    # Bug fix: was `se2ed=i` (a typo'd, unknown keyword). Pass the RNG seed as
    # `seed=i`, consistent with every other minimize() call in this script.
    res = minimize(problem, algorithm, ('n_evals', n_evals), seed=i, verbose=True)
    TNKhv2.append(hypervolume(res.F, ref))
    print(i)
print(np.mean(TNKhv2))
icsansga['TNKhv'] = TNKhv2
class CTP1_c(Problem):
    """Pymoo wrapper for the CTP1 benchmark (2 objectives, 2 constraints)."""

    def __init__(self):
        super().__init__(n_var=2, n_obj=2, n_constr=2)
        self.xl = anp.array([0.0, 0.0])
        self.xu = anp.array([1.0, 1.0])

    def _evaluate(self, x, out, *args, **kwargs):
        # 2-D x is a population: evaluate row by row; 1-D x is one solution.
        if len(np.shape(x)) > 1:
            evaluations = [CTP1(row) for row in x]
            out["F"] = np.array([f for f, _ in evaluations])
            out["G"] = np.array([g for _, g in evaluations])
        else:
            out["F"], out["G"] = CTP1(x)
class CTP1_c_withcheapconstraints(CTP1_c):
    """CTP1 variant that can evaluate only its (cheap) constraints on request."""

    def _evaluate(self, x, out, *args, only_inexpensive_constraints=False, **kwargs):
        if not only_inexpensive_constraints:
            super()._evaluate(x, out, *args, **kwargs)
            return
        # Constraint-only call: evaluate into a scratch dict, expose just "G".
        scratch = {}
        super()._evaluate(x, scratch, *args, **kwargs)
        out["G"] = scratch["G"]
# Benchmark CTP1: 10 seeded runs per algorithm; hypervolume vs fixed reference.
CTP1hv = []
ref = np.array([1,2])
for i in range(10):
    problem = CTP1_c()
    n_evals = len(problem.xl)*40
    algorithm = SANSGA2()
    res = minimize(problem, algorithm, ('n_evals', n_evals), seed=i, verbose=True)
    CTP1hv.append(hypervolume(res.F, ref))
    print(i)
print(np.mean(CTP1hv))
sansga['CTP1'] = CTP1hv
# ICSANSGA2 runs reuse n_evals from the loop above.
CTP1hv2 = []
for i in range(10):
    problem = CTP1_c_withcheapconstraints()
    algorithm = ICSANSGA2()
    res = minimize(problem, algorithm, ('n_evals', n_evals), seed=i, verbose=True)
    CTP1hv2.append(hypervolume(res.F, ref))
    print(i)
print(np.mean(CTP1hv2))
icsansga['CTP1hv'] = CTP1hv2
class WB_c(Problem):
    """Pymoo wrapper for the welded-beam (WB) benchmark (2 objectives, 5 constraints)."""

    def __init__(self):
        super().__init__(n_var=4, n_obj=2, n_constr=5)
        self.xl = anp.array([0.125, 0.1, 0.1, 0.125])
        self.xu = anp.array([5.0, 10.0, 10.0, 5.0])

    def _evaluate(self, x, out, *args, **kwargs):
        # 2-D x is a population: evaluate row by row; 1-D x is one solution.
        if len(np.shape(x)) > 1:
            evaluations = [WB(row) for row in x]
            out["F"] = np.array([f for f, _ in evaluations])
            out["G"] = np.array([g for _, g in evaluations])
        else:
            out["F"], out["G"] = WB(x)
fe = 0  # global counter of evaluation calls for the next cheap-constraint problem
class WB_c_withcheapconstraints(WB_c):
    """WB variant that counts calls and can evaluate only its constraints."""

    def _evaluate(self, x, out, *args, only_inexpensive_constraints=False, **kwargs):
        global fe
        fe += 1  # tally every evaluation call (reported as fe/10 after the runs)
        if not only_inexpensive_constraints:
            super()._evaluate(x, out, *args, **kwargs)
            return
        # Constraint-only call: evaluate into a scratch dict, expose just "G".
        scratch = {}
        super()._evaluate(x, scratch, *args, **kwargs)
        out["G"] = scratch["G"]
# Benchmark WB: 10 seeded runs per algorithm; hypervolume vs fixed reference.
WBhv = []
ref = np.array([350,0.1])
for i in range(10):
    problem = WB_c()
    n_evals = len(problem.xl)*40
    algorithm = SANSGA2()
    res = minimize(problem, algorithm, ('n_evals', n_evals), seed=i, verbose=True)
    WBhv.append(hypervolume(res.F, ref))
    print(i)
print(np.mean(WBhv))
sansga['WB'] = WBhv
# ICSANSGA2 runs reuse n_evals from the loop above.
WBhv2 = []
for i in range(10):
    problem = WB_c_withcheapconstraints()
    algorithm = ICSANSGA2()
    res = minimize(problem, algorithm, ('n_evals', n_evals), seed=i, verbose=True)
    WBhv2.append(hypervolume(res.F, ref))
    print(i)
print(np.mean(WBhv2))
icsansga['WBhv'] = WBhv2
# Average number of evaluation calls per ICSANSGA2 run (fe counted globally).
print(fe/10)
class TBTD_c(Problem):
    """Pymoo wrapper for the TBTD benchmark (2 objectives, 3 constraints)."""

    def __init__(self):
        super().__init__(n_var=3, n_obj=2, n_constr=3)
        self.xl = anp.array([1.0, 0.0005, 0.0005])
        self.xu = anp.array([3.0, 0.05, 0.05])

    def _evaluate(self, x, out, *args, **kwargs):
        # 2-D x is a population: evaluate row by row; 1-D x is one solution.
        if len(np.shape(x)) > 1:
            evaluations = [TBTD(row) for row in x]
            out["F"] = np.array([f for f, _ in evaluations])
            out["G"] = np.array([g for _, g in evaluations])
        else:
            out["F"], out["G"] = TBTD(x)
class TBTD_c_withcheapconstraints(TBTD_c):
    """TBTD variant that can evaluate only its (cheap) constraints on request."""

    def _evaluate(self, x, out, *args, only_inexpensive_constraints=False, **kwargs):
        if not only_inexpensive_constraints:
            super()._evaluate(x, out, *args, **kwargs)
            return
        # Constraint-only call: evaluate into a scratch dict, expose just "G".
        scratch = {}
        super()._evaluate(x, scratch, *args, **kwargs)
        out["G"] = scratch["G"]
# Benchmark TBTD: 10 seeded runs per algorithm; hypervolume vs fixed reference.
TBTDhv = []
ref = np.array([0.1,50000])
for i in range(10):
    problem = TBTD_c()
    n_evals = len(problem.xl)*40
    algorithm = SANSGA2()
    res = minimize(problem, algorithm, ('n_evals', n_evals), seed=i, verbose=True)
    TBTDhv.append(hypervolume(res.F, ref))
    print(i)
print(np.mean(TBTDhv))
sansga['TBTD'] = TBTDhv
# ICSANSGA2 runs reuse n_evals from the loop above.
TBTDhv2 = []
for i in range(10):
    problem = TBTD_c_withcheapconstraints()
    algorithm = ICSANSGA2()
    res = minimize(problem, algorithm, ('n_evals', n_evals), seed=i, verbose=True)
    TBTDhv2.append(hypervolume(res.F, ref))
    print(i)
print(np.mean(TBTDhv2))
icsansga['TBTDhv'] = TBTDhv2
class DBD_c(Problem):
    """Pymoo wrapper for the disc-brake-design (DBD) benchmark (2 objectives, 5 constraints)."""

    def __init__(self):
        super().__init__(n_var=4, n_obj=2, n_constr=5)
        self.xl = anp.array([55.0, 75.0, 500.0, 2.0])
        self.xu = anp.array([80.0, 110.0, 3000.0, 20.0])

    def _evaluate(self, x, out, *args, **kwargs):
        # 2-D x is a population: evaluate row by row; 1-D x is one solution.
        if len(np.shape(x)) > 1:
            evaluations = [DBD(row) for row in x]
            out["F"] = np.array([f for f, _ in evaluations])
            out["G"] = np.array([g for _, g in evaluations])
        else:
            out["F"], out["G"] = DBD(x)
class DBD_c_withcheapconstraints(DBD_c):
    """DBD variant that can evaluate only its (cheap) constraints on request."""

    def _evaluate(self, x, out, *args, only_inexpensive_constraints=False, **kwargs):
        if not only_inexpensive_constraints:
            super()._evaluate(x, out, *args, **kwargs)
            return
        # Constraint-only call: evaluate into a scratch dict, expose just "G".
        scratch = {}
        super()._evaluate(x, scratch, *args, **kwargs)
        out["G"] = scratch["G"]
# Benchmark DBD: 10 seeded runs per algorithm; hypervolume vs fixed reference.
DBDhv = []
ref = np.array([5,50])
for i in range(10):
    problem = DBD_c()
    n_evals = len(problem.xl)*40
    algorithm = SANSGA2()
    res = minimize(problem, algorithm, ('n_evals', n_evals), seed=i, verbose=True)
    DBDhv.append(hypervolume(res.F, ref))
    print(i)
print(np.mean(DBDhv))
sansga['DBD'] = DBDhv
# ICSANSGA2 runs reuse n_evals from the loop above.
DBDhv2 = []
for i in range(10):
    problem = DBD_c_withcheapconstraints()
    algorithm = ICSANSGA2()
    res = minimize(problem, algorithm, ('n_evals', n_evals), seed=i, verbose=True)
    DBDhv2.append(hypervolume(res.F, ref))
    print(i)
print(np.mean(DBDhv2))
icsansga['DBDhv'] = DBDhv2
class WP_c(Problem):
    """Pymoo wrapper for the water-problem (WP) benchmark (5 objectives, 7 constraints)."""

    def __init__(self):
        super().__init__(n_var=3, n_obj=5, n_constr=7)
        self.xl = anp.array([0.01, 0.01, 0.01])
        self.xu = anp.array([0.45, 0.1, 0.1])

    def _evaluate(self, x, out, *args, **kwargs):
        # 2-D x is a population: evaluate row by row; 1-D x is one solution.
        if len(np.shape(x)) > 1:
            evaluations = [WP(row) for row in x]
            out["F"] = np.array([f for f, _ in evaluations])
            out["G"] = np.array([g for _, g in evaluations])
        else:
            out["F"], out["G"] = WP(x)
class WP_c_withcheapconstraints(WP_c):
    """WP variant that can evaluate only its (cheap) constraints on request."""

    def _evaluate(self, x, out, *args, only_inexpensive_constraints=False, **kwargs):
        if not only_inexpensive_constraints:
            super()._evaluate(x, out, *args, **kwargs)
            return
        # Constraint-only call: evaluate into a scratch dict, expose just "G".
        scratch = {}
        super()._evaluate(x, scratch, *args, **kwargs)
        out["G"] = scratch["G"]
# Benchmark WP: 10 seeded runs per algorithm; hypervolume vs fixed reference.
WPhv = []
ref = np.array([83000, 1350, 2.85, 15989825, 25000])
for i in range(10):
    problem = WP_c()
    n_evals = len(problem.xl)*40
    algorithm = SANSGA2(n_offsprings=10)
    res = minimize(problem, algorithm, ('n_evals', n_evals), seed=i, verbose=True)
    WPhv.append(hypervolume(res.F, ref))
    print(i)
print(np.mean(WPhv))
sansga['WP'] = WPhv
# ICSANSGA2 runs reuse n_evals from the loop above.
WPhv2 = []
for i in range(10):
    problem = WP_c_withcheapconstraints()
    algorithm = ICSANSGA2(n_offsprings=10)
    res = minimize(problem, algorithm, ('n_evals', n_evals), seed=i, verbose=True)
    WPhv2.append(hypervolume(res.F, ref))
    print(i)
print(np.mean(WPhv2))
icsansga['WPhv'] = WPhv2
class C3DTLZ4_c(Problem):
    """Pymoo wrapper for the C3-DTLZ4 benchmark (2 objectives, 2 constraints)."""

    def __init__(self):
        super().__init__(n_var=6, n_obj=2, n_constr=2)
        self.xl = anp.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
        self.xu = anp.array([1.0, 1.0, 1.0, 1.0, 1.0, 1.0])

    def _evaluate(self, x, out, *args, **kwargs):
        # 2-D x is a population: evaluate row by row; 1-D x is one solution.
        if len(np.shape(x)) > 1:
            evaluations = [C3DTLZ4(row) for row in x]
            out["F"] = np.array([f for f, _ in evaluations])
            out["G"] = np.array([g for _, g in evaluations])
        else:
            out["F"], out["G"] = C3DTLZ4(x)
fe = 0  # reset the global evaluation counter for the C3DTLZ4 cheap-constraint runs
class C3DTLZ4_c_withcheapconstraints(C3DTLZ4_c):
    """C3-DTLZ4 variant that counts calls and can evaluate only its constraints."""

    def _evaluate(self, x, out, *args, only_inexpensive_constraints=False, **kwargs):
        global fe
        fe += 1  # tally every evaluation call (reported as fe/10 after the runs)
        if not only_inexpensive_constraints:
            super()._evaluate(x, out, *args, **kwargs)
            return
        # Constraint-only call: evaluate into a scratch dict, expose just "G".
        scratch = {}
        super()._evaluate(x, scratch, *args, **kwargs)
        out["G"] = scratch["G"]
# Benchmark C3-DTLZ4: 10 seeded runs per algorithm; hypervolume vs fixed reference.
C3DTLZ4hv = []
ref = np.array([3,3])
for i in range(10):
    problem = C3DTLZ4_c()
    n_evals = len(problem.xl)*40
    algorithm = SANSGA2()
    res = minimize(problem, algorithm, ('n_evals', n_evals), seed=i, verbose=True)
    C3DTLZ4hv.append(hypervolume(res.F, ref))
    print(i)
print(np.mean(C3DTLZ4hv))
sansga['C3DTLZ4'] = C3DTLZ4hv
# ICSANSGA2 runs reuse n_evals from the loop above.
C3DTLZ4hv2 = []
for i in range(10):
    problem = C3DTLZ4_c_withcheapconstraints()
    algorithm = ICSANSGA2()
    res = minimize(problem, algorithm, ('n_evals', n_evals), seed=i, verbose=True)
    C3DTLZ4hv2.append(hypervolume(res.F, ref))
    print(i)
print(np.mean(C3DTLZ4hv2))
icsansga['C3DTLZ4hv'] = C3DTLZ4hv2
# Average number of evaluation calls per ICSANSGA2 run (fe counted globally).
print(fe/10)
class SPD_c(Problem):
    """Pymoo wrapper for the SPD benchmark (3 objectives, 9 constraints)."""

    def __init__(self):
        super().__init__(n_var=6, n_obj=3, n_constr=9)
        self.xl = anp.array([150.0, 25.0, 12.0, 8.0, 14.0, 0.63])
        self.xu = anp.array([274.32, 32.31, 22.0, 11.71, 18.0, 0.75])

    def _evaluate(self, x, out, *args, **kwargs):
        # 2-D x is a population: evaluate row by row; 1-D x is one solution.
        if len(np.shape(x)) > 1:
            evaluations = [SPD(row) for row in x]
            out["F"] = np.array([f for f, _ in evaluations])
            out["G"] = np.array([g for _, g in evaluations])
        else:
            out["F"], out["G"] = SPD(x)
class SPD_c_withcheapconstraints(SPD_c):
    """SPD variant exposing a constraints-only evaluation path."""

    def _evaluate(self, x, out, *args, only_inexpensive_constraints=False, **kwargs):
        if not only_inexpensive_constraints:
            super()._evaluate(x, out, *args, **kwargs)
            return
        # Evaluate into a scratch dict and copy over only the constraints.
        scratch = {}
        super()._evaluate(x, scratch, *args, **kwargs)
        out["G"] = scratch["G"]
# SPD benchmark: 10 runs of SANSGA2 vs ICSANSGA2 (40 evaluations/variable).
SPDhv = []
ref = np.array([16,19000,-260000])  # hypervolume reference point
for i in range(10):
    problem = SPD_c()
    n_evals = len(problem.xl)*40
    algorithm = SANSGA2(n_offsprings=10)
    # Seed each repetition with its run index for reproducibility.
    res = minimize(problem, algorithm, ('n_evals', n_evals), seed=i, verbose=True)
    SPDhv.append(hypervolume(res.F, ref))
    print(i)
print(np.mean(SPDhv))
sansga['SPD'] = SPDhv
# Same experiment with the cheap-constraint-aware variant.
SPDhv2 = []
for i in range(10):
    problem = SPD_c_withcheapconstraints()
    algorithm = ICSANSGA2(n_offsprings=10)
    res = minimize(problem, algorithm, ('n_evals', n_evals), seed=i, verbose=True)
    SPDhv2.append(hypervolume(res.F, ref))
    print(i)
print(np.mean(SPDhv2))
icsansga['SPDhv'] = SPDhv2
class CSI_c(Problem):
    """Wraps the CSI test function as a pymoo ``Problem``
    (7 variables, 3 objectives, 10 constraints)."""

    def __init__(self):
        super().__init__(n_var=7, n_obj=3, n_constr=10)
        self.xl = anp.array([0.5, 0.45, 0.5, 0.5, 0.875, 0.4, 0.4])
        self.xu = anp.array([1.5, 1.35, 1.5, 1.5, 2.625, 1.2, 1.2])

    def _evaluate(self, x, out, *args, **kwargs):
        # A 2-D input is a whole population: evaluate it row by row.
        if np.ndim(x) > 1:
            pairs = [CSI(row) for row in x]
            out["F"] = np.array([f for f, _ in pairs])
            out["G"] = np.array([g for _, g in pairs])
        else:
            out["F"], out["G"] = CSI(x)
class CSI_c_withcheapconstraints(CSI_c):
    """CSI variant exposing a constraints-only evaluation path."""

    def _evaluate(self, x, out, *args, only_inexpensive_constraints=False, **kwargs):
        if not only_inexpensive_constraints:
            super()._evaluate(x, out, *args, **kwargs)
            return
        # Evaluate into a scratch dict and copy over only the constraints.
        scratch = {}
        super()._evaluate(x, scratch, *args, **kwargs)
        out["G"] = scratch["G"]
# CSI benchmark: 10 runs of SANSGA2 vs ICSANSGA2 (40 evaluations/variable).
CSIhv = []
ref = np.array([42,4.5,13])  # hypervolume reference point
for i in range(10):
    problem = CSI_c()
    n_evals = len(problem.xl)*40
    algorithm = SANSGA2(n_offsprings=20)
    # Seed each repetition with its run index for reproducibility.
    res = minimize(problem, algorithm, ('n_evals', n_evals), seed=i, verbose=True)
    CSIhv.append(hypervolume(res.F, ref))
    print(i)
print(np.mean(CSIhv))
sansga['CSI'] = CSIhv
# Same experiment with the cheap-constraint-aware variant.
CSIhv2 = []
for i in range(10):
    problem = CSI_c_withcheapconstraints()
    algorithm = ICSANSGA2(n_offsprings=20)
    res = minimize(problem, algorithm, ('n_evals', n_evals), seed=i, verbose=True)
    CSIhv2.append(hypervolume(res.F, ref))
    print(i)
print(np.mean(CSIhv2))
icsansga['CSIhv'] = CSIhv2
class SRD_c(Problem):
    """Wraps the SRD test function as a pymoo ``Problem``
    (7 variables, 2 objectives, 11 constraints)."""

    def __init__(self):
        super().__init__(n_var=7, n_obj=2, n_constr=11)
        self.xl = anp.array([2.6, 0.7, 17, 7.3, 7.3, 2.9, 5])
        self.xu = anp.array([3.6,0.8,28,8.3,8.3,3.9,5.5])

    def _evaluate(self, x, out, *args, **kwargs):
        # A 2-D input is a whole population: evaluate it row by row.
        if np.ndim(x) > 1:
            pairs = [SRD(row) for row in x]
            out["F"] = np.array([f for f, _ in pairs])
            out["G"] = np.array([g for _, g in pairs])
        else:
            out["F"], out["G"] = SRD(x)
class SRD_c_withcheapconstraints(SRD_c):
    """SRD variant exposing a constraints-only evaluation path."""

    def _evaluate(self, x, out, *args, only_inexpensive_constraints=False, **kwargs):
        if not only_inexpensive_constraints:
            super()._evaluate(x, out, *args, **kwargs)
            return
        # Evaluate into a scratch dict and copy over only the constraints.
        scratch = {}
        super()._evaluate(x, scratch, *args, **kwargs)
        out["G"] = scratch["G"]
# SRD benchmark: 10 runs of SANSGA2 vs ICSANSGA2 (40 evaluations/variable).
SRDhv = []
ref = np.array([7000,1700])  # hypervolume reference point
for i in range(10):
    problem = SRD_c()
    n_evals = len(problem.xl)*40
    algorithm = SANSGA2()
    # Seed each repetition with its run index for reproducibility.
    res = minimize(problem, algorithm, ('n_evals', n_evals), seed=i, verbose=True)
    SRDhv.append(hypervolume(res.F, ref))
    print(i)
print(np.mean(SRDhv))
sansga['SRD'] = SRDhv
# Same experiment with the cheap-constraint-aware variant.
SRDhv2 = []
for i in range(10):
    problem = SRD_c_withcheapconstraints()
    algorithm = ICSANSGA2()
    res = minimize(problem, algorithm, ('n_evals', n_evals), seed=i, verbose=True)
    SRDhv2.append(hypervolume(res.F, ref))
    print(i)
print(np.mean(SRDhv2))
icsansga['SRDhv'] = SRDhv2
class TRICOP_c(Problem):
    """Wraps the TRICOP test function as a pymoo ``Problem``
    (2 variables, 3 objectives, 3 constraints)."""

    def __init__(self):
        super().__init__(n_var=2, n_obj=3, n_constr=3)
        self.xl = anp.array([-4.0,-4.0])
        self.xu = anp.array([4.0,4.0])

    def _evaluate(self, x, out, *args, **kwargs):
        # A 2-D input is a whole population: evaluate it row by row.
        if np.ndim(x) > 1:
            pairs = [TRICOP(row) for row in x]
            out["F"] = np.array([f for f, _ in pairs])
            out["G"] = np.array([g for _, g in pairs])
        else:
            out["F"], out["G"] = TRICOP(x)
class TRICOP_c_withcheapconstraints(TRICOP_c):
    """TRICOP variant exposing a constraints-only evaluation path."""

    def _evaluate(self, x, out, *args, only_inexpensive_constraints=False, **kwargs):
        if not only_inexpensive_constraints:
            super()._evaluate(x, out, *args, **kwargs)
            return
        # Evaluate into a scratch dict and copy over only the constraints.
        scratch = {}
        super()._evaluate(x, scratch, *args, **kwargs)
        out["G"] = scratch["G"]
# TRICOP benchmark: 10 runs of SANSGA2 vs ICSANSGA2 (40 evaluations/variable).
TRICOPhv = []
ref = np.array([34,-4,90])  # hypervolume reference point
for i in range(10):
    problem = TRICOP_c()
    n_evals = len(problem.xl)*40
    algorithm = SANSGA2()
    # Seed each repetition with its run index for reproducibility.
    res = minimize(problem, algorithm, ('n_evals', n_evals), seed=i, verbose=True)
    TRICOPhv.append(hypervolume(res.F, ref))
    print(i)
print(np.mean(TRICOPhv))
sansga['TRICOP'] = TRICOPhv
# Same experiment with the cheap-constraint-aware variant.
TRICOPhv2 = []
for i in range(10):
    problem = TRICOP_c_withcheapconstraints()
    algorithm = ICSANSGA2()
    res = minimize(problem, algorithm, ('n_evals', n_evals), seed=i, verbose=True)
    TRICOPhv2.append(hypervolume(res.F, ref))
    print(i)
print(np.mean(TRICOPhv2))
icsansga['TRICOPhv'] = TRICOPhv2
class BICOP1_c(Problem):
    """Wraps the BICOP1 test function as a pymoo ``Problem``
    (10 variables, 2 objectives, 1 constraint, unit-box bounds)."""

    def __init__(self):
        super().__init__(n_var=10, n_obj=2, n_constr=1)
        self.xl = anp.array([0.0] * 10)
        self.xu = anp.array([1.0] * 10)

    def _evaluate(self, x, out, *args, **kwargs):
        # A 2-D input is a whole population: evaluate it row by row.
        if np.ndim(x) > 1:
            pairs = [BICOP1(row) for row in x]
            out["F"] = np.array([f for f, _ in pairs])
            out["G"] = np.array([g for _, g in pairs])
        else:
            out["F"], out["G"] = BICOP1(x)
class BICOP1_c_withcheapconstraints(BICOP1_c):
    """BICOP1 variant exposing a constraints-only evaluation path."""

    def _evaluate(self, x, out, *args, only_inexpensive_constraints=False, **kwargs):
        if not only_inexpensive_constraints:
            super()._evaluate(x, out, *args, **kwargs)
            return
        # Evaluate into a scratch dict and copy over only the constraints.
        scratch = {}
        super()._evaluate(x, scratch, *args, **kwargs)
        out["G"] = scratch["G"]
# BICOP1 benchmark: 10 runs of SANSGA2 vs ICSANSGA2 (40 evaluations/variable).
BICOP1hv = []
ref = np.array([9,9])  # hypervolume reference point
for i in range(10):
    problem = BICOP1_c()
    n_evals = len(problem.xl)*40
    algorithm = SANSGA2()
    # Seed each repetition with its run index for reproducibility.
    res = minimize(problem, algorithm, ('n_evals', n_evals), seed=i, verbose=True)
    BICOP1hv.append(hypervolume(res.F, ref))
    print(i)
print(np.mean(BICOP1hv))
sansga['BICOP1'] = BICOP1hv
# Same experiment with the cheap-constraint-aware variant.
BICOP1hv2 = []
for i in range(10):
    problem = BICOP1_c_withcheapconstraints()
    algorithm = ICSANSGA2()
    res = minimize(problem, algorithm, ('n_evals', n_evals), seed=i, verbose=True)
    BICOP1hv2.append(hypervolume(res.F, ref))
    print(i)
print(np.mean(BICOP1hv2))
icsansga['BICOP1hv'] = BICOP1hv2
class BICOP2_c(Problem):
    """Wraps the BICOP2 test function as a pymoo ``Problem``
    (10 variables, 2 objectives, 2 constraints, unit-box bounds)."""

    def __init__(self):
        super().__init__(n_var=10, n_obj=2, n_constr=2)
        self.xl = anp.array([0.0] * 10)
        self.xu = anp.array([1.0] * 10)

    def _evaluate(self, x, out, *args, **kwargs):
        # A 2-D input is a whole population: evaluate it row by row.
        if np.ndim(x) > 1:
            pairs = [BICOP2(row) for row in x]
            out["F"] = np.array([f for f, _ in pairs])
            out["G"] = np.array([g for _, g in pairs])
        else:
            out["F"], out["G"] = BICOP2(x)
class BICOP2_c_withcheapconstraints(BICOP2_c):
    """BICOP2 variant exposing a constraints-only evaluation path."""

    def _evaluate(self, x, out, *args, only_inexpensive_constraints=False, **kwargs):
        if not only_inexpensive_constraints:
            super()._evaluate(x, out, *args, **kwargs)
            return
        # Evaluate into a scratch dict and copy over only the constraints.
        scratch = {}
        super()._evaluate(x, scratch, *args, **kwargs)
        out["G"] = scratch["G"]
# BICOP2 benchmark: 10 runs of SANSGA2 vs ICSANSGA2 (40 evaluations/variable).
BICOP2hv = []
ref = np.array([70,70])  # hypervolume reference point
for i in range(10):
    problem = BICOP2_c()
    n_evals = len(problem.xl)*40
    algorithm = SANSGA2()
    # Seed each repetition with its run index for reproducibility.
    res = minimize(problem, algorithm, ('n_evals', n_evals), seed=i, verbose=True)
    BICOP2hv.append(hypervolume(res.F, ref))
    print(i)
print(np.mean(BICOP2hv))
sansga['BICOP2'] = BICOP2hv
# Same experiment with the cheap-constraint-aware variant.
BICOP2hv2 = []
for i in range(10):
    problem = BICOP2_c_withcheapconstraints()
    algorithm = ICSANSGA2()
    res = minimize(problem, algorithm, ('n_evals', n_evals), seed=i, verbose=True)
    BICOP2hv2.append(hypervolume(res.F, ref))
    print(i)
print(np.mean(BICOP2hv2))
icsansga['BICOP2hv'] = BICOP2hv2
# Persist both hypervolume tables to JSON and print per-problem means.
for fname, table in (('sansgaii.json', sansga), ('icsansgaii.json', icsansga)):
    with open(fname, 'w') as fp:
        json.dump(table, fp)
    for key in table:
        print(key, np.mean(table[key]))
143946 | <filename>conf/apps.py
from django.apps import AppConfig
class ConfConfig(AppConfig):
    """Django application configuration for the ``conf`` app."""
    name = 'conf'
| StarcoderdataPython |
1685132 | <reponame>artofimagination/stereo-calibration-and-vSLAM
import os
import numpy as np
import glob
import shutil
from pathlib import Path
from backend import Backend, States, Modes
from pointCloudGLWidget import PointCloudGLWidget
from linePlotWidget import LinePlotWidget
from PyQt5 import QtCore
from PyQt5.QtCore import QThread
from PyQt5.QtWidgets import QMainWindow, QGridLayout, QAction
from PyQt5.QtWidgets import QSpinBox, QLabel, QHBoxLayout, QFileDialog
from PyQt5.QtWidgets import QWidget, QApplication, QCheckBox
from PyQt5.QtWidgets import QPushButton, QTabWidget, QVBoxLayout
from PyQt5.QtWidgets import QDoubleSpinBox, QComboBox, QGroupBox
# Deletes all files in the content path.
def _clearContent(content):
files = glob.glob(content)
for f in files:
if not os.path.isdir(f):
os.remove(f)
files = glob.glob(content)
for f in files:
if not os.path.isdir(f):
os.remove(f)
# Calibration widget.
# The only reason it exists is to catch the 'n' key press event.
class CalibWidget(QWidget):
    """Container widget for the calibration tab.

    Emits ``takeImageTriggered`` whenever the user presses the 'n' key so
    the backend can capture a calibration image pair.
    """
    takeImageTriggered = QtCore.pyqtSignal()

    def keyPressEvent(self, event):
        # Let the base class handle the event first, then emit on 'n'.
        super(CalibWidget, self).keyPressEvent(event)
        if event.key() == QtCore.Qt.Key_N:
            self.takeImageTriggered.emit()
# Main Qt UI window
class MainWindow(QMainWindow):
# Folder that contains the saved UI settings.
SETTINGS_DIR = "settings"
    def __init__(self, *args, **kwargs):
        """Build the main window: backend worker, tab pages, initial sizing."""
        super(MainWindow, self).__init__(*args, **kwargs)
        # Initialize worker thread related members.
        self.workerThread = QThread(self)
        self.worker = Backend()
        self.worker.signals.framesSent.connect(self.updateVideo)
        self.worker.signals.finished.connect(self.thread_complete)
        self.worker.signals.error.connect(self.sigint_handler)
        # init UI.
        self.setMinimumSize(200, 100)
        mainLayout = QGridLayout()
        self._createMenu()
        # Create tabs — one page per workflow stage.
        tabwidget = QTabWidget()
        calibratorLayout = self._createCalibrationUI()
        # CalibWidget is used (instead of a plain QWidget) to catch 'n' presses.
        self.calibratorLayoutWidget = CalibWidget()
        self.calibratorLayoutWidget.setLayout(calibratorLayout)
        tabwidget.addTab(self.calibratorLayoutWidget, "Sensor calibration")
        bmConfiguratorLayout = self._createBlockMatchingConfiguratorUI()
        bmConfiguratorLayoutWidget = QWidget()
        bmConfiguratorLayoutWidget.setLayout(bmConfiguratorLayout)
        tabwidget.addTab(
            bmConfiguratorLayoutWidget, "Block Matching Configurator")
        featureDetectorLayout = self._createFeatureDetectionUI()
        featureDetectionLayoutWidget = QWidget()
        featureDetectionLayoutWidget.setLayout(featureDetectorLayout)
        tabwidget.addTab(featureDetectionLayoutWidget, "Feature detection")
        motionEstimationLayout = self._createMotionEstimationUI()
        motionEstimationLayoutWidget = QWidget()
        motionEstimationLayoutWidget.setLayout(motionEstimationLayout)
        tabwidget.addTab(motionEstimationLayoutWidget, "Motion estimation")
        vSlamUILayout = self._createVSlamUI()
        vSlamUILayoutWidget = QWidget()
        vSlamUILayoutWidget.setLayout(vSlamUILayout)
        tabwidget.addTab(vSlamUILayoutWidget, "Visual SLAM")
        self._initUIElements()
        mainLayout.addWidget(tabwidget, 0, 0)
        mainWidget = QWidget()
        mainWidget.setLayout(mainLayout)
        self.setCentralWidget(mainWidget)
        # Size the window to fill the primary screen.
        desktop = QApplication.desktop()
        screenRect = desktop.screenGeometry()
        self.resize(screenRect.width(), screenRect.height())
        # Make sure the settings directory exists before anything is saved.
        if not os.path.isdir(self.SETTINGS_DIR):
            os.mkdir(self.SETTINGS_DIR)
        self.show()
# Saves all UI values into an npz file.
# Saves all calibration images and chessboard.npz for each sensor
# in the folder named identical to the settings npz file
def _saveValues(self, settingsName):
np.savez_compressed(
settingsName,
bm_textureThreshold=self.textureThreshold.value(),
bm_min_disp=self.min_disp.value(),
bm_num_disp=self.num_disp.value(),
bm_blocksize=self.blockSize.value(),
bm_uniquenessRatio=self.uniquenessRatio.value(),
bm_speckleWindowSize=self.speckleWindowSize.value(),
bm_speckleRange=self.speckleRange.value(),
bm_disp12MaxDiff=self.disp12MaxDiff.value(),
bm_preFilterType=self.preFilterType.value(),
bm_preFilterSize=self.preFilterSize.value(),
bm_preFilterCap=self.preFilterCap.value(),
bm_smallerBlockSize=self.smallerBlockSize.value(),
bm_mode=self.blockMatching.currentIndex(),
bm_drawEpipolar=self.drawEpipolar.isChecked(),
bm_resolution=self.resolutionBm.currentIndex(),
bm_leftCameraIndex=self.bmCameraIndexLeft.currentIndex(),
bm_rightCameraIndex=self.bmCameraIndexLeft.currentIndex(),
pc_fov=self.fov.value(),
pc_samplingRatio=self.samplingRatio.value(),
pc_ignoreRendererMaxDepth=self.rendererMaxDepth.value(),
cal_calib_image_index=self.calib_image_index.value(),
cal_rms_limit=self.rms_limit.value(),
cal_advanced=self.advanced.isChecked(),
cal_ignoreExitingImageData=self
.ignoreExistingImageData
.isChecked(),
cal_rms_increment=self.increment.value(),
cal_max_rms=self.max_rms.value(),
cal_resolution=self.resolutionCal.currentIndex(),
cal_leftCameraIndex=self.calibCameraIndexLeft.currentIndex(),
cal_rightCameraIndex=self.calibCameraIndexRight.currentIndex(),
feat_featureDetector=self.featureDetector.currentIndex(),
feat_featureMatcher=self.featureMatcher.currentIndex(),
feat_maxDistance=self.maxDistance.value(),
motion_inliers=self.inliers.value(),
motion_maxDepth=self.maxDepth.value(),
motion_reprojectionError=self.reprojectionError.value())
settingsPath = Path(settingsName).with_suffix('')
settingsPath = settingsPath.stem
files = glob.glob(f"calibImages/left/{str(settingsPath)}/*")
for f in files:
os.remove(f)
files = glob.glob(f"calibImages/right/{str(settingsPath)}/*")
for f in files:
os.remove(f)
leftDirectory = f"calibImages/left/{str(settingsPath)}"
if not os.path.isdir(leftDirectory):
os.mkdir(leftDirectory)
files = glob.glob("calibImages/left/*")
for f in files:
if not os.path.isdir(f):
shutil.copy(f, leftDirectory)
rightDirectory = f"calibImages/right/{str(settingsPath)}"
if not os.path.isdir(rightDirectory):
os.mkdir(rightDirectory)
files = glob.glob("calibImages/right/*")
for f in files:
if not os.path.isdir(f):
shutil.copy(f, rightDirectory)
# Saves UI settings and calibration images/data
# Also creates a lastSaved folder
# for quick loading last saved info when the application starts.
def saveSettings(self):
options = QFileDialog.Options()
options |= QFileDialog.DontUseNativeDialog
fileName, _ = QFileDialog.getSaveFileName(
self,
"QFileDialog.getSaveFileName()", "", "npz (*.npz)",
options=options)
if fileName:
self._saveValues(fileName)
_clearContent("calibImages/left/lastSaved/*")
_clearContent("calibImages/left/lastSaved/*")
self._saveValues(f"{self.SETTINGS_DIR}/lastSaved.npz")
# Loads and sets settings values from the npz file.
# Also loads the appropriate calib images and data.
def _setLoadedValues(self, settingsName):
_clearContent("calibImages/left/*")
_clearContent("calibImages/right/*")
settings = np.load(settingsName)
self.textureThreshold.setValue(settings["bm_textureThreshold"])
self.min_disp.setValue(settings["bm_min_disp"])
self.num_disp.setValue(settings["bm_num_disp"])
self.blockSize.setValue(settings["bm_blocksize"])
self.uniquenessRatio.setValue(settings["bm_uniquenessRatio"])
self.speckleWindowSize.setValue(
settings["bm_speckleWindowSize"])
self.speckleRange.setValue(settings["bm_speckleRange"])
self.disp12MaxDiff.setValue(settings["bm_disp12MaxDiff"])
self.preFilterType.setValue(settings["bm_preFilterType"])
self.preFilterSize.setValue(settings["bm_preFilterSize"])
self.preFilterCap.setValue(settings["bm_preFilterCap"])
self.blockMatching.setCurrentIndex(settings["bm_mode"])
self.drawEpipolar.setChecked(bool(settings["bm_drawEpipolar"]))
self.smallerBlockSize.setValue(settings["bm_smallerBlockSize"])
self.resolutionBm.setCurrentIndex(settings["bm_resolution"])
self.bmCameraIndexLeft.setCurrentIndex(settings["bm_leftCameraIndex"])
self.bmCameraIndexRight.setCurrentIndex(settings["bm_rightCameraIndex"])
self.fov.setValue(settings["pc_fov"])
self.samplingRatio.setValue(settings["pc_samplingRatio"])
self.rendererMaxDepth.setValue(settings["pc_ignoreRendererMaxDepth"])
self.calib_image_index.setValue(settings["cal_calib_image_index"])
self.rms_limit.setValue(settings["cal_rms_limit"])
self.advanced.setChecked(bool(settings["cal_advanced"]))
self.ignoreExistingImageData.setChecked(
bool(settings["cal_ignoreExitingImageData"]))
self.increment.setValue(settings["cal_rms_increment"])
self.max_rms.setValue(settings["cal_max_rms"])
self.resolutionCal.setCurrentIndex(settings["cal_resolution"])
self.calibCameraIndexLeft.setCurrentIndex(settings["cal_leftCameraIndex"])
self.calibCameraIndexRight.setCurrentIndex(settings["cal_rightCameraIndex"])
self.featureDetector.setCurrentIndex(settings["feat_featureDetector"])
self.featureMatcher.setCurrentIndex(settings["feat_featureMatcher"])
self.maxDistance.setValue(settings["feat_maxDistance"])
self.inliers.setValue(settings["motion_inliers"])
self.maxDepth.setValue(settings["motion_maxDepth"])
self.reprojectionError.setValue(settings["motion_reprojectionError"])
settingsPath = Path(settingsName).with_suffix('')
settingsPath = settingsPath.stem
directory = f"calibImages/left/{str(settingsPath)}"
if os.path.isdir(str(directory)):
files = glob.glob(f"{directory}/*")
for f in files:
shutil.copy(f, "calibImages/left/")
else:
print(
f"No left calib images to load \
(calibImages/left/{str(settingsPath)})")
directory = f"calibImages/right/{str(settingsPath)}"
if os.path.isdir(str(directory)):
files = glob.glob(f"{directory}/*")
for f in files:
shutil.copy(f, "calibImages/right/")
else:
print(f"No left calib images to load \
(calibImages/left/{str(settingsPath)})")
    # Loads settings, calib images and data.
    def loadSettings(self):
        """Ask the user for a settings npz file and restore all UI values."""
        options = QFileDialog.Options()
        options |= QFileDialog.DontUseNativeDialog
        fileName, _ = QFileDialog.getOpenFileName(
            self,
            "QFileDialog.getOpenFileName()", "", "npz (*.npz)",
            options=options)
        if fileName:
            try:
                self._setLoadedValues(fileName)
            except IOError:
                print("Settings file at {0} not found"
                      .format(fileName))
                # NOTE(review): shuts the application down when loading
                # fails — confirm this hard-exit behavior is intentional.
                self.sigint_handler()
# Creates the menu items.
def _createMenu(self):
saveAction = QAction('&Save Settings', self)
saveAction.setShortcut('Ctrl+S')
saveAction.setStatusTip('Save settings')
saveAction.triggered.connect(self.saveSettings)
loadAction = QAction('&Load Settings', self)
loadAction.setShortcut('Ctrl+L')
loadAction.setStatusTip('Load settings')
loadAction.triggered.connect(self.loadSettings)
exitAction = QAction('&Exit', self)
exitAction.setShortcut('Ctrl+Q')
exitAction.setStatusTip('Exit application')
exitAction.triggered.connect(self.sigint_handler)
# Create menu bar and add action
menuBar = self.menuBar()
fileMenu = menuBar.addMenu('&File')
fileMenu.addAction(saveAction)
fileMenu.addAction(loadAction)
fileMenu.addAction(exitAction)
    # Creates the calibration tab UI.
    def _createCalibrationUI(self):
        """Build and return the layout of the sensor-calibration tab."""
        layout = QGridLayout()
        # Multi-line quick user guide shown at the top of the tab.
        helpLabel = QLabel("Quick user guide:\n\
1. To start calibration interface, press start.\n\
2. Once started, to capture a pair of frames, press 'n' or 'Take image'\
when your sensor's are in the desired position.\n\
3. When enough calib images are create press process, to create the final\
calibration file.\n\n\
Use modes:\n\
a, Simple (Default): capture as many calibration images as you want, \
when done press 'Process'.\n\
b, Advanced: during capture the system will analyse the calibration RMS\
and will throw away if there is ahigh RMS result. See more in README.md\n\
When finished press 'Process'")
        calibInfoLabel = QLabel("Calibration info")
        self.calibInfo = QLabel()
        RMSLabel = QLabel("RMS")
        self.RMSValue = QLabel()
        labelLayout = QVBoxLayout()
        labelLayout.addWidget(helpLabel)
        labelLayout.addWidget(calibInfoLabel)
        labelLayout.addWidget(self.calibInfo)
        labelLayout.addWidget(RMSLabel)
        labelLayout.addWidget(self.RMSValue)
        labelsLayoutWidget = QWidget()
        labelsLayoutWidget.setLayout(labelLayout)
        # Capture options row (resolution + advanced-mode toggles).
        self.resolutionCal = QComboBox()
        self.resolutionCal.addItems(["480p", "720p"])
        ignoreExistingImageDataLabel = QLabel("Ignore existing image data")
        self.ignoreExistingImageData = QCheckBox()
        # Disabled until streaming starts (re-enabled in updateVideo).
        self.ignoreExistingImageData.setDisabled(True)
        advancedLabel = QLabel("Advanced use mode")
        self.advanced = QCheckBox()
        self.advanced.setDisabled(True)
        optionsLayout = QHBoxLayout()
        optionsLayout.addWidget(QLabel("Resolution"))
        optionsLayout.addWidget(self.resolutionCal)
        optionsLayout.addWidget(ignoreExistingImageDataLabel)
        optionsLayout.addWidget(self.ignoreExistingImageData)
        optionsLayout.addWidget(advancedLabel)
        optionsLayout.addWidget(self.advanced)
        optionsLayoutWidget = QWidget()
        optionsLayoutWidget.setLayout(optionsLayout)
        # Numeric tuning parameters of the advanced calibration mode.
        self.calib_image_index = QSpinBox()
        self.rms_limit = QDoubleSpinBox()
        self.increment = QDoubleSpinBox()
        self.max_rms = QDoubleSpinBox()
        configLayout = QHBoxLayout()
        configLayout.addWidget(QLabel("calib_image_index"))
        configLayout.addWidget(self.calib_image_index)
        configLayout.addWidget(QLabel("rms_limit"))
        configLayout.addWidget(self.rms_limit)
        configLayout.addWidget(QLabel("increment"))
        configLayout.addWidget(self.increment)
        configLayout.addWidget(QLabel("max_rms"))
        configLayout.addWidget(self.max_rms)
        configLayoutWidget = QWidget()
        configLayoutWidget.setLayout(configLayout)
        self.calib_image_index.setRange(0, 1000)
        self.calib_image_index.setSingleStep(1)
        self.rms_limit.setRange(0, 10.0)
        self.rms_limit.setDecimals(5)
        self.rms_limit.setSingleStep(0.005)
        self.increment.setRange(0, 1.0)
        self.increment.setDecimals(5)
        self.increment.setSingleStep(0.005)
        self.max_rms.setRange(0, 10.0)
        self.max_rms.setDecimals(5)
        self.max_rms.setSingleStep(0.005)
        # Left/right camera preview area with lens-index selectors.
        cameraLayout = QGridLayout()
        displayGroupBox = QGroupBox("Visualisation")
        displayGroupBox.setLayout(cameraLayout)
        self.video0Calib = QLabel()
        self.calibSwapCameras = QPushButton("Swap lenses")
        self.calibCameraIndexLeft = QComboBox()
        cameraLayout.addWidget(self.video0Calib, 0, 0, 4, 3)
        cameraLayout.addWidget(self.calibSwapCameras, 5, 0, 1, 1)
        cameraLayout.addWidget(QLabel("Left camera indices"), 5, 1, 1, 1)
        cameraLayout.addWidget(self.calibCameraIndexLeft, 5, 2, 1, 1)
        self.video1Calib = QLabel()
        self.calibCameraIndexRight = QComboBox()
        cameraLayout.addWidget(self.video1Calib, 0, 3, 4, 3)
        cameraLayout.addWidget(QLabel("Right camera indices"), 5, 3, 1, 1)
        cameraLayout.addWidget(self.calibCameraIndexRight, 5, 4, 1, 1)
        layout.addWidget(displayGroupBox, 0, 0, 1, 1)
        start = QPushButton("Start")
        self.process = QPushButton("Process")
        # Process/Take-image are hidden until streaming starts.
        self.process.hide()
        self.takeImage = QPushButton("Take image")
        self.takeImage.hide()
        start.clicked.connect(self._startCalibration)
        buttonLayout = QHBoxLayout()
        buttonLayout.addWidget(start)
        buttonLayout.addWidget(self.process)
        buttonLayout.addWidget(self.takeImage)
        buttonLayoutWidget = QWidget()
        buttonLayoutWidget.setLayout(buttonLayout)
        layout.addWidget(labelsLayoutWidget, 1, 0, 1, 2)
        layout.addWidget(optionsLayoutWidget, 2, 0, 1, 4)
        layout.addWidget(configLayoutWidget, 3, 0, 1, 4)
        layout.addWidget(buttonLayoutWidget, 4, 0, 1, 4)
        return layout
    # Creates the block matching UI.
    def _createBlockMatchingConfiguratorUI(self):
        """Build and return the layout of the block-matching configurator tab."""
        # NOTE(review): pyqtSignal objects created at runtime and stored on
        # the instance are not usable as signals (PyQt requires them to be
        # class attributes); these assignments appear to be dead code.
        self.win_sizeUpdated = QtCore.pyqtSignal(int)
        self.min_dispUpdated = QtCore.pyqtSignal(int)
        self.num_dispUpdated = QtCore.pyqtSignal(int)
        self.blockSizeUpdated = QtCore.pyqtSignal(int)
        self.uniquenessRatioUpdated = QtCore.pyqtSignal(int)
        self.speckleWindowSizeUpdated = QtCore.pyqtSignal(int)
        self.speckleRangeUpdated = QtCore.pyqtSignal(int)
        self.disp12MaxDiffUpdated = QtCore.pyqtSignal(int)
        layout = QGridLayout()
        # Spin boxes for the OpenCV block-matching parameters.
        self.textureThresholdLabel = QLabel("textureThreshold")
        self.textureThreshold = QSpinBox()
        self.min_disp = QSpinBox()
        self.num_disp = QSpinBox()
        self.blockSize = QSpinBox()
        self.uniquenessRatio = QSpinBox()
        self.speckleWindowSize = QSpinBox()
        self.speckleRange = QSpinBox()
        self.disp12MaxDiff = QSpinBox()
        self.preFilterSizeLabel = QLabel("preFilterSize")
        self.preFilterSize = QSpinBox()
        self.preFilterTypeLabel = QLabel("preFilterType")
        self.preFilterType = QSpinBox()
        self.preFilterCapLabel = QLabel("preFilterCap")
        self.preFilterCap = QSpinBox()
        self.smallerBlockSizeLabel = QLabel("smallerBlockSize")
        self.smallerBlockSize = QSpinBox()
        self.start = QPushButton("Start")
        self.start.clicked.connect(self._startBMConfiguration)
        # Init spin boxes
        self.textureThreshold.setRange(0, 10000)
        self.min_disp.setRange(-1, 1000)
        self.min_disp.setSingleStep(1)
        # num_disp must stay a multiple of 16, hence the step.
        self.num_disp.setRange(16, 1000)
        self.num_disp.setSingleStep(16)
        # Block/filter sizes must be odd, hence step 2 from an odd minimum.
        self.blockSize.setRange(5, 255)
        self.blockSize.setSingleStep(2)
        self.smallerBlockSize.setRange(-1, 1000000)
        self.smallerBlockSize.setSingleStep(1)
        self.uniquenessRatio.setRange(0, 1000)
        self.speckleWindowSize.setRange(-1, 1000)
        self.speckleRange.setRange(-1, 1000)
        self.disp12MaxDiff.setRange(-1, 1000)
        self.preFilterType.setRange(0, 1)
        self.preFilterType.setSingleStep(1)
        self.preFilterSize.setRange(5, 255)
        self.preFilterSize.setSingleStep(2)
        self.preFilterCap.setRange(1, 63)
        self.preFilterCap.setSingleStep(1)
        self.resolutionBm = QComboBox()
        self.resolutionBm.addItems(["480p", "720p"])
        self.blockMatching = QComboBox()
        self.blockMatching.addItems(
            ["Block Matching", "Semi-Global Block Matching"])
        self.drawEpipolar = QCheckBox()
        # Camera previews, disparity view and the 3-D point-cloud renderer.
        self.video0Bm = QLabel()
        self.bmCameraIndexLeft = QComboBox()
        self.bmSwapCameras = QPushButton("Swap lenses")
        self.video1Bm = QLabel()
        self.bmCameraIndexRight = QComboBox()
        self.video_disp = QLabel()
        self.pointCloud = PointCloudGLWidget()
        self.fov = QSpinBox()
        self.fov.setRange(1, 360)
        self.fov.valueChanged.connect(
            self.pointCloud.setFov)
        self.rendererMaxDepth = QSpinBox()
        self.rendererMaxDepth.setRange(1, 5000)
        self.rendererMaxDepth.valueChanged.connect(
            self.pointCloud.setIgnoreDepthLimit)
        self.samplingRatio = QSpinBox()
        self.samplingRatio.setRange(1, 10000)
        self.samplingRatio.setSingleStep(50)
        self.samplingRatio.valueChanged.connect(
            self.pointCloud.setSamplingRatio)
        cameraLayout = QGridLayout()
        displayGroupBox = QGroupBox("Visualisation")
        displayGroupBox.setLayout(cameraLayout)
        pointCloudLayout = QGridLayout()
        pointCloudLayout.addWidget(self.pointCloud, 0, 0, 4, 4)
        pointCloudLayout.addWidget(QLabel("Field of view"), 0, 4, 1, 1)
        pointCloudLayout.addWidget(self.fov, 0, 5, 1, 1)
        pointCloudLayout.addWidget(QLabel("Sampling ratio"), 1, 4, 1, 1)
        pointCloudLayout.addWidget(self.samplingRatio, 1, 5, 1, 1)
        pointCloudLayout.addWidget(QLabel("Ignore depth"), 2, 4, 1, 1)
        pointCloudLayout.addWidget(self.rendererMaxDepth, 2, 5, 1, 1)
        pointCloudControl = QGroupBox("Point cloud")
        pointCloudControl.setLayout(pointCloudLayout)
        cameraLayout.addWidget(self.video0Bm, 0, 0, 4, 4)
        cameraLayout.addWidget(self.bmSwapCameras, 5, 0, 1, 1)
        cameraLayout.addWidget(QLabel("Left camera indices"), 5, 1, 1, 1)
        cameraLayout.addWidget(self.bmCameraIndexLeft, 5, 2, 1, 1)
        cameraLayout.addWidget(self.video1Bm, 0, 3, 4, 4)
        cameraLayout.addWidget(QLabel("Right camera indices"), 5, 3, 1, 1)
        cameraLayout.addWidget(self.bmCameraIndexRight, 5, 4, 1, 1)
        cameraLayout.addWidget(self.video_disp, 6, 0, 4, 4)
        cameraLayout.addWidget(pointCloudControl, 6, 3, 4, 4)
        layout.addWidget(displayGroupBox, 0, 0, 1, 6)
        # Grid of all block-matching tuning controls.
        bmControlLayout = QGridLayout()
        bmControlLayout.addWidget(QLabel("Block matching type"), 0, 0, 1, 1)
        bmControlLayout.addWidget(self.blockMatching, 0, 1, 1, 1)
        bmControlLayout.addWidget(QLabel("Draw epipolar lines"), 0, 2, 1, 1)
        bmControlLayout.addWidget(self.drawEpipolar, 0, 3, 1, 1)
        bmControlLayout.addWidget(QLabel("Resolution"), 0, 4, 1, 1)
        bmControlLayout.addWidget(self.resolutionBm, 0, 5, 1, 1)
        bmControlLayout.addWidget(self.textureThresholdLabel, 1, 0, 1, 1)
        bmControlLayout.addWidget(self.textureThreshold, 1, 1, 1, 1)
        bmControlLayout.addWidget(QLabel("min_disp"), 1, 2, 1, 1)
        bmControlLayout.addWidget(self.min_disp, 1, 3, 1, 1)
        bmControlLayout.addWidget(QLabel("num_disp"), 1, 4, 1, 1)
        bmControlLayout.addWidget(self.num_disp, 1, 5, 1, 1)
        bmControlLayout.addWidget(QLabel("blockSize"), 1, 6, 1, 1)
        bmControlLayout.addWidget(self.blockSize, 1, 7, 1, 1)
        bmControlLayout.addWidget(QLabel("uniquenessRatio"), 1, 8, 1, 1)
        bmControlLayout.addWidget(self.uniquenessRatio, 1, 9, 1, 1)
        bmControlLayout.addWidget(QLabel("speckleWindowSize"), 1, 10, 1, 1)
        bmControlLayout.addWidget(self.speckleWindowSize, 1, 11, 1, 1)
        bmControlLayout.addWidget(QLabel("speckleRange"), 1, 12, 1, 1)
        bmControlLayout.addWidget(self.speckleRange, 1, 13, 1, 1)
        bmControlLayout.addWidget(QLabel("disp12MaxDiff"), 1, 14, 1, 1)
        bmControlLayout.addWidget(self.disp12MaxDiff, 1, 15, 1, 1)
        bmControlLayout.addWidget(self.smallerBlockSizeLabel, 2, 0, 1, 1)
        bmControlLayout.addWidget(self.smallerBlockSize, 2, 1, 1, 1)
        bmControlLayout.addWidget(self.preFilterTypeLabel, 2, 2, 1, 1)
        bmControlLayout.addWidget(self.preFilterType, 2, 3, 1, 1)
        bmControlLayout.addWidget(self.preFilterCapLabel, 2, 4, 1, 1)
        bmControlLayout.addWidget(self.preFilterCap, 2, 5, 1, 1)
        bmControlLayout.addWidget(self.preFilterSizeLabel, 2, 6, 1, 1)
        bmControlLayout.addWidget(self.preFilterSize, 2, 7, 1, 1)
        bmControlGroup = QGroupBox("Block Matching control")
        bmControlGroup.setLayout(bmControlLayout)
        layout.addWidget(bmControlGroup, 2, 0, 1, 8)
        buttonLayout = QHBoxLayout()
        buttonLayout.addWidget(self.start)
        buttonLayoutWidget = QWidget()
        buttonLayoutWidget.setLayout(buttonLayout)
        layout.addWidget(buttonLayoutWidget, 4, 0, 1, 8)
        return layout
    ## @brief Creates the feature detection UI.
    def _createFeatureDetectionUI(self):
        """Build and return the layout of the feature-detection tab."""
        layout = QGridLayout()
        self.videoFD = QLabel()
        messageTitle = QLabel("Logging:")
        messageLabel = QLabel()
        # The backend pushes its status text straight into this label.
        self.worker.signals.updateFeatureInfo.connect(messageLabel.setText)
        messageLayout = QVBoxLayout()
        messageLayout.addWidget(messageTitle)
        messageLayout.addWidget(messageLabel)
        messageLayoutWidget = QWidget()
        messageLayoutWidget.setLayout(messageLayout)
        # Detector/matcher selection is forwarded directly to the backend.
        featureDetectorLabel = QLabel("Feature detector")
        self.featureDetector = QComboBox()
        self.featureDetector.addItems(["sift", "orb", "surf"])
        self.featureDetector.currentTextChanged.connect(self.worker.updateFeatureDetector)
        featureMatcherLabel = QLabel("Feature matcher")
        self.featureMatcher = QComboBox()
        self.featureMatcher.addItems(["BF", "FLANN"])
        self.featureMatcher.currentTextChanged.connect(self.worker.updateFeatureMatcher)
        maxDistanceLabel = QLabel("Max allowed distance between best matches")
        self.maxDistance = QDoubleSpinBox()
        self.maxDistance.valueChanged.connect(self.worker.updateMatchDistanceThreshold)
        self.maxDistance.setRange(0.01, 100)
        self.maxDistance.setSingleStep(0.01)
        controlLayout = QGridLayout()
        controlLayout.addWidget(featureDetectorLabel, 0, 2, 1, 1)
        controlLayout.addWidget(self.featureDetector, 0, 3, 1, 1)
        controlLayout.addWidget(featureMatcherLabel, 0, 4, 1, 1)
        controlLayout.addWidget(self.featureMatcher, 0, 5, 1, 1)
        controlLayout.addWidget(maxDistanceLabel, 0, 6, 1, 1)
        controlLayout.addWidget(self.maxDistance, 0, 7, 1, 1)
        controlLayoutWidget = QWidget()
        controlLayoutWidget.setLayout(controlLayout)
        start = QPushButton("Start")
        start.clicked.connect(self._startFeatureDetection)
        buttonLayout = QHBoxLayout()
        buttonLayout.addWidget(start)
        buttonLayoutWidget = QWidget()
        buttonLayoutWidget.setLayout(buttonLayout)
        layout.addWidget(self.videoFD, 0, 0, 3, 1)
        layout.addWidget(messageLayoutWidget, 3, 0, 1, 1)
        layout.addWidget(controlLayoutWidget, 4, 0, 1, 1)
        layout.addWidget(buttonLayoutWidget, 5, 0, 1, 1)
        return layout
    ## @brief Creates the motion estimation UI.
    def _createMotionEstimationUI(self):
        """Build and return the layout of the motion-estimation tab."""
        layout = QGridLayout()
        self.videoDepthME = QLabel()
        # 3-D trajectory view plus two 2-D trajectory plots (z over time, x/y).
        self.motionDisplay = PointCloudGLWidget()
        self.trajectoryPlotDepth = LinePlotWidget()
        self.trajectoryPlotDepth.setAxisLabel("time", "z")
        self.trajectoryPlotXY = LinePlotWidget()
        self.trajectoryPlotXY.setAxisLabel("x", "y")
        start = QPushButton("Start")
        start.clicked.connect(self._startMotionEstimation)
        buttonLayout = QHBoxLayout()
        buttonLayout.addWidget(start)
        buttonLayoutWidget = QWidget()
        buttonLayoutWidget.setLayout(buttonLayout)
        # Estimator tuning parameters, forwarded directly to the backend.
        inliersLabel = QLabel("inliers")
        self.inliers = QSpinBox()
        self.inliers.valueChanged.connect(self.worker.updateInlierLimit)
        self.inliers.setRange(1, 200)
        self.inliers.setSingleStep(1)
        maxDepthLabel = QLabel("maxDepth")
        self.maxDepth = QSpinBox()
        self.maxDepth.valueChanged.connect(self.worker.updateMaxDepth)
        self.maxDepth.setRange(1, 5000)
        self.maxDepth.setSingleStep(1)
        reprojectionErrorLabel = QLabel("reprojectionError")
        self.reprojectionError = QDoubleSpinBox()
        self.reprojectionError.valueChanged.connect(self.worker.updateReprojectionError)
        self.reprojectionError.setRange(0.01, 10)
        self.reprojectionError.setSingleStep(0.01)
        controlLayout = QGridLayout()
        controlLayout.addWidget(inliersLabel, 0, 0, 1, 1)
        controlLayout.addWidget(self.inliers, 0, 1, 1, 1)
        controlLayout.addWidget(maxDepthLabel, 0, 2, 1, 1)
        controlLayout.addWidget(self.maxDepth, 0, 3, 1, 1)
        controlLayout.addWidget(reprojectionErrorLabel, 0, 4, 1, 1)
        controlLayout.addWidget(self.reprojectionError, 0, 5, 1, 1)
        controlLayoutWidget = QWidget()
        controlLayoutWidget.setLayout(controlLayout)
        layout.addWidget(self.motionDisplay, 0, 0, 3, 4)
        layout.addWidget(self.videoDepthME, 3, 0, 1, 2)
        layout.addWidget(self.trajectoryPlotDepth, 3, 2, 2, 1)
        layout.addWidget(self.trajectoryPlotXY, 3, 3, 2, 1)
        layout.addWidget(controlLayoutWidget, 5, 0, 1, 4)
        layout.addWidget(buttonLayoutWidget, 6, 0, 1, 4)
        return layout
    ## @brief Creates the vSLAM UI.
    def _createVSlamUI(self):
        """Build and return the layout of the visual-SLAM tab."""
        layout = QGridLayout()
        self.videoDepthVSlam = QLabel()
        # Renders the estimated trajectory and the feature map in 3-D.
        self.motionDisplayVSlam = PointCloudGLWidget()
        start = QPushButton("Start")
        start.clicked.connect(self._startVSlam)
        buttonLayout = QHBoxLayout()
        buttonLayout.addWidget(start)
        buttonLayoutWidget = QWidget()
        buttonLayoutWidget.setLayout(buttonLayout)
        layout.addWidget(self.motionDisplayVSlam, 0, 0, 4, 4)
        layout.addWidget(self.videoDepthVSlam, 2, 0, 1, 1)
        layout.addWidget(buttonLayoutWidget, 4, 0, 1, 4)
        return layout
# Updates the video stream labels.
def updateVideo(
        self, v0, v1, v_depth_color, v_depth_gray, v_feature, trajectory, featureMap):
    """Refresh the widgets of the currently active tab.

    Called by the worker with the latest frames; only the widgets that
    belong to the worker's current mode are updated.
    """
    if self.worker.mode == Modes.Calibration:
        # Frames are flowing, so calibration is ready: reveal its controls.
        self.process.show()
        self.takeImage.show()
        self.ignoreExistingImageData.setDisabled(False)
        self.advanced.setDisabled(False)
        self.video0Calib.setPixmap(v0)
        self.video1Calib.setPixmap(v1)
    elif self.worker.mode == Modes.BlockMatching:
        self.video0Bm.setPixmap(v0)
        self.video1Bm.setPixmap(v1)
        self.video_disp.setPixmap(v_depth_color)
        # Re-project the gray depth map into 3D and refresh the GL view.
        pointCloud = self.pointCloud.calculatePointCloud(v_depth_gray)
        self.pointCloud.setMapVBO(pointCloud)
        self.pointCloud.updateGL()
    elif self.worker.mode == Modes.FeatureDetection:
        self.videoFD.setPixmap(v_feature)
    elif self.worker.mode == Modes.MotionEstimation:
        self.videoDepthME.setPixmap(v_depth_color)
        self.motionDisplay.setTrajectoryVBO(trajectory)
        self.motionDisplay.updateGL()
        if trajectory is not None:
            # The [:, i, 3] indexing suggests trajectory is an array of 4x4
            # pose matrices whose translation column is plotted (z over
            # time, then x vs y). TODO(review): confirm the pose layout.
            self.trajectoryPlotDepth.plotData(
                np.arange(len(trajectory)), trajectory[:, 2, 3])
            self.trajectoryPlotXY.plotData(trajectory[:, 0, 3], trajectory[:, 1, 3])
    elif self.worker.mode == Modes.Mapping:
        self.motionDisplayVSlam.setTrajectoryVBO(trajectory)
        self.motionDisplayVSlam.setMapVBO(featureMap)
        self.motionDisplayVSlam.updateGL()
        self.videoDepthVSlam.setPixmap(v_depth_color)
# Shows/hides the appropriate controls for SGBM and BM.
def updateBmType(self, index):
    """Toggle the matcher-specific tuning controls and notify the worker.

    index 1 hides the texture/pre-filter controls; any other index shows
    them again.
    """
    matcherControls = (
        self.textureThresholdLabel, self.textureThreshold,
        self.preFilterSizeLabel, self.preFilterSize,
        self.preFilterCapLabel, self.preFilterCap,
        self.preFilterTypeLabel, self.preFilterType,
        self.smallerBlockSizeLabel, self.smallerBlockSize,
    )
    for control in matcherControls:
        if index == 1:
            control.hide()
        else:
            control.show()
    # The worker may not exist yet during UI construction.
    if self.worker is not None:
        self.worker.updateBmType(index)
# Slot invoked when the worker thread's run loop has finished.
def thread_complete(self):
    print("Worker thread stopped...")
# Updates the calib UI control from the worker thread.
def updateCalibInfo(self, rms, message):
    """Display the latest calibration RMS value and status message."""
    rmsText = f"{rms}"
    self.RMSValue.setText(rmsText)
    self.calibInfo.setText(message)
# Updates the camera indices in all tabs.
def updateCameraIndices(self, leftIndex, rightIndex, indicesList):
    """Populate (first call only) and select the camera-index combo boxes
    on both the block-matching and calibration tabs."""
    # The item lists are only filled once; later calls just reselect.
    if self.bmCameraIndexLeft.count() == 0:
        for index in indicesList:
            label = f"{index}"
            self.bmCameraIndexLeft.addItem(label)
            self.bmCameraIndexRight.addItem(label)
            self.calibCameraIndexLeft.addItem(label)
            self.calibCameraIndexRight.addItem(label)
    for box in (self.bmCameraIndexLeft, self.calibCameraIndexLeft):
        box.setCurrentIndex(leftIndex)
    for box in (self.bmCameraIndexRight, self.calibCameraIndexRight):
        box.setCurrentIndex(rightIndex)
# Sets the ranges, limits and signal connections for each UI element.
def _initUIElements(self):
    """Wire every control to its worker slot and load the saved settings."""
    # Populate the camera-index combo boxes from the detected sensors.
    info = self.worker.getSensorIndices()
    self.updateCameraIndices(*info)
    # wire bm signals/slots
    self.blockMatching.currentIndexChanged.connect(
        self.updateBmType)
    self.resolutionBm.currentIndexChanged.connect(
        self.worker.updateResolution)
    self.drawEpipolar.toggled.connect(
        self.worker.updateDrawEpipolar)
    self.textureThreshold.valueChanged.connect(
        self.worker.updateTextureThreshold)
    self.min_disp.valueChanged.connect(self.worker.updateMin_disp)
    self.num_disp.valueChanged.connect(self.worker.updateNum_disp)
    self.blockSize.valueChanged.connect(self.worker.updateBlockSize)
    self.uniquenessRatio.valueChanged.connect(
        self.worker.updateUniquenessRatio)
    self.speckleWindowSize.valueChanged.connect(
        self.worker.updateSpeckleWindowSize)
    self.speckleRange.valueChanged.connect(self.worker.updateSpeckleRange)
    self.disp12MaxDiff.valueChanged.connect(self.worker.updateDisp12MaxDiff)
    self.smallerBlockSize.valueChanged.connect(self.worker.updateSmallerBlockSize)
    self.preFilterType.valueChanged.connect(self.worker.updatePreFilterType)
    self.preFilterSize.valueChanged.connect(self.worker.updatePrefilterSize)
    self.preFilterCap.valueChanged.connect(self.worker.updatePrefilterCap)
    self.worker.signals.cameraIndicesUpdated.connect(
        self.updateCameraIndices)
    self.bmCameraIndexLeft.currentIndexChanged.connect(
        self.worker.updateLeftCameraIndex)
    self.bmCameraIndexRight.currentIndexChanged.connect(
        self.worker.updateRightCameraIndex)
    self.bmSwapCameras.clicked.connect(self.worker.swapSensorIndices)
    # wire calib signals/slots
    self.calibratorLayoutWidget.takeImageTriggered.connect(
        self.worker.saveImage)
    self.process.clicked.connect(self.worker.calibrateSensor)
    self.takeImage.clicked.connect(self.worker.saveImage)
    self.resolutionCal.currentIndexChanged.connect(
        self.worker.updateResolution)
    self.ignoreExistingImageData.toggled.connect(
        self.worker.setIgnoreExistingImageData)
    self.advanced.toggled.connect(self.worker.enableAdvancedCalib)
    self.calib_image_index.valueChanged.connect(
        self.worker.updateCalib_image_index)
    self.rms_limit.valueChanged.connect(self.worker.updateRms_limit)
    self.increment.valueChanged.connect(self.worker.updateIncrement)
    self.max_rms.valueChanged.connect(self.worker.updateMax_rms)
    # Worker -> UI feedback connections.
    self.worker.signals.updateCalibInfo.connect(self.updateCalibInfo)
    self.worker.signals.rmsLimitUpdated.connect(self.rms_limit.setValue)
    self.worker.signals.calibImageIndexUpdated.connect(
        self.calib_image_index.setValue)
    self.calibCameraIndexLeft.currentIndexChanged.connect(
        self.worker.updateLeftCameraIndex)
    self.calibCameraIndexRight.currentIndexChanged.connect(
        self.worker.updateRightCameraIndex)
    self.calibSwapCameras.clicked.connect(self.worker.swapSensorIndices)
    # load values
    try:
        self._setLoadedValues(f"{self.SETTINGS_DIR}/lastSaved.npz")
    except IOError:
        # Without saved settings the app cannot continue; shut down cleanly.
        print("Settings file at {0} not found"
              .format(f"{self.SETTINGS_DIR}/lastSaved.npz"))
        self.sigint_handler()
    self.updateBmType(self.blockMatching.currentIndex())
## @brief Start the thread with calibration.
#
# If the thread is already running, it will not restart it;
# the worker is just switched to calibration mode, if it wasn't yet.
def _startCalibration(self):
    print("Starting calibration...")
    if self.worker.mode == Modes.Calibration:
        print("Calibration is already running...")
        return
    # Push the current checkbox states to the worker before switching mode.
    self.worker.enableAdvancedCalib(self.advanced.isChecked())
    self.worker.setIgnoreExistingImageData(
        self.ignoreExistingImageData.isChecked())
    # Execute
    if self.worker.mode == Modes.NoMode:
        # First activation: hand the worker to its thread and start it.
        self.worker.moveToThread(self.workerThread)
        self.workerThread.finished.connect(self.worker.deleteLater)
        self.workerThread.started.connect(self.worker.run)
        self.workerThread.start()
    self.worker.state = States.Idle
    self.worker.mode = Modes.Calibration
## @brief Start the thread with block matching.
#
# If the thread is already running, it will not restart it;
# the worker is just switched to block matching mode, if it wasn't yet.
def _startBMConfiguration(self):
    print("Starting block matching...")
    if self.worker.mode == Modes.BlockMatching:
        print("Block Matching is already running...")
        return
    # Execute
    if self.worker.mode == Modes.NoMode:
        # First activation: hand the worker to its thread and start it.
        self.worker.moveToThread(self.workerThread)
        self.workerThread.finished.connect(self.worker.deleteLater)
        self.workerThread.started.connect(self.worker.run)
        self.workerThread.start()
    self.worker.state = States.Idle
    self.worker.mode = Modes.BlockMatching
## @brief Start the thread with feature detection.
#
# If the thread is already running, it will not restart it;
# the worker is just switched to feature detection mode, if it wasn't yet.
def _startFeatureDetection(self):
    print("Starting feature detection...")
    if self.worker.mode == Modes.FeatureDetection:
        print("Feature detection is already running...")
        return
    # Execute
    if self.worker.mode == Modes.NoMode:
        # First activation: hand the worker to its thread and start it.
        self.worker.moveToThread(self.workerThread)
        self.workerThread.finished.connect(self.worker.deleteLater)
        self.workerThread.started.connect(self.worker.run)
        self.workerThread.start()
    self.worker.state = States.Idle
    self.worker.mode = Modes.FeatureDetection
## @brief Start the thread with motion estimation.
#
# If the thread is already running, it will not restart it;
# the worker is just switched to motion estimation mode, if it wasn't yet.
def _startMotionEstimation(self):
    print("Starting motion estimation...")
    if self.worker.mode == Modes.MotionEstimation:
        print("Motion estimation is already running...")
        return
    # Execute
    if self.worker.mode == Modes.NoMode:
        # First activation: hand the worker to its thread and start it.
        self.worker.moveToThread(self.workerThread)
        self.workerThread.finished.connect(self.worker.deleteLater)
        self.workerThread.started.connect(self.worker.run)
        self.workerThread.start()
    self.worker.state = States.Idle
    self.worker.mode = Modes.MotionEstimation
## @brief Start the thread with mapping functionality.
#
# If the thread is already running, it will not restart it;
# the worker is just switched to mapping mode, if it wasn't yet.
def _startVSlam(self):
    print("Starting Visual SLAM...")
    if self.worker.mode == Modes.Mapping:
        print("Visual SLAM is already running...")
        return
    # Execute
    if self.worker.mode == Modes.NoMode:
        # First activation: hand the worker to its thread and start it.
        self.worker.moveToThread(self.workerThread)
        self.workerThread.finished.connect(self.worker.deleteLater)
        self.workerThread.started.connect(self.worker.run)
        self.workerThread.start()
    self.worker.state = States.Idle
    self.worker.mode = Modes.Mapping
# Terminate UI and the threads appropriately.
def sigint_handler(self):
    """Stop the worker thread, clear the plots and quit the application."""
    if self.worker is not None:
        # Ask the worker loop to exit, then wait for its thread to finish
        # before tearing down the UI.
        self.worker.stop = True
        self.workerThread.quit()
        self.workerThread.wait()
    self.trajectoryPlotXY.clear()
    self.trajectoryPlotDepth.clear()
    print("Exiting app through GUI")
    QApplication.quit()
import json
import os
from api_swgoh_help import api_swgoh_help, settings
from env import get_env
from initialise_data_structures import initialise_data_structures
from texttable import Texttable
from data_lookups import mod_set_stats, mod_slots, unit_stats, primary_stat_names_map
saved_data = initialise_data_structures()
def add_stat(stats, stat_name, value, upgrade_tier):
    """Append a [value, upgrade_tier] pair to the list kept under
    stat_name in stats, creating the list on first use."""
    stats.setdefault(stat_name, []).append([value, upgrade_tier])
def get_mods(allycode=0, force_reload=False, unit_exclusions=None, unit_inclusions=None):
    """Fetch a player's roster mods, print a missing/upgradable-mods table
    and return the analysed data.

    Results are cached in ``saved-mods.json`` keyed by ally code.

    Args:
        allycode: The player's ally code; 0 means "use the one from env".
        force_reload: When True, ignore any cached entry and re-fetch.
        unit_exclusions: Optional collection of unit names to skip.
        unit_inclusions: Optional collection of unit names to restrict to.

    Returns:
        dict with keys "mods", "stats", "chars" and "char_name_map".
    """
    env_data = get_env()
    if allycode == 0:
        allycode = env_data["allycode"]
    # BUG FIX: json.dump stringifies dict keys, so the reloaded cache is
    # keyed by str; look up with a str key or an int allycode never hits.
    cache_key = str(allycode)
    saved_mods = {}
    if not force_reload and os.path.isfile('saved-mods.json'):
        with open('saved-mods.json', 'r', encoding='utf-8') as f:
            saved_mods = json.load(f)
        if cache_key in saved_mods:
            return saved_mods[cache_key]
    # Cache miss: pull the roster from the swgoh.help API.
    creds = settings(env_data["username"], env_data["password"])
    client = api_swgoh_help(creds)
    players_response = client.fetchRoster([allycode], enums=False)
    units_without_mods = {}     # unit_id -> list of empty mod slots
    units_upgradable_mods = {}  # unit_id -> list of slots with unrevealed stats
    stats = {}                  # stat name -> [[value, upgrade_tier], ...]
    mods = {}                   # mod id -> mod details
    chars = {}                  # unit_id -> character summary
    char_name_map = {}          # character name -> unit_id
    for unit_id, unit_array in players_response[0].items():
        unit = unit_array[0]
        unit_name = saved_data["toons"][unit_id]["nameKey"]
        if unit_exclusions and unit_name in unit_exclusions:
            continue
        # BUG FIX: the original chained comparison
        # `name in unit_exclusions not in unit_inclusions` never actually
        # tested membership in unit_inclusions.
        if unit_inclusions and unit_name not in unit_inclusions:
            continue
        if unit["level"] < 50:
            continue  # units below level 50 cannot equip all mods
        chars[unit_id] = {
            "char_name": unit_name,
            "starLevel": unit["starLevel"],
            "level": unit["level"],
            "mods": []
        }
        char_name_map[unit_name] = unit_id
        for slot_index in range(len(unit["mods"])):
            mod = unit["mods"][slot_index]
            mod_slot = mod_slots[slot_index]
            if "id" not in mod:
                # Empty slot; only report it for real characters (type 1).
                if unit["type"] == 1:
                    missing_slots = units_without_mods.get(unit_id, [])
                    missing_slots.append(mod_slot)
                    units_without_mods[unit_id] = missing_slots
                continue
            chars[unit_id]["mods"].append(mod["id"])
            mod_stats = {}
            mods[mod["id"]] = {
                "set": mod_set_stats[mod["set"]],
                "slot": mod_slot,
                "stats": mod_stats,
                "primary": primary_stat_names_map[mod["stat"][0][0]],
                "char_name": unit_name
            }
            for i in range(5):
                if i > len(mod["stat"]) - 1:
                    print(unit_id, "appears to not have the correct amount of mod slots")
                    break
                if mod["stat"][i][0] == 0:
                    # Stat id 0: an unrevealed secondary, i.e. the mod can
                    # still be upgraded. TODO(review): confirm semantics.
                    upgradable_mods = units_upgradable_mods.get(unit_id, [])
                    upgradable_mods.append(mod_slot)
                    units_upgradable_mods[unit_id] = upgradable_mods
                    name = unit_stats[0]
                    mod_stats[name] = 0
                else:
                    name = unit_stats[mod["stat"][i][0]]
                    mod_stats[name] = mod["stat"][i][1]
                add_stat(stats, name, mod["stat"][i][1], mod["stat"][i][2])
    # Report which characters miss mods or can still upgrade them.
    table = Texttable()
    table.set_cols_align(["l", "l", "l"])
    table.set_cols_valign(["m", "m", "m"])
    rows = [["Character", "Missing Mods", "Upgradable Mods"]]
    table_units = set(units_without_mods.keys())
    table_units.update(set(units_upgradable_mods.keys()))
    for unit_id in table_units:
        rows.append([
            saved_data["toons"][unit_id]["nameKey"],
            ", ".join(units_without_mods.get(unit_id, [])),
            ", ".join(units_upgradable_mods.get(unit_id, []))
        ])
    table.add_rows(rows)
    print(table.draw())
    # save data
    saved_mods[cache_key] = {"mods": mods, "stats": stats, "chars": chars, "char_name_map": char_name_map}
    with open('saved-mods.json', 'w', encoding='utf-8') as f:
        json.dump(saved_mods, f, ensure_ascii=False, indent=4)
    return saved_mods[cache_key]
# run with force reload to update cache of stored data
# Script entry: analyse every known toon (empty exclusions, full inclusion
# list) and refresh the saved-mods.json cache.
all_units = [j["nameKey"] for j in saved_data["toons"].values()]
exclusions = []
get_mods(force_reload=True, unit_exclusions=exclusions, unit_inclusions=all_units)
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.