seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
38527310676 | from cqpy import settings, cqp
from cqpy.events import on, EventType, Event
@on(EventType.Enable)
def on_enable(event: Event):
    """Greet the configured master account when the bot comes online.

    :param event: the Enable event delivered by the cqpy framework
    :return: None
    """
    # Guard clause: without a configured master we can only log a hint.
    if 'master' not in settings:
        cqp.add_log(10, __name__, '在settings.json中添加 `"master": 你的qq号` ,即可在机器人上线时收到问候啦!')
        return
    cqp.send_private_msg(settings['master'], '我上线啦!')
# Module-level switch toggled by group messages: when True the bot echoes
# every group message it receives.
REPEAT_MODE = False
@on(EventType.GroupMessage)
def on_group_message(event: Event):
    """Repeater demo: the master can toggle echo mode with chat commands.

    '复读开始' turns the repeater on, '复读结束' turns it off; while on, every
    group message is echoed back to the group.

    :param event: the GroupMessage event delivered by the cqpy framework
    :return: None
    """
    global REPEAT_MODE
    if 'master' not in settings:
        cqp.add_log(10, __name__, '在settings.json中添加 `"master": 你的qq号` ,即可体验复读机的快乐。')
        # BUG FIX: the original fell through and raised KeyError on
        # settings['master'] below whenever no master was configured.
        return
    from_master = event.event_args['from_qq'] == settings['master']
    msg = event.event_args['msg']
    if from_master and '复读开始' in msg:
        REPEAT_MODE = True
        cqp.add_log(10, __name__, '复读开始!')
        cqp.send_group_msg(event.event_args['from_group'], '复读开始!')
        return
    if from_master and '复读结束' in msg:
        REPEAT_MODE = False
        cqp.add_log(10, __name__, '复读结束!')
        cqp.send_group_msg(event.event_args['from_group'], '复读结束!')
        return
    if REPEAT_MODE:
        cqp.send_group_msg(event.event_args['from_group'], event.event_args['msg'])
| nnnewb/CQPy | cqpy/tutorial.py | tutorial.py | py | 1,451 | python | en | code | 1 | github-code | 36 |
10024409616 | import csv
import operator
import decimal
import numpy as np
import matplotlib.pyplot as pl
import calendar
import sys
import os.path
# Load the monthly turnover table and scatter-plot precipitation vs. visits.
# NOTE(review): file opened in binary mode ('rb') but fed to csv.reader —
# Python 3 requires text mode here; confirm whether this targets Python 2.
h = open('../results/MonthlyTurnoverOldCerrado.csv','rb')
data = csv.reader(h)
# One list per CSV column, filled row by row below.
months = []
bints = []
seasons = []
boundarys = []
sumprecips = []
avghumid = []
avgvisits = []
for column in data:
    months.append(column[0])
    bints.append(column[1])
    seasons.append(column[2])
    boundarys.append(column[3])
    sumprecips.append(column[4])
    avghumid.append(column[5])
    # NOTE(review): reads the same column as avghumid — probably column[6]
    # was intended for visits; verify against the CSV layout.
    avgvisits.append(column[5])
h.close()
#remove header
for i in [months, bints, seasons, boundarys, sumprecips, avghumid, avgvisits]:
    del i[0]
# Scatter plot of precipitation sum vs. average visits, saved as PDF.
pl.plot(sumprecips, avgvisits, 'ro')
pl.grid(True)
pl.title('sumprecip-Avgvisits')
plotpath = '../results/' + 'sumprecip-Avgvisits' + '.pdf'
pl.savefig(plotpath)
pl.show() | musikzauberin/buzz | code/OldCode/plotting2.py | plotting2.py | py | 828 | python | en | code | 0 | github-code | 36 |
39277796402 | from PIL import Image, ImageSequence
from generators import Generator
class OverlayGenerator(Generator):
    """Emoji generator that composites an animated overlay GIF on top of an
    input emoji image, producing an animated GIF.
    """

    # Maps the allowed overlay resource filename to the name fragment used
    # when building the output emoji name.
    ALLOWED_OVERLAYS = {
        "fire.gif": "burning",
        "fire2.gif": "burning",
        "sparkle.gif": "sparkling",
        "sparkle2.gif": "sparkling",
        "loving.gif": "loving",
    }

    def __init__(self):
        # Register under the 'overlay' generator name with a default overlay.
        super().__init__('overlay', defaults={
            "overlay": "fire.gif"
        })

    def generate(self, original_name, input_path, output_dir, options):
        """Composite the chosen overlay over the emoji and write a GIF.

        :param original_name: original emoji file name (used for naming)
        :param input_path: path to the source emoji image
        :param output_dir: directory the resulting GIF is written to
        :param options: per-request options; merged over ``self.defaults``
        :return: tuple of (written gif path, generated emoji name)
        :raises ValueError: when the requested overlay is not allowed
        """
        options = {**self.defaults, **options}
        overlay_file = options["overlay"]
        if overlay_file not in OverlayGenerator.ALLOWED_OVERLAYS:
            raise ValueError("Unknown overlay " + overlay_file)
        overlay_name = OverlayGenerator.ALLOWED_OVERLAYS[overlay_file]
        overlay = Image.open(f"resources/{overlay_file}")
        emoji = self.load_image(input_path)
        # load_image returns a sequence; only the first frame is used.
        emoji = emoji[0]
        emoji_name = Generator.get_emoji_name_from_file(original_name)
        frames = []
        emoji_w, emoji_h = emoji.size
        # GIF frames may carry per-frame palettes; reuse the first frame's
        # palette so colors stay consistent across frames.
        palette = None
        for i, overlay_frame in enumerate(ImageSequence.Iterator(overlay)):
            canvas = Image.new("RGBA", emoji.size, (255, 255, 255))
            if palette is None:
                palette = overlay_frame.getpalette()
            else:
                overlay_frame.putpalette(palette)
            # overlay_frame.save(f'../output/{overlay_name}.{i:02}.gif', 'GIF')
            # cropped_frame = fire_frame.crop((0, 0, emoji_w, emoji_h))
            overlay_frame.thumbnail(canvas.size)
            overlay_frame = overlay_frame.convert('RGBA')
            canvas.paste(emoji, (0, 0), mask=emoji)
            # Center the overlay by default ...
            offset = ((canvas.width - overlay_frame.width) // 2, (canvas.height - overlay_frame.height) // 2)
            # ... except fire, which is anchored to the bottom edge.
            if overlay_name == 'burning':
                offset = (0, emoji_h - overlay_frame.height + 5)
            canvas.paste(overlay_frame, offset, mask=overlay_frame)
            frames.append(canvas)
        return self.write_gif(frames, output_dir, emoji_name + ".gif", options), f'{overlay_name}_{original_name}'
| grdaneault/emojigen | api/generators/overlay.py | overlay.py | py | 2,116 | python | en | code | 0 | github-code | 36 |
21597631585 | import random
class Parameters(object):
    """Central configuration constants for the insect simulation."""

    # Event probabilities.
    prob_m = 0.65
    prob_f = 0.55
    prob_q = 0.5
    # Population / iteration sizes.
    insect_num = 100
    insect_iteration = 180
    scenario_num = 100
    # Grid dimensions and movement step.
    x = 200
    y = 200
    step = 10
    sample_num = 180
    iteration = 50
    pop_num = 50
    test = True
    insect_fall_machine = 1
    eliminated_number = 3*pop_num-5
    archive_maximum = 50
    alpha = 0.8
    # Bounds for the random insect count drawn below.
    max_insect_num = 100
    min_insect_num = 13
    season = None
    Treatment = True

    @classmethod
    def get_random_insect_number(cls):
        """Return a uniformly random insect count within the configured bounds.

        FIX: the first parameter was misleadingly named ``self`` on a
        classmethod, and the bounds were hard-coded duplicates of
        ``min_insect_num``/``max_insect_num``.
        """
        return random.randint(cls.min_insect_num, cls.max_insect_num)

    threshold = 0.6
    discount_q = 0.2
    discount_p = 0.3
    P = 2
    Q = 5000
# Module is import-only; nothing to run when executed as a script.
if __name__ == '__main__':
    pass
| Djiahui/insect | parameters.py | parameters.py | py | 603 | python | en | code | 0 | github-code | 36 |
40741554583 | import os
import time
def watch_for_shutdown_file():
    """Poll the script's directory for command marker files.

    Every 5 seconds checks for two sentinel files next to this script:
    - ``cmd.shutdown.tmp``: removed, then the machine is powered off and
      the loop exits.
    - ``cmd.update.tmp``: removed, then ``update.sh`` is executed and the
      loop continues.
    """
    directory = os.path.dirname(os.path.realpath(__file__))
    shutdown_file = os.path.join(directory, "cmd.shutdown.tmp")
    update_file = os.path.join(directory, "cmd.update.tmp")
    update_script = os.path.join(directory, "update.sh")
    while True:
        if os.path.exists(shutdown_file):
            # Remove the marker first so a reboot doesn't immediately re-trigger.
            os.remove(shutdown_file)
            print("SHUTDOWN!")
            os.system("systemctl poweroff")
            break
        if os.path.exists(update_file):
            os.remove(update_file)
            print("UPDATE!")
            os.system(update_script)
        time.sleep(5)
# Script entry point: run the watcher loop until shutdown is requested.
if __name__ == "__main__":
    watch_for_shutdown_file()
22577730329 | '''
Created on 06.12.2013
@author: hfrieden
Import an Arma 2/Arma 3 unbinarized MDL file
'''
import struct
import bpy
import bmesh
import os.path as path
import ArmaToolbox
import ArmaTools
def getLayerMask(layer):
    """Return a 20-entry boolean layer mask with only ``layer % 20`` set."""
    mask = [False] * 20
    mask[layer % 20] = True
    return mask
# Datatype reading
def readULong(filePtr):
    """Read a 4-byte native-order integer from *filePtr*.

    NOTE(review): struct format "i" is *signed* despite the ULong name —
    confirm whether unsigned "I" was intended for counts.
    """
    return struct.unpack("i", filePtr.read(4))[0]
def readSignature(filePtr):
    """Read a raw 4-byte signature (e.g. b'P3DM', b'TAGG') as bytes."""
    return filePtr.read(4)
def readFloat(filePtr):
    """Read a 4-byte native-order float from *filePtr*."""
    return struct.unpack("f", filePtr.read(4))[0]
def readChar(filePtr):
    """Read a single byte and return it as a length-1 bytes object."""
    return struct.unpack("c", filePtr.read(1))[0]
def readByte(filePtr):
    """Read a single byte as a *signed* integer (-128..127)."""
    return struct.unpack("b", filePtr.read(1))[0]
def readString(filePtr):
    """Read a NUL-terminated byte string from *filePtr*, decoded as UTF-8."""
    buf = bytearray()
    while True:
        ch = filePtr.read(1)
        if ch == b'\000':
            break
        buf += ch
    return buf.decode("utf-8")
def makeLodName(fileName, lodLevel):
    """Build ``<basename-without-extension>.<lodLevel>`` for a model file."""
    stem = path.basename(fileName).split(".")[0]
    return "{0}.{1}".format(stem, lodLevel)
def maybeAddEdgeSplit(obj):
    """Enable auto-smoothing on the object's mesh data.

    Sets the auto-smooth angle to ~180 degrees (pi radians) so only edges
    explicitly marked sharp split shading. The old EdgeSplit-modifier
    approach is kept below, commented out, for reference.
    """
    obj.data.use_auto_smooth = True
    obj.data.auto_smooth_angle = 3.1415927
    #modifier = obj.modifiers.get("FHQ_ARMA_Toolbox_EdgeSplit")
    #if modifier is None:
    #    modifier = obj.modifiers.new("FHQ_ARMA_Toolbox_EdgeSplit",
    #        type='EDGE_SPLIT')
    #
    #    modifier.show_expanded = False
    #    modifier.use_edge_angle = False # Want only sharp edges
    #    modifier.use_edge_sharp = True
    #obj.data.show_edge_sharp = True
def correctedResolution(r):
    """Snap a raw LOD resolution value to the nearest known LOD constant.

    Values below 1000 (plain view LODs) are returned unchanged; anything
    else is matched against the table of special resolutions and the
    closest entry is returned. Ties resolve to the earlier table entry,
    matching the original strict-less-than scan.
    """
    if r < 1000:
        return r
    res = int(r)
    # Known LOD resolution constants, in the original lookup order.
    known = (
        10000000000000,
        3000000000000000,
        8000000000000000,
        10000,
        14000000000000000,
        17000000000000000,
        12000000000000000,
        20000000000000000,
        1200,
        9000000000000000,
        15000000000000000,
        13000000000000000,
        18000000000000000,
        1000000000000000,
        1100,
        21000000000000000,
        4000000000000000,
        40000000000000,
        7000000000000000,
        10000000000000000,
        6000000000000000,
        1000,
        16000000000000000,
        20000000000000,
        19000000000000000,
        2000000000000000,
        11000,
        20000,
        5000000000000000,
        11000000000000000,
    )
    return min(known, key=lambda n: abs(n - res))
def resolutionName(r):
    """Map a raw LOD resolution value to a human-readable LOD name.

    Values below 1000 are plain view LODs and are returned as the decimal
    string of their integer value. Otherwise the nearest known resolution
    is looked up; for Stencil Shadow (1e4) and Edit (2e4) LODs the offset
    from the base value is appended.
    """
    res = int(r)
    if r < 1000:
        return str(res)
    names = {
        1.000e+3: 'View Gunner',
        1.100e+3: 'View Pilot',
        1.200e+3: 'View Cargo',
        1.000e+4: 'Stencil Shadow',
        2.000e+4: 'Edit',
        1.100e+4: 'Shadow Volume',
        1.000e+13: 'Geometry',
        1.000e+15: 'Memory',
        2.000e+15: 'Land Contact',
        3.000e+15: 'Roadway',
        4.000e+15: 'Paths',
        5.000e+15: 'Hit Points',
        6.000e+15: 'View Geometry',
        7.000e+15: 'Fire Geometry',
        8.000e+15: 'View Cargo Geometry',
        9.000e+15: 'View Cargo Fire Geometry',
        1.000e+16: 'View Commander',
        1.100e+16: 'View Commander Geometry',
        1.200e+16: 'View Commander Fire Geometry',
        1.300e+16: 'View Pilot Geometry',
        1.400e+16: 'View Pilot Fire Geometry',
        1.500e+16: 'View Gunner Geometry',
        1.600e+16: 'View Gunner Fire Geometry',
        1.700e+16: 'Sub Parts',
        1.800e+16: 'Cargo View shadow volume',
        1.900e+16: 'Pilot View shadow volume',
        2.000e+16: 'Gunner View shadow volume',
        2.100e+16: 'Wreckage',
        2.000e+13: 'Geometry Buoyancy',
        4.000e+13: 'Geometry PhysX'
    }
    # Nearest-key lookup; ties resolve to the earlier entry, like the
    # original strict-less-than scan.
    nearest = min(names, key=lambda n: abs(n - res))
    label = names.get(nearest, "?")
    if nearest == 1.000e+4 or nearest == 2.000e+4:
        # Shadow/edit LODs encode an extra offset in the low digits.
        label = label + " " + str(r - nearest)
    return label
def decodeWeight(b):
    """Decode a signed byte from a named-selection table into a 0..1 weight.

    0 means no weight, 2 means full weight; other positive bytes scale
    down from 1.0 and negative bytes scale up from 0.0 in ~1% steps.
    """
    if b == 0:
        return 0.0
    if b == 2:
        return 1.0
    if b > 2:
        return 1.0 - round((b - 2) / 2.55555) * 0.01
    if b < 0:
        return -round(b / 2.55555) * 0.01
    # Remaining case (b == 1) maps to full weight.
    return 1.0
def loadLOD(context, filePtr, objectName, materialData, layerFlag, lodnr):
    """Read one P3DM LOD from an open MLOD file and build a Blender object.

    Parses the vertex/normal/face tables, the TAGG section (sharp edges,
    named properties, UV sets, mass, named selections/proxies) and the
    trailing resolution float, then creates a mesh, object and collection
    and fills in the Arma Toolbox custom properties.

    :param context: Blender context used to link the new collection
    :param filePtr: binary file object positioned at the LOD's P3DM header
    :param objectName: base name for the created mesh/object
    :param materialData: dict cache keyed by (texture, rvmat) -> material,
        shared across LODs so materials are created only once
    :param layerFlag: legacy layer-placement flag (currently unused here)
    :param lodnr: index of this LOD in the file (legacy layer numbering)
    :return: 0 on success, -1 on a malformed header or missing TAGG block
    """
    global objectLayers
    meshName = objectName
    weightArray = []
    # Check for P3DM signature
    sig = readSignature(filePtr)
    if sig != b'P3DM':
        return -1
    # Read major and minor version
    major = readULong(filePtr)
    minor = readULong(filePtr)
    if major != 0x1c:
        print("Unknown major version {0}".format(major))
        return -1
    if minor != 0x100:
        print("Unknown minor version {0}".format(minor))
        return -1
    numPoints = readULong(filePtr)
    numNormals = readULong(filePtr)
    numFaces = readULong(filePtr)
    print("read lod")
    dummyFlags = readULong(filePtr)
    # Read the Points. Points are XYZTriples followed by an ULONG flags word.
    # Note the Y/Z swap: Arma is Y-up, Blender is Z-up.
    verts = []
    for i in range(0, numPoints):
        point = struct.unpack("fffi", filePtr.read(16))
        pnt = [point[0], point[2], point[1]]
        verts.append(pnt)
    print("normals (",numNormals, ")...")
    # Normals are read to keep the file cursor in sync but not applied.
    normals = []
    for i in range(0, numNormals):
        normal = struct.unpack("fff", filePtr.read(12))
        nrm = [normal[0], normal[1], normal[2]]
        normals.append(normal)
        #print ("Normal = ", normal)
    faceData = []
    faces = []
    print("faces...")
    # Start reading and adding faces: each face stores up to 4 vertex
    # entries (index, normal index, u, v) plus flags, texture and rvmat.
    for i in range(0, numFaces):
        numSides = readULong(filePtr)
        # Vertex table
        vIdx = []
        nrmIdx = []
        uvs = []
        for n in range(0, 4):
            vtable = struct.unpack("iiff", filePtr.read(16))
            if n<numSides:
                vIdx.append(vtable[0])
                nrmIdx.append(vtable[1])
                uvs.append( [vtable[2], vtable[3]])
        faceFlags = readULong(filePtr)
        textureName = readString(filePtr)
        materialName = readString(filePtr)
        faceData.append(
            (numSides, nrmIdx, uvs, faceFlags, textureName, materialName)
        )
        faces.append(vIdx)
        # Handle the material if it doesn't exists yet
        if len(textureName) > 0 or len(materialName)>0:
            try:
                materialData[(textureName, materialName)]
            except:
                # Need to create a new material for this
                mat = bpy.data.materials.new(path.basename(textureName) + " :: " + path.basename(materialName))
                mat.armaMatProps.colorString = textureName
                mat.armaMatProps.rvMat = materialName
                # A leading '#' marks a procedural/custom color string.
                if len(textureName) > 0 and textureName[0] == '#':
                    mat.armaMatProps.texType = 'Custom'
                    mat.armaMatProps.colorString = textureName
                else:
                    mat.armaMatProps.texType = 'Texture'
                    mat.armaMatProps.texture = textureName
                    mat.armaMatProps.colorString = ""
                materialData[(textureName, materialName)] = mat
    if readSignature(filePtr) != b'TAGG':
        print("No tagg signature")
        return -1;
    # Create the mesh. Doing it here makes the named selections
    # easier to read.
    mymesh = bpy.data.meshes.new(name=meshName)
    mymesh.from_pydata(verts, [], faces)
    mymesh.update(calc_edges = True)
    obj = bpy.data.objects.new(meshName, mymesh)
    # TODO: Maybe add a "logical Collection" option that
    # Collects all geometries, shadows, custom etc in a collection.
    scn = bpy.context.scene
    coll = bpy.data.collections.new(meshName)
    context.scene.collection.children.link(coll)
    coll.objects.link(obj)
    #NEIN! coll.hide_viewport = True
    #scn.objects.link(obj)
    #scn.objects.active = obj
    # Build Edge database to make finding sharp edges easier.
    # Keys are (lowIdx, highIdx) vertex pairs -> mesh edge index.
    edgeDict = dict()
    for edge in mymesh.edges:
        v1 = edge.vertices[0]
        v2 = edge.vertices[1]
        if (v1 > v2): # Swap if out of order
            temp = v2
            v2 = v1
            v1 = temp
        #print(f"adding edge index {edge.index} as ({v1},{v2}) to edge dictionary")
        edgeDict[(v1,v2)] = edge.index
    print("taggs")
    # TAGG section: a sequence of (active, name, byteCount) records until
    # the #EndOfFile# tag. Inactive tags and unknown system tags are skipped.
    loop = True
    sharpEdges = None
    weight = None
    while loop:
        active = readChar(filePtr)
        tagName = readString(filePtr)
        numBytes = readULong(filePtr)
        #print ("tagg: ",tagName, " size ", numBytes)
        if active == b'\000':
            if numBytes != 0:
                filePtr.seek(numBytes, 1)
        else:
            if tagName == "#EndOfFile#":
                loop = False
            elif tagName == "#SharpEdges#":
                # Read Sharp Edges: pairs of vertex indices, 8 bytes each.
                sharpEdges = []
                for i in range(0,numBytes,8):
                    n1 = readULong(filePtr)
                    n2 = readULong(filePtr)
                    sharpEdges.append([n1, n2])
                #print ("sharp edges", sharpEdges)
            elif tagName == "#Property#":
                # Read named property: two fixed 64-byte strings.
                propName = struct.unpack("64s", filePtr.read(64))[0].decode("utf-8")
                propValue = struct.unpack("64s", filePtr.read(64))[0].decode("utf-8")
                item = obj.armaObjProps.namedProps.add()
                item.name=propName;
                item.value=propValue
            elif tagName == "#UVSet#":
                id = readULong(filePtr)
                layerName = "UVSet " + str(id)
                if id == 0:
                    # Name first layer "UVMap" so that there isn't any fuckups with uv sets
                    layerName = "UVMap"
                #print("adding UV set " + layerName)
                mymesh.uv_layers.new(name=layerName)
                layer = mymesh.uv_layers[-1]
                index = 0
                # UVs are stored per face-loop, in face order; V is flipped
                # because Blender's V axis points up.
                for faceIdx in range(0,numFaces):
                    n = faceData[faceIdx][0]
                    for x in range(0,n):
                        u = readFloat(filePtr)
                        v = readFloat(filePtr)
                        layer.data[index].uv = [u,1 - v]
                        index += 1
            elif tagName == "#Mass#":
                # One float per vertex; total mass is their sum.
                weightArray = []
                weight = 0;
                for idx in range (0,numPoints):
                    f = readFloat(filePtr)
                    weightArray.append(f)
                    weight += f
            elif tagName[0] == '#':
                # System tag we don't read
                filePtr.seek(numBytes, 1)
            else:
                # Named Selection
                # Add a vertex group
                # First, check the tagName for a proxy
                newVGrp = True
                if len(tagName) > 5:
                    if tagName[:6] == "proxy:":
                        # Proxy selections become a "@@armaproxy" vertex group
                        # plus an entry in the object's proxy array.
                        newVGrp = False
                        vgrp = obj.vertex_groups.new(name = "@@armaproxy")
                        prp = obj.armaObjProps.proxyArray
                        prx = tagName.split(":")[1]
                        if prx.find(".") != -1:
                            a = prx.split(".")
                            prx = a[0]
                            idx = a[-1]
                            if len(idx) == 0:
                                idx = "1"
                        else:
                            idx = "1"
                        n = prp.add()
                        n.name = vgrp.name
                        n.index = int(idx)
                        n.path = "P:" + prx
                        tagName = "@@armyproxy"
                if newVGrp == True:
                    vgrp = obj.vertex_groups.new(name = tagName)
                # Per-vertex weights: only non-zero weights are assigned.
                for i in range(0, numPoints):
                    b = readByte(filePtr)
                    w = decodeWeight(b)
                    if (w>0):
                        vgrp.add([i],float(w),'REPLACE')
                    #print("b = ",b,"w = ", w)
                # Per-face weights are read to advance the cursor but unused.
                for i in range(0, numFaces):
                    b = readByte(filePtr)
                    w = decodeWeight(b)
                # print("b = ",b,"w = ", w)
                # if w== 1.0:
                #     pPoly = obj.data.polygons[i]
                #     for n in range(0,len(pPoly.vertices)):
                #         idx = pPoly.vertices[n]
                #         vgrp.add([idx], w, 'REPLACE')
                #filePtr.seek(numFaces, 1)
    # Done with the taggs, only the resolution is left to read
    resolution = readFloat(filePtr)
    #meshName = meshName + "." + resolutionName(resolution)
    meshName = resolutionName(resolution)
    mymesh.name = meshName
    obj.name = meshName
    coll.name = meshName
    print("materials...")
    indexData = {}
    # Set up materials: assign each face its material slot, creating
    # slots lazily as materials are first encountered.
    for faceIdx in range(0,numFaces):
        fd = faceData[faceIdx]
        textureName = fd[4]
        materialName = fd[5]
        try:
            mat = materialData[(textureName, materialName)]
            # Add the material if it isn't in
            if mat.name not in mymesh.materials:
                mymesh.materials.append(mat)
                thisMatIndex = len(mymesh.materials)-1
                indexData[mat] = thisMatIndex
                #print("added new material at " + str(thisMatIndex))
            else:
                thisMatIndex = indexData [mat]
                #print("old material " + str(thisMatIndex))
            mymesh.polygons[faceIdx].material_index = thisMatIndex
        except:
            pass
    print("sharp edges")
    # Set sharp edges
    #if sharpEdges is not None:
    #    for edge in mymesh.edges:
    #        v1 = edge.vertices[0]
    #        v2 = edge.vertices[1]
    #        if [v1,v2] in sharpEdges:
    #            mymesh.edges[edge.index].use_edge_sharp = True
    #        elif [v2,v1] in sharpEdges:
    #            mymesh.edges[edge.index].use_edge_sharp = True
    #        else:
    #            print(f"Edge pair {v1},{v2} not found in edges")
    # New Code
    if sharpEdges is not None:
        for sharpEdge in sharpEdges:
            v1 = sharpEdge[0]
            v2 = sharpEdge[1]
            if (v1 > v2): # Swap if out of order
                temp = v2
                v2 = v1
                v1 = temp
            try: # Apparently, some models have sharp edges that (no longer) exist.
                idx = edgeDict[(v1,v2)]
                mymesh.edges[idx].use_edge_sharp = True
            except:
                print(f"WARNING: Edge {v1},{v2} does not exist")
    #for pair in sharpEdges:
    #    p1 = pair[0]
    #    p2 = pair[1]
    #    edge = mymesh.edges.get([mymesh.vertices[p1], mymesh.vertices[p2]])
    #    print("edge = ", edge)
    #    #if edge != None:
    #    #    edge.use_edge_sharp = True
    # TODO: This causes faces with the same vertices but different normals to
    # be discarded. Don't want that
    #mymesh.validate()
    print("Normal calculation")
    mymesh.calc_normals()
    for poly in mymesh.polygons:
        poly.use_smooth = True
    print("Add edge split")
    maybeAddEdgeSplit(obj)
    #scn.update()
    obj.select_set(True)
    #if layerFlag == True:
    #    # Move to layer
    #    objectLayers = getLayerMask(lodnr)
    #    bpy.ops.object.move_to_layer(layers=objectLayers)
    # Map the raw resolution onto the toolbox LOD presets.
    hasSet = False
    oldres = resolution
    resolution = correctedResolution(resolution)
    offset = oldres - resolution
    obj.armaObjProps.isArmaObject = True
    if resolution <= 1000:
        obj.armaObjProps.lodDistance = resolution
        hasSet = True
    else:
        obj.armaObjProps.lodDistance = offset #0.0
    print("set LOD type")
    # Set the right LOD type
    lodPresets = ArmaToolbox.lodPresets
    for n in lodPresets:
        if float(n[0]) == resolution:
            obj.armaObjProps.lod = n[0]
            hasSet = True
    if hasSet == False:
        print("Error: unknown lod %f" % (resolution))
        print("resolution %d" % (correctedResolution(resolution)))
    print("weight")
    if weight is not None:
        obj.armaObjProps.mass = weight
    # Store per-vertex masses in a custom bmesh float layer.
    if len(weightArray) > 0:
        bm = bmesh.new()
        bm.from_mesh(obj.data)
        bm.verts.ensure_lookup_table()
        weight_layer = bm.verts.layers.float.new('FHQWeights')
        weight_layer = bm.verts.layers.float['FHQWeights']
        print(weight_layer)
        for i in range(0,len(weightArray)):
            bm.verts[i][weight_layer] = weightArray[i]
        bm.to_mesh(obj.data)
    obj.select_set(False)
    # Post-processing hooks for geometry LODs and unrecognized LODs.
    if obj.armaObjProps.lod == '1.000e+13' or obj.armaObjProps.lod == '4.000e+13':
        ArmaTools.attemptFixMassLod (obj)
    if obj.armaObjProps.lod == '-1.0':
        ArmaTools.PostProcessLOD(obj)
    print("done reading lod")
    return 0
# Main Import Routine
def importMDL(context, fileName, layerFlag):
    """Import an unbinarized Arma MLOD model file into the Blender scene.

    Validates the MLOD header, then reads each LOD via :func:`loadLOD`.

    :param context: Blender context passed through to loadLOD
    :param fileName: path of the .p3d file to read
    :param layerFlag: legacy layer-placement flag passed through to loadLOD
    :return: 0 on success, -1 on a bad header, -2 when a LOD fails to load
    """
    global objectLayers
    objectLayers = [True, False, False, False, False,
                    False, False, False, False, False,
                    False, False, False, False, False,
                    False, False, False, False, False]
    currentLayer = 0
    # NOTE(review): the early returns below leave filePtr open — confirm
    # whether a try/finally or 'with' is wanted here.
    filePtr = open(fileName, "rb")
    objName = path.basename(fileName).split(".")[0]
    # This is used to collect combinations of texture and rvmat
    # in order to generate Materials
    materialData = {}
    # Read the header
    sig = readSignature(filePtr)
    version = readULong(filePtr)
    numLods = readULong(filePtr)
    print ("Signature = {0}, version={1}, numLods = {2}".format(sig, version, numLods))
    if version != 257 or sig != b'MLOD':
        return -1
    # Start loading lods
    for i in range(0,numLods):
        if loadLOD(context, filePtr, objName, materialData, layerFlag, i) != 0:
            return -2
    filePtr.close()
    return 0
| AlwarrenSidh/ArmAToolbox | ArmaToolbox/MDLImporter.py | MDLImporter.py | py | 18,846 | python | en | code | 70 | github-code | 36 |
30509808806 | #!/home/meichen/anaconda3/bin/python
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import os
import glob
import matplotlib.ticker as mticker
from matplotlib.ticker import StrMethodFormatter, NullFormatter
def main():
    """Plot spectral-ratio curves and Boatwright fits per master event.

    Reads the selected event-pair CSV, groups EGF events by master event,
    then for each master loads the stacked spectral-ratio data from disk,
    overlays the fitted Boatwright model and corner frequency marker, and
    saves one PDF figure per master event.
    """
    data = pd.read_csv('pairsfile_rgp_select.csv',skipinitialspace=True)
    data_array = np.array(data)
    jpath = '/home/meichen/work1/SR_Attn/pair_events'
    phase = 'P'
    distance = '85'
    # pairs maps master event id -> list of its EGF event ids.
    pairs = {}
    masterid = []
    numberid = []
    for i in np.arange(len(data_array[:,0])):
        if data_array[i,0] not in pairs.keys():
            masterid.append(data_array[i,0])
            numberid.append(data_array[i,19])
        pairs.setdefault(data_array[i,0],[]).append(data_array[i,6])
    for key in list(pairs.keys()):
        index=list(data_array[:,0]).index(key)
        os.chdir('{}/master_{}'.format(jpath,key))
        num = 0
        fig = plt.figure(figsize=[4,2])
        ax1 = fig.add_subplot(111)
        for value in list(pairs.get(key)):
            # Station count is encoded in the stacked-spectra file name.
            stn_num = glob.glob('egf_{}/{}/gcarc_{}/all*'.format(value,phase,distance))[0].split('.')[7]
            d = np.genfromtxt('{}'.format(glob.glob('egf_{}/{}/gcarc_{}/all*'.format(value,phase,distance))[0]))
            # Keep only frequencies below 2 Hz.
            d = d[d[:,0]<2.0]
            indices = [l for l,x in enumerate(data_array[:,0]) if x == key]
            index = list(data_array[l,6] for l in indices).index(value)
            # Fit parameters: amplitude a, EGF corner b, corner frequency fc.
            fc = data_array[indices[0]+index,17]
            a = data_array[indices[0]+index,15]
            b = data_array[indices[0]+index,16]
            ax1.loglog(d[:,0],d[:,1],'C{}'.format(num),label='{} stn:{}'.format(value,stn_num),lw=0.5,alpha=0.75)
            ax1.loglog(d[:,0],func_Boatwright(d[:,0],a,b,fc),linestyle='--',color='grey',lw=1)
            ax1.plot(fc,func_Boatwright(fc,a,b,fc),marker='v',markeredgecolor='C{}'.format(num),markerfacecolor='C{}'.format(num),linewidth=2)
            num = num + 1
            # Cycle through matplotlib's default color palette (C0..C8).
            num = num % 9
        ax1.set_xlabel('Frequency (Hz)',size=8)
        ax1.set_ylabel('Spectral ratios',size=8)
        ax1.set_xticks([0.025,0.1,1,2])
        ax1.set_xticklabels([0.025,0.1,1,2])
        ax1.yaxis.set_major_locator(mticker.LogLocator(subs=(0.3,1.0,)))
        ax1.yaxis.set_major_formatter(mticker.ScalarFormatter())
        ax1.yaxis.set_minor_formatter(mticker.NullFormatter())
        ax1.tick_params(axis='both',which='both',labelsize=6)
        print(key,phase,distance)
        n = masterid.index(key)
        ax1.set_title('# {}'.format(numberid[n]),size=10)
        fig.tight_layout()
        plt.savefig('/home/meichen/Research/SR_Attn/pair_events/figures/master_{}.pdf'.format(numberid[n]))
        plt.close()
def func(x, a, b, c):
    """Single-corner spectral-ratio model: a * (1 + (x/b)^2) / (1 + (x/c)^2)."""
    numerator = 1 + x**2 / b**2
    denominator = 1 + x**2 / c**2
    return a * numerator / denominator
def func_Boatwright(x, a, b, c):
    """Boatwright (sharp-corner) spectral-ratio model with corners b and c."""
    numerator = (1 + x**4 / b**4) ** 0.5
    denominator = (1 + x**4 / c**4) ** 0.5
    return a * numerator / denominator
# Script entry point: generate one figure per master event.
main()
| meichenl95/SR_deepfocus | figures/egfs_one.py | egfs_one.py | py | 2,853 | python | en | code | 0 | github-code | 36 |
37407869802 | import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
import os
import shutil
# Build a per-language train/test split of the accent recordings and copy
# each file into the matching train_set/<language>/ or test_set/<language>/
# directory.
df = pd.read_csv('../data/speakers_all.csv')
df.drop(columns=['Unnamed: 9', 'Unnamed: 10', 'Unnamed: 11'], inplace=True)
# Keep only speakers whose recording file actually exists.
df_full = df[df['file_missing?'] == False]
# NOTE(review): 'famale' is presumably a known typo in the source data being
# normalized to 'female' — verify. Also note this assignment on a filtered
# frame triggers pandas' SettingWithCopyWarning.
df_full['sex'] = df_full['sex'].apply(lambda x: 'female' if x == 'famale' else x)
top10languages = df_full['native_language'].value_counts()[:10].index.values
train_set_filenames = []
test_set_filenames = []
# Split per language so each of the top-10 languages is represented in both sets.
for language in top10languages:
    X = df_full[df_full['native_language'] == language]['filename'].values
    y = [language] * len(X)
    X_train, X_test, y_train, y_test = train_test_split(X, y)
    train_set_filenames.append(X_train)
    test_set_filenames.append(X_test)
# Flatten the per-language arrays into simple name lists.
train_names = []
for lst in train_set_filenames:
    for file in lst:
        train_names.append(file)
test_names = []
for lst in test_set_filenames:
    for file in lst:
        test_names.append(file)
# Tag every row with its split ('none' = not in the top-10 languages).
df_full['train_test_none'] = df_full['filename'].apply(lambda x: "train" if x in train_names else "test" if x in test_names else "none")
df_full[df_full['train_test_none'] != 'none'].to_csv("../data/train_test.csv")
current_dir = "../data/recordings/wav_16khz"
# Copy each selected recording into its split/language directory.
for idx, row in df_full.iterrows():
    if row['train_test_none'] == 'none':
        continue
    elif row['train_test_none'] == 'train':
        new_path = os.path.join("../data/recordings/train_set", row['native_language'], row['filename'] + ".wav")
    elif row['train_test_none'] == 'test':
        new_path = os.path.join("../data/recordings/test_set", row['native_language'], row['filename'] + ".wav")
    current_path = os.path.join(current_dir, row['filename'] + ".wav")
shutil.copyfile(current_path, new_path) | acaldwell93/Accent-Classification | src/create_train_test_directories.py | create_train_test_directories.py | py | 1,769 | python | en | code | 0 | github-code | 36 |
2888068360 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
import json
import logging
from tornado.escape import json_encode
from tornado.escape import json_decode
from tornado.escape import utf8
from .constant import *
logger = logging.getLogger('server.' + __name__)
class JsonStream:
    """Delimiter-framed JSON messaging over a Tornado IOStream.

    Reads until bDELIMITER, decodes each chunk as JSON, dispatches it to
    :meth:`on_read_json`, and re-arms the read. Subclasses override
    ``on_read_json`` to handle decoded messages.
    """

    def __init__(self, stream, address):
        # stream: tornado IOStream; address: peer address (kept for context).
        self._stream = stream
        self._address = address
        self._stream.read_until(bDELIMITER, self.on_read)

    async def on_read(self, data):
        """Decode one delimiter-terminated chunk and dispatch it.

        NOTE(review): ``on_read_json`` as defined below is synchronous and
        returns None, so ``await``-ing it would raise TypeError unless a
        subclass overrides it with a coroutine — confirm intended contract.
        """
        try:
            if data:
                dict_ = json_decode(data)
                await self.on_read_json(dict_)
        except Exception as e:
            logger.error('Error occurs during decoding data from device.\n\
{}'.format(e), exc_info=True)
        # Re-arm the read for the next framed message.
        self._stream.read_until(bDELIMITER, self.on_read)

    def on_read_json(self, dict_):
        # Hook for subclasses: called with each decoded JSON message.
        pass

    def send_json(self, dict_):
        # Serialize and send a dict, appending the frame delimiter.
        if not self._stream.closed():
            self._stream.write(utf8(json_encode(dict_) + DELIMITER))
| All-less/exotic-server | lib/json_stream.py | json_stream.py | py | 1,023 | python | en | code | 2 | github-code | 36 |
8550138978 | from urllib.request import urlopen
import plotly.graph_objs as go
import plotly.express as px
import pandas as pd
import plotly
import json
import os
# Build two coronavirus heatmaps from the NYT county/state CSVs: a New York
# county choropleth (mapbox) and a USA state-level choropleth, both saved
# as standalone HTML files.
# NOTE(review): '..\..' uses Windows path separators — confirm this script
# is Windows-only or switch to os.path-friendly separators.
data_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..\..', 'data'))
usa_data_dir = os.path.join(data_dir, 'usa')
csv_dir = os.path.join(usa_data_dir, 'csv')
csv_data = os.path.join(csv_dir, 'us-counties.csv')
df = pd.read_csv(csv_data, sep=',', header=0)
# Filtering by latest date (hard-coded snapshot date)
df = df.loc[df['date'] == '2020-08-04']
# Filling empty cells
df = df.fillna(0)
# Removing rows where fips is 0
df = df.loc[df['fips'] != 0]
df = df.loc[df['state'] == 'New York']
# County-boundary GeoJSON keyed by FIPS code, fetched from plotly's datasets.
geojson_url = 'https://raw.githubusercontent.com/plotly/datasets/master/geojson-counties-fips.json'
with urlopen(geojson_url) as response:
    counties = json.load(response)
maps_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..\..', 'maps'))
result_html = os.path.join(maps_dir, 'ny-counties-coronavirus-heatmap.html')
# NY county-level deaths choropleth centered on upstate New York.
fig = px.choropleth_mapbox(df,
                           geojson=counties,
                           locations='fips',
                           color='deaths',
                           hover_data=["county", "state", "deaths", "cases"],
                           color_continuous_scale="Jet",
                           range_color=(0, 30),
                           mapbox_style="carto-positron",
                           zoom=6.0, center={"lat": 42.723, "lon": -75.762},
                           opacity=0.5,
                           labels={'county': 'County', 'state': 'State'}
                           )
fig.update_layout(margin={"r": 0, "t": 0, "l": 0, "b": 0})
plotly.offline.plot(fig, filename=result_html)
# Second map: state-level deaths. The agricultural-exports CSV is only used
# to obtain the state -> two-letter code mapping.
state_data = os.path.join(csv_dir, 'us-states.csv')
state_code_data = os.path.join(csv_dir, 'us-agr-exports-2011.csv')
df_state = pd.read_csv(state_data, sep=',', header=0)
junk_data = pd.read_csv(state_code_data, sep=',', header=0)
df_code = junk_data[['code', 'state']]
plot_data = df_state.merge(df_code, on=['state'], how='left')
plot_data = plot_data.loc[plot_data['date'] == '2020-08-04']
print(plot_data)
states_plot = os.path.join(maps_dir, 'usa-states-coronavirus-heatmap.html')
# Hover text combining state name, case count and death count.
plot_data['text'] = 'State: ' + plot_data['state'].astype(str) + '<br>' + \
    'Cases: ' + plot_data['cases'].astype(str) + '<br>' + \
    'Deaths: ' + plot_data['deaths'].astype(str)
# Color-scales: https://plotly.com/python/v3/colorscales/
# Maps Reference: https://plotly.com/python/choropleth-maps/
fig = go.Figure(data=go.Choropleth(
    locations=plot_data['code'],
    z=plot_data['deaths'],
    locationmode='USA-states',
    colorscale=[
        [0.0, 'rgb(165,0,38)'],
        [0.1111111111111111, 'rgb(215,48,39)'],
        [0.2222222222222222, 'rgb(244,109,67)'],
        [0.3333333333333333, 'rgb(253,174,97)'],
        [0.4444444444444444, 'rgb(254,224,144)'],
        [0.5555555555555556, 'rgb(224,243,248)'],
        [0.6666666666666666, 'rgb(171,217,233)'],
        [0.7777777777777778, 'rgb(116,173,209)'],
        [0.8888888888888888, 'rgb(69,117,180)'],
        [1.0, 'rgb(49,54,149)']
    ],
    text=plot_data['text'],  # hover text
    colorbar_title='Deaths'
))
fig.update_layout(
    title_text='2020 Coronavirus Deaths in USA',
    geo=dict(
        scope='usa',
        projection=go.layout.geo.Projection(type='albers usa')
    )
)
plotly.offline.plot(fig, filename=states_plot)
| vinitshah24/Coronavirus-Analysis | src/usa/data_plots.py | data_plots.py | py | 3,402 | python | en | code | 0 | github-code | 36 |
70786129065 | from Bio.Seq import Seq
import os
def parse_query_results(path):
    """Parse a tab-separated BLAST tabular (outfmt 6 style) results file.

    :param path: path of the file that contains the query results
    :return: list of dicts, one per row, keyed by the BLAST column names
        (qseqid, sseqid, pident, length, mismatch, gapopen, qstart, qend,
        sstart, send, evalue, bitscore); all values are strings
    """
    results = []
    # FIX: use 'with' so the handle is closed (the original leaked it).
    with open(path, "r") as file:
        for line in file:
            fields = line.split("\t")
            results.append({
                "qseqid": fields[0],
                "sseqid": fields[1],
                "pident": fields[2],
                "length": fields[3],
                "mismatch": fields[4],
                "gapopen": fields[5],
                "qstart": fields[6],
                "qend": fields[7],
                "sstart": fields[8],
                "send": fields[9],
                "evalue": fields[10],
                # FIX: rstrip('\n') instead of [:-1] — the old slice chopped
                # the last digit when the final line had no trailing newline.
                "bitscore": fields[11].rstrip("\n"),
            })
    return results
def get_pairs(query_results):
    """Collect (miRNA, host) pairs for every perfect (100% identity) hit.

    :param query_results: list of result dicts as produced by
        parse_query_results
    :return: list of (miRNA id, host id) tuples
    """
    pairs = []
    for hit in query_results:
        if float(hit["pident"]) != 100:
            continue
        miRNA = hit["qseqid"]
        host = hit["sseqid"]
        try:
            semicolon_at = host.index(";")
            pipe_at = host.index("|")
            # NOTE(review): host[pipe_at] keeps only the single '|' character;
            # possibly host[pipe_at:] was intended. Behavior preserved as-is.
            host = "%s%s" % (host[:semicolon_at], host[pipe_at])
        except ValueError:
            # No ';'/'|' decoration on the host id — keep it unchanged.
            pass
        pairs.append((miRNA, host))
    return pairs
def get_3_tag_UTR(host_header, file_name):
    """Return the FASTA sequence whose header contains *host_header*.

    Looks up ``Resources/<file_name>.fasta`` next to this module, finds the
    first header line containing ``>host_header`` and concatenates the
    sequence lines that follow it (up to the next header or EOF).

    :param host_header: header substring to search for (without the '>')
    :param file_name: FASTA file basename (without extension) under Resources
    :return: the sequence string (possibly empty when the record has no
        sequence lines), or None when the header is not found
    """
    mRNA_path_file = r"%s\Resources\%s.fasta" % (os.path.dirname(os.path.abspath(__file__)), file_name)
    string_to_find = ">%s" % host_header
    sequence = ""
    collecting = False
    # FIX: use 'with' so the file handle is closed (the original leaked it).
    with open(mRNA_path_file, "r") as mRNA_file:
        for line in mRNA_file:
            if collecting:
                if line[0] == ">":
                    # Next record reached — the (possibly empty) sequence is
                    # complete, matching the original's behavior.
                    return sequence
                sequence = "%s%s" % (sequence, line[:-1])
            elif string_to_find in line:
                collecting = True
    if sequence != "":
        return sequence
    return None  # Header not found in the file
def get_cbr_miRNA_That_Met_threshold(query_results_cbr):
    """Annotate each result dict in place with an 'is_conserved' flag.

    A hit is conserved exactly when its percent identity equals 100.

    :param query_results_cbr: list of result dicts (mutated in place)
    :return: None
    """
    for record in query_results_cbr:
        record['is_conserved'] = float(record['pident']) == 100
def get_miRNA_and_host_sequences(pairs, miRNA_flie_name, mRNA_file_name):
    """Resolve the sequences of all miRNA/host pairs.

    :param pairs: list of (miRNA header, host header) tuples
    :param miRNA_flie_name: FASTA basename holding the miRNA sequences
    :param mRNA_file_name: FASTA basename holding the host mRNA sequences
    :return: list of ((miRNA header, miRNA seq), (host header, host seq))
    """
    resolved = []
    total = len(pairs)
    for position, (miRNA_header, host_header) in enumerate(pairs, start=1):
        # Progress trace (prints total/current, as the original did).
        print("%d/%d" % (total, position))
        host_sequence = get_3_tag_UTR(host_header, mRNA_file_name)
        miRNA_sequence = get_3_tag_UTR(miRNA_header, miRNA_flie_name)
        resolved.append(((miRNA_header, miRNA_sequence), (host_header, host_sequence)))
    return resolved
def get_seed(sequence):
    """Return the seed region (positions 2-8) of a miRNA sequence."""
    return sequence[1:8]
def is_seed_criteria_met(miRNA,mRNA):
    """Check whether the miRNA seed's reverse complement occurs in the mRNA.

    :param miRNA: miRNA sequence (RNA alphabet; U is converted to T)
    :param mRNA: host mRNA sequence, or the literal "Sequence unavailable"
    :return: True IFF the seed criterion is met; the string "unknown"
        when the mRNA sequence is unavailable
    """
    if mRNA == "Sequence unavailable":
        return "unknown"
    miRNA_seed = get_seed(miRNA)
    # Convert to DNA alphabet before reverse-complementing.
    miRNA_seed = miRNA_seed.replace("U","T")
    miRNA_seq = Seq(miRNA_seed)
    miRNA_seed = miRNA_seq.reverse_complement()
    # NOTE(review): relies on Biopython's private Seq._data attribute; newer
    # Biopython stores bytes there — confirm installed version, or prefer
    # str(miRNA_seed).
    return miRNA_seed._data in mRNA
def is_seed_criteria_met_in_pairs(pairs):
    """Evaluate the seed criterion for every (miRNA, host) pair.

    :param pairs: list of ((miRNA_name, miRNA_seq), (host_name, host_seq))
    :return: list of dictionaries with the names, sequences and the
        'is_target' result of is_seed_criteria_met for each pair
    """
    summaries = []
    for (mi_name, mi_seq), (host_name, host_seq) in pairs:
        summaries.append({
            "miRNA_name": mi_name,
            "miRNA_seq": mi_seq,
            "Host_name": host_name,
            "Host_seq": host_seq,
            "is_target": is_seed_criteria_met(mi_seq, host_seq),
        })
    return summaries
def parse_results_to_csv(query_results):
    """Write *query_results* to 'results.csv' in the current directory.

    The header row is taken from the keys of the first dictionary; every
    dictionary is then written as one row.  Uses csv.DictWriter so values
    containing commas, quotes or newlines are escaped correctly — the
    previous hand-rolled writer produced corrupt rows for such values,
    emitted a spurious trailing-comma column, and reopened the file once
    per row.

    :param query_results: non-empty list of dictionaries sharing the same keys
    :return: None
    """
    fieldnames = list(query_results[0].keys())
    with open('results.csv', 'w', newline='') as out_file:
        writer = csv.DictWriter(out_file, fieldnames=fieldnames)
        writer.writeheader()
        writer.writerows(query_results)
def _read_fasta_entries(path):
    """Parse a two-line-per-record FASTA file into (name, sequence) tuples.

    Each record is a '>' header line followed by exactly one sequence line;
    the leading '>' and trailing newline are stripped.
    """
    entries = []
    with open(path, 'r') as fasta:
        name = None
        for line in fasta:
            if name is None:
                name = line[1:-1]
            else:
                entries.append((name, line[:-1]))
                name = None
    return entries


def get_all_cell_dictionaries(path_cel, path_cel_pre):
    """Join C.elegans mature miRNAs with their pre-miRNAs by name stem.

    Two records match when the part of their names before the last '_' is
    identical (ValueError is raised, as before, for names without '_').

    :param path_cel: path to the mature C.elegans miRNA FASTA file
    :param path_cel_pre: path to the pre-mature C.elegans miRNA FASTA file
    :return: list of dictionaries, one per (mature, pre) match
    """
    mature = _read_fasta_entries(path_cel)
    pre = _read_fasta_entries(path_cel_pre)
    # Index the pre-miRNAs by their name stem so matching is O(n) instead
    # of the previous O(n**2) double loop; `dict` is no longer shadowed.
    pre_by_stem = {}
    for pre_name, pre_seq in pre:
        stem = pre_name[:pre_name.rindex('_')]
        pre_by_stem.setdefault(stem, []).append((pre_name, pre_seq))
    combined = []
    for cel_name, cel_seq in mature:
        stem = cel_name[:cel_name.rindex('_')]
        for pre_name, pre_seq in pre_by_stem.get(stem, []):
            combined.append({
                'C.elegans pre-miRNA name': pre_name,
                'C.elegans pre-miRNA sequence': pre_seq,
                'C.elegans mature name': cel_name,
                'C.elegans mature sequence': cel_seq,
            })
    return combined
def add_host_data(all_cell_dictionary, final_pairs_mRNA):
    """Annotate each miRNA dictionary with its host-gene name and targeting flag.

    Adds the keys "Host gene name" and "Targets the host gene" to every
    dictionary in *all_cell_dictionary*; '-' is used when no host record
    matches the mature miRNA name.

    :param all_cell_dictionary: dictionaries with 'C.elegans mature name'
    :param final_pairs_mRNA: records with 'miRNA_name', 'Host_name', 'is_target'
    :return: None (the dictionaries are mutated in place)
    """
    # Index host records by miRNA name; a later record for the same name
    # overwrites an earlier one, matching the original linear scan where
    # the last match won.  Lookup is now O(1) instead of O(n) per miRNA.
    host_by_name = {rec["miRNA_name"]: rec for rec in final_pairs_mRNA}
    # Tri-state flag -> display string ('unknown' covers the non-bool case).
    target_labels = {True: 'yes', False: 'no'}
    for cell_dictionary in all_cell_dictionary:
        record = host_by_name.get(cell_dictionary['C.elegans mature name'])
        if record is None:
            cell_dictionary["Host gene name"] = "-"
            cell_dictionary["Targets the host gene"] = '-'
        else:
            cell_dictionary["Host gene name"] = record["Host_name"]
            cell_dictionary["Targets the host gene"] = target_labels.get(
                record["is_target"], 'unknown')
def add_cbr_data(all_cell_dictionary, query_results_cbr):
    """Annotate each miRNA dictionary with its C.briggsae conservation status.

    Adds the key "Conserved in C.briggsae": the C.briggsae name when the hit
    is conserved, False when a hit exists but is not conserved, and '-' when
    no hit matches the mature miRNA name.

    :param all_cell_dictionary: dictionaries with 'C.elegans mature name'
    :param query_results_cbr: BLAST records with 'qseqid', 'sseqid',
        'is_conserved'
    :return: None (the dictionaries are mutated in place)
    """
    # Index hits by query name; later records overwrite earlier ones,
    # matching the original scan's last-match-wins behaviour while turning
    # the O(n**2) nested loops into O(n) lookups.
    cbr_by_name = {rec["qseqid"]: rec for rec in query_results_cbr}
    for cell_dictionary in all_cell_dictionary:
        record = cbr_by_name.get(cell_dictionary['C.elegans mature name'])
        if record is None:
            cell_dictionary["Conserved in C.briggsae"] = "-"
        elif record["is_conserved"]:
            cell_dictionary["Conserved in C.briggsae"] = record["sseqid"]
        else:
            cell_dictionary["Conserved in C.briggsae"] = False
if __name__ == "__main__":
    # Resolve resource paths relative to this script's directory with
    # os.path.join instead of hand-built '\'-separated format strings, so
    # the script also works on non-Windows platforms.
    resources = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                             "Resources")
    path_mRNA = os.path.join(resources, "res_blastn_compact_mRNA.fasta")
    path_cbr = os.path.join(resources, "res_blastn_compact_cbr.fasta")
    path_cel = os.path.join(resources, "cel.fasta")
    path_cel_pre = os.path.join(resources, "cel-pre.fasta")
    print("parsing query result")
    query_results_mRNA = parse_query_results(path_mRNA)
    query_results_cbr = parse_query_results(path_cbr)
    print("get pairs")
    pairs_mRNA = get_pairs(query_results_mRNA)
    get_cbr_miRNA_That_Met_threshold(query_results_cbr)
    print("get host")
    new_pairs_mRNA = get_miRNA_and_host_sequences(pairs_mRNA, "cel", "mRNA")
    print("Updating seed criteria")
    final_pairs_mRNA = is_seed_criteria_met_in_pairs(new_pairs_mRNA)
    print("Gathering data")
    all_cell_dictionary = get_all_cell_dictionaries(path_cel, path_cel_pre)
    add_host_data(all_cell_dictionary, final_pairs_mRNA)
    add_cbr_data(all_cell_dictionary, query_results_cbr)
parse_results_to_csv(all_cell_dictionary) | guys79/BioInformaticsProject | main.py | main.py | py | 11,616 | python | en | code | 0 | github-code | 36 |
36955780399 | from wiredtiger import stat
from wtdataset import SimpleDataSet
from wtscenario import make_scenarios
from rollback_to_stable_util import test_rollback_to_stable_base
def mod_val(value, char, location, nbytes=1):
    """Return *value* with *nbytes* characters at *location* replaced by *char*."""
    prefix, suffix = value[:location], value[location + nbytes:]
    return prefix + char + suffix
# test_rollback_to_stable04.py
# Test that rollback to stable always replaces the on-disk value with a full update
# from the history store.
class test_rollback_to_stable04(test_rollback_to_stable_base):
    """Verify rollback-to-stable replaces on-disk values with full updates
    from the history store, across a matrix of scenarios (key/value format,
    in-memory vs. on-disk, prepared transactions, dry-run, eviction)."""
    # Table key/value formats: variable-length column store, fixed-length
    # column store ('8t'), and row store with integer keys.
    format_values = [
        ('column', dict(key_format='r', value_format='S')),
        ('column_fix', dict(key_format='r', value_format='8t')),
        ('row_integer', dict(key_format='i', value_format='S')),
    ]
    in_memory_values = [
        ('no_inmem', dict(in_memory=False)),
        ('inmem', dict(in_memory=True))
    ]
    prepare_values = [
        ('no_prepare', dict(prepare=False)),
        ('prepare', dict(prepare=True))
    ]
    dryrun_values = [
        ('no_dryrun', dict(dryrun=False)),
        ('dryrun', dict(dryrun=True))
    ]
    evict = [
        ('no_evict', dict(evict=False)),
        ('evict', dict(evict=True))
    ]
    scenarios = make_scenarios(format_values, in_memory_values, prepare_values, dryrun_values, evict)
    def conn_config(self):
        """Build the connection config; in-memory scenarios add in_memory=true."""
        config = 'cache_size=500MB,statistics=(all),verbose=(rts:5)'
        if self.in_memory:
            config += ',in_memory=true'
        return config
    def test_rollback_to_stable(self):
        """Apply a stack of updates/modifies at increasing timestamps, roll
        back to a stable timestamp, and verify both the visible data and the
        rollback-to-stable statistics."""
        nrows = 1000
        # Create a table.
        uri = "table:rollback_to_stable04"
        ds_config = ',log=(enabled=false)' if self.in_memory else ''
        ds = SimpleDataSet(self, uri, 0,
            key_format=self.key_format, value_format=self.value_format, config=ds_config)
        ds.populate()
        if self.value_format == '8t':
            value_a = 97 # 'a'
            value_b = 98 # 'b'
            value_c = 99 # 'c'
            value_d = 100 # 'd'
            # No modifies in FLCS; do ordinary updates instead.
            value_modQ = 81 # 'Q'
            value_modR = 82 # 'R'
            value_modS = 83 # 'S'
            value_modT = 84 # 'T'
            value_modW = 87 # 'W'
            value_modX = 88 # 'X'
            value_modY = 89 # 'Y'
            value_modZ = 90 # 'Z'
        else:
            value_a = "aaaaa" * 100
            value_b = "bbbbb" * 100
            value_c = "ccccc" * 100
            value_d = "ddddd" * 100
            value_modQ = mod_val(value_a, 'Q', 0)
            value_modR = mod_val(value_modQ, 'R', 1)
            value_modS = mod_val(value_modR, 'S', 2)
            value_modT = mod_val(value_c, 'T', 3)
            value_modW = mod_val(value_d, 'W', 4)
            value_modX = mod_val(value_a, 'X', 5)
            value_modY = mod_val(value_modX, 'Y', 6)
            value_modZ = mod_val(value_modY, 'Z', 7)
        # Pin oldest and stable to timestamp 10.
        self.conn.set_timestamp('oldest_timestamp=' + self.timestamp_str(10) +
            ',stable_timestamp=' + self.timestamp_str(10))
        # Perform a combination of modifies and updates.
        self.large_updates(uri, value_a, ds, nrows, self.prepare, 20)
        self.large_modifies(uri, 'Q', ds, 0, 1, nrows, self.prepare, 30)
        self.large_modifies(uri, 'R', ds, 1, 1, nrows, self.prepare, 40)
        self.large_modifies(uri, 'S', ds, 2, 1, nrows, self.prepare, 50)
        # Evict the pages to disk
        if self.evict:
            self.evict_cursor(uri, nrows, value_modS)
        self.large_updates(uri, value_b, ds, nrows, self.prepare, 60)
        self.large_updates(uri, value_c, ds, nrows, self.prepare, 70)
        self.large_modifies(uri, 'T', ds, 3, 1, nrows, self.prepare, 80)
        self.large_updates(uri, value_d, ds, nrows, self.prepare, 90)
        self.large_modifies(uri, 'W', ds, 4, 1, nrows, self.prepare, 100)
        self.large_updates(uri, value_a, ds, nrows, self.prepare, 110)
        self.large_modifies(uri, 'X', ds, 5, 1, nrows, self.prepare, 120)
        self.large_modifies(uri, 'Y', ds, 6, 1, nrows, self.prepare, 130)
        self.large_modifies(uri, 'Z', ds, 7, 1, nrows, self.prepare, 140)
        # Verify data is visible and correct.
        # (Prepared transactions become durable one tick later, hence "+1".)
        self.check(value_a, uri, nrows, None, 21 if self.prepare else 20)
        self.check(value_modQ, uri, nrows, None, 31 if self.prepare else 30)
        self.check(value_modR, uri, nrows, None, 41 if self.prepare else 40)
        self.check(value_modS, uri, nrows, None, 51 if self.prepare else 50)
        self.check(value_b, uri, nrows, None, 61 if self.prepare else 60)
        self.check(value_c, uri, nrows, None, 71 if self.prepare else 70)
        self.check(value_modT, uri, nrows, None, 81 if self.prepare else 80)
        self.check(value_d, uri, nrows, None, 91 if self.prepare else 90)
        self.check(value_modW, uri, nrows, None, 101 if self.prepare else 100)
        self.check(value_a, uri, nrows, None, 111 if self.prepare else 110)
        self.check(value_modX, uri, nrows, None, 121 if self.prepare else 120)
        self.check(value_modY, uri, nrows, None, 131 if self.prepare else 130)
        self.check(value_modZ, uri, nrows, None, 141 if self.prepare else 140)
        # Pin stable to timestamp 40 if prepare otherwise 30.
        if self.prepare:
            self.conn.set_timestamp('stable_timestamp=' + self.timestamp_str(40))
        else:
            self.conn.set_timestamp('stable_timestamp=' + self.timestamp_str(30))
        # Checkpoint to ensure the data is flushed, then rollback to the stable timestamp.
        if not self.in_memory:
            self.session.checkpoint()
        self.conn.rollback_to_stable('dryrun={}'.format('true' if self.dryrun else 'false'))
        # Check that the correct data is seen at and after the stable timestamp.
        self.check(value_modQ, uri, nrows, None, 30)
        # In dry-run mode nothing is rolled back, so the newest value is
        # still visible; otherwise the stable value persists at later reads.
        if self.dryrun:
            self.check(value_modZ, uri, nrows, None, 150)
            self.check(value_a, uri, nrows, None, 20)
        else:
            self.check(value_modQ, uri, nrows, None, 150)
            self.check(value_a, uri, nrows, None, 20)
        stat_cursor = self.session.open_cursor('statistics:', None, None)
        calls = stat_cursor[stat.conn.txn_rts][2]
        hs_removed = stat_cursor[stat.conn.txn_rts_hs_removed][2]
        hs_removed_dryrun = stat_cursor[stat.conn.txn_rts_hs_removed_dryrun][2]
        hs_sweep = stat_cursor[stat.conn.txn_rts_sweep_hs_keys][2]
        hs_sweep_dryrun = stat_cursor[stat.conn.txn_rts_sweep_hs_keys_dryrun][2]
        keys_removed = stat_cursor[stat.conn.txn_rts_keys_removed][2]
        keys_restored = stat_cursor[stat.conn.txn_rts_keys_restored][2]
        pages_visited = stat_cursor[stat.conn.txn_rts_pages_visited][2]
        upd_aborted = stat_cursor[stat.conn.txn_rts_upd_aborted][2]
        upd_aborted_dryrun = stat_cursor[stat.conn.txn_rts_upd_aborted_dryrun][2]
        stat_cursor.close()
        self.assertEqual(calls, 1)
        self.assertEqual(keys_removed, 0)
        self.assertEqual(keys_restored, 0)
        self.assertGreater(pages_visited, 0)
        # 11 of the 13 writes per row are newer than stable and must be
        # aborted/removed; dry-run only counts them, in-memory has no
        # history store to remove from.
        if self.dryrun:
            self.assertEqual(upd_aborted + hs_removed + hs_sweep, 0)
            self.assertGreaterEqual(upd_aborted_dryrun + hs_removed_dryrun + hs_sweep_dryrun, nrows * 11)
        elif self.in_memory:
            self.assertEqual(upd_aborted, nrows * 11)
            self.assertEqual(hs_removed + hs_sweep, 0)
            self.assertEqual(upd_aborted_dryrun + hs_removed_dryrun + hs_sweep_dryrun, 0)
        else:
            self.assertGreaterEqual(upd_aborted + hs_removed + hs_sweep, nrows * 11)
            self.assertEqual(upd_aborted_dryrun + hs_removed_dryrun + hs_sweep_dryrun, 0)
# Allow running this test file directly.
# NOTE(review): `wttest` is not imported in this chunk — confirm the import
# exists at the top of the file.
if __name__ == '__main__':
    wttest.run()
| mongodb/mongo | src/third_party/wiredtiger/test/suite/test_rollback_to_stable04.py | test_rollback_to_stable04.py | py | 7,749 | python | en | code | 24,670 | github-code | 36 |
21418641611 |
import ismrmrd
import os
import itertools
import logging
import numpy as np
import numpy.fft as fft
import ctypes
import mrdhelper
from datetime import datetime
# Folder for debug output files
debugFolder = "/tmp/share/debug"
def groups(iterable, predicate):
    """Yield consecutive items from *iterable* batched into lists.

    A batch is emitted each time *predicate* returns true for an item (that
    item closes the batch).  Items after the last closing item are dropped.
    """
    batch = []
    for element in iterable:
        batch.append(element)
        if not predicate(element):
            continue
        yield batch
        batch = []
def conditionalGroups(iterable, predicateAccept, predicateFinish):
    """Collect items from *iterable* into lists.

    An item is kept only when *predicateAccept* is true for it; whenever
    *predicateFinish* is true (whether or not that item was accepted) the
    current list is yielded and a new one started.  Iteration stops at the
    first None item.  ``iterable.send_close()`` is always called on exit,
    and items accepted after the last finished group are dropped.
    """
    current = []
    try:
        for element in iterable:
            if element is None:
                break
            keep = predicateAccept(element)
            done = predicateFinish(element)
            if keep:
                current.append(element)
            if done:
                yield current
                current = []
    finally:
        iterable.send_close()
def process(connection, config, metadata):
    """Main reconstruction loop: group acquisitions per slice and send images.

    Iterates the incoming acquisitions, discarding phase-correction lines,
    and reconstructs one image for every group terminated by the
    ACQ_LAST_IN_SLICE flag.

    :param connection: MRD connection; iterated for acquisitions and used to
        send reconstructed images back to the client
    :param config: client-supplied configuration (forwarded to process_group)
    :param metadata: ISMRMRD header (forwarded to process_group)
    """
    logging.info("Config: \n%s", config)
    logging.info("Metadata: \n%s", metadata)
    # Discard phase correction lines and accumulate lines until "ACQ_LAST_IN_SLICE" is set
    for group in conditionalGroups(connection, lambda acq: not acq.is_flag_set(ismrmrd.ACQ_IS_PHASECORR_DATA), lambda acq: acq.is_flag_set(ismrmrd.ACQ_LAST_IN_SLICE)):
        image = process_group(group, config, metadata)
        logging.debug("Sending image to client:\n%s", image)
        connection.send_image(image)
def process_group(group, config, metadata):
    """Reconstruct one image from a group of acquisitions.

    Stacks the raw k-space data, applies a centered 2-D inverse FFT,
    combines coils by root-sum-of-squares, normalizes to int16, crops the
    readout oversampling, and packages the result as an ISMRMRD image with
    meta attributes.  Intermediate arrays are saved under debugFolder.

    :param group: list of ismrmrd acquisitions forming one slice
    :param config: unused here; accepted for interface symmetry with process()
    :param metadata: ISMRMRD header, used for the image field of view
    :return: the reconstructed ismrmrd.Image
    """
    # Create folder, if necessary
    if not os.path.exists(debugFolder):
        os.makedirs(debugFolder)
        logging.debug("Created folder " + debugFolder + " for debug output files")
    # Format data into single [cha RO PE] array
    data = [acquisition.data for acquisition in group]
    data = np.stack(data, axis=-1)
    logging.debug("Raw data is size %s" % (data.shape,))
    np.save(debugFolder + "/" + "raw.npy", data)
    # Fourier Transform
    data = fft.fftshift(data, axes=(1, 2))
    data = fft.ifft2(data)
    data = fft.ifftshift(data, axes=(1, 2))
    # Sum of squares coil combination
    data = np.abs(data)
    data = np.square(data)
    data = np.sum(data, axis=0)
    data = np.sqrt(data)
    logging.debug("Image data is size %s" % (data.shape,))
    np.save(debugFolder + "/" + "img.npy", data)
    # Normalize and convert to int16
    # NOTE(review): data.max() == 0 (all-zero input) would divide by zero —
    # confirm upstream guarantees non-empty signal.
    data *= 32767/data.max()
    data = np.around(data)
    data = data.astype(np.int16)
    # Remove phase oversampling
    nRO = np.size(data,0)
    data = data[int(nRO/4):int(nRO*3/4),:]
    logging.debug("Image without oversampling is size %s" % (data.shape,))
    np.save(debugFolder + "/" + "imgCrop.npy", data)
    # Format as ISMRMRD image data
    image = ismrmrd.Image.from_array(data, acquisition=group[0])
    image.image_index = 1
    # Set field of view
    image.field_of_view = (ctypes.c_float(metadata.encoding[0].reconSpace.fieldOfView_mm.x),
                           ctypes.c_float(metadata.encoding[0].reconSpace.fieldOfView_mm.y),
                           ctypes.c_float(metadata.encoding[0].reconSpace.fieldOfView_mm.z))
    # Set ISMRMRD Meta Attributes
    meta = ismrmrd.Meta({'DataRole': 'Image',
                         'ImageProcessingHistory': ['FIRE', 'PYTHON'],
                         'WindowCenter': '16384',
                         'WindowWidth': '32768'})
    # Add image orientation directions to MetaAttributes if not already present
    if meta.get('ImageRowDir') is None:
        meta['ImageRowDir'] = ["{:.18f}".format(image.getHead().read_dir[0]), "{:.18f}".format(image.getHead().read_dir[1]), "{:.18f}".format(image.getHead().read_dir[2])]
    if meta.get('ImageColumnDir') is None:
        meta['ImageColumnDir'] = ["{:.18f}".format(image.getHead().phase_dir[0]), "{:.18f}".format(image.getHead().phase_dir[1]), "{:.18f}".format(image.getHead().phase_dir[2])]
    xml = meta.serialize()
    logging.debug("Image MetaAttributes: %s", xml)
    logging.debug("Image data has %d elements", image.data.size)
    image.attribute_string = xml
    return image
| HMS-CardiacMR/MyoMapNet | InLine_Implementation/Code/simplefft.py | simplefft.py | py | 4,004 | python | en | code | 23 | github-code | 36 |
19865653500 | import streamlit as st
import base64
import os
from nbanalyzer import *
from PIL import Image
import time
# Directory the app was launched from; favicon.ico is resolved relative to it.
script_directory = os.getcwd()
# Accent colour applied to Streamlit progress bars via injected CSS.
PROGRESS_BAR_CUSTOM_COLOR = '#f63366'


def ordinal(n):
    """Return *n* with its English ordinal suffix (1st, 2nd, 3rd, 4th, ...).

    PEP 8 (E731) discourages assigning a lambda to a name, so the original
    one-liner is now a documented def with identical behaviour.
    """
    suffix = "tsnrhtdd"[(n // 10 % 10 != 1) * (n % 10 < 4) * n % 10::4]
    return "%d%s" % (n, suffix)
def load_data(year: int, stat_type: str):
    """Fetch player statistics for *year* in the requested *stat_type* table.

    'advanced_box_score' routes to get_advanced_metrics; every other table
    comes from get_players_data ('play-by-play' uses header row 1, the rest
    row 0).
    """
    if stat_type == 'advanced_box_score':
        return get_advanced_metrics(year)
    header_row = 1 if stat_type == 'play-by-play' else 0
    return get_players_data(year, stat_type, header_row)
def filedownload(df):
    """Return an HTML anchor that downloads *df* as 'playerstats.csv'.

    The DataFrame is serialized to CSV (no index), base64-encoded, and
    embedded in a data: URI so Streamlit can render it as a download link.
    """
    # strings <-> bytes conversions
    encoded = base64.b64encode(df.to_csv(index=False).encode()).decode()
    return f'<a href="data:file/csv;base64,{encoded}" download="playerstats.csv">Download CSV File</a>'
def translate_stat_type(stat_type):
    """Map an internal stat-type key to its display label ('None' if unknown)."""
    labels = {
        'per_game': 'Per Game',
        'totals': 'Total',
        'per_minute': 'Per 36 Minutes',
        'advanced': 'Advanced',
        'per_poss': 'Per 100 Possessions',
        'play-by-play': 'Play-by-Play',
        'advanced_box_score': 'Advanced Box Score',
    }
    return labels.get(stat_type, 'None')
def main():
    """Render the Streamlit NBA Stats Explorer page.

    Builds the sidebar (year, stat type, team and position filters), shows
    the filtered player table with a CSV download link, then renders a
    season summary with several expanders of plots produced by the
    nbanalyzer helpers (MVP, heatmap, scoring, shooting, playmaking,
    player finder and impact metrics).
    """
    st.set_option('deprecation.showPyplotGlobalUse', False)
    icon = Image.open(os.path.join(script_directory, 'favicon.ico'))
    st.set_page_config('NBA Stats Explorer', icon)
    st.markdown('<img src=\"https://cdn.nba.com/logos/nba/nba-logoman-75-word_white.svg\" alt=\"NBA logo\" style=\"width:150px\"> ' ,
                unsafe_allow_html=True)
    st.title('NBA Stats Explorer')
    st.markdown("""
    This app performs simple webscraping of NBA player stats data!
    * **Python libraries:** base64, matplotlib, pandas, plotly, streamlit
    * **Data source:** [Basketball-reference.com](https://www.basketball-reference.com/).
    """)
    st.sidebar.header('User Input Features')
    selected_year = st.sidebar.selectbox('Year', list(reversed(range(1977,2023))))
    selected_stat = st.sidebar.selectbox('Player Stats', STAT_TYPES, format_func=translate_stat_type)
    playerstats = load_data(selected_year, selected_stat)
    # Sidebar - Team selection
    sorted_unique_team = sorted(playerstats.Tm.unique())
    selected_team = st.sidebar.multiselect('Team', sorted_unique_team, sorted_unique_team)
    # Sidebar - Position selection
    unique_pos = ['C','PF','SF','PG','SG']
    selected_pos = st.sidebar.multiselect('Position', unique_pos, unique_pos)
    # Filtering data
    df_selected_team = playerstats[(playerstats.Tm.isin(selected_team)) & (playerstats.Pos.isin(selected_pos))]
    st.header('Displaying Players\' ' + translate_stat_type(selected_stat) + ' Stats of Selected Team(s)')
    st.write('Data Dimension: ' + str(df_selected_team.shape[0]) + ' rows and ' + str(df_selected_team.shape[1]) + ' columns.')
    st.dataframe(df_selected_team)
    st.markdown(filedownload(df_selected_team), unsafe_allow_html=True)
    # MVP voting is only available for completed seasons; 2022 uses a
    # hard-coded list of top candidates instead.
    if selected_year < 2022:
        best_players = get_mvp_voting(selected_year, 5)
    else:
        best_players = ['Nikola Jokić', 'Joel Embiid', 'Chris Paul', 'Stephen Curry', 'Kevin Durant', 'Giannis Antetokounmpo',
                        'Ja Morant', 'Luka Dončić', 'Devin Booker', 'DeMar DeRozan', 'Jimmy Butler']
    with st.spinner('Loading season summary...'):
        st.header(f'{selected_year} Season Summary')
        st.write(f"""
        The {selected_year} season was the {ordinal(selected_year - 1946)} of the [National Basketball Association](https://en.wikipedia.org/wiki/National_Basketball_Association).
        As usual, we have to analyze its vast data and explore player performances to decide which players performed the best!
        """)
        if selected_year < 2022:
            with st.expander(f'{selected_year} NBA MVP'):
                st.write(f"""
                ### MVP
                This season's MVP was **{best_players[0]}** who won the prize against the likes of {best_players[1]}, {best_players[2]}
                and {best_players[3]}.
                """)
        with st.expander(f'Intercorrelation Matrix Heatmap - {selected_year}'):
            st.markdown("""
            ### Intercorrelation Matrix Heatmap
            The matrix is calculated from a cross-tabulation and shows how statistically similar all pairs of variables are in their
            distributions across the various samples. The table below shows the intercorrelations between per game player stats.
            """)
            with st.spinner('Loading heatmap...'):
                draw_intercorrelation_heatmap(selected_year)
                st.pyplot()
        with st.expander(f'Scoring - {selected_year}'):
            st.markdown("""
            ### Points per 75 possessions x TS% Scatter Plot
            The scatter plot is used to analyze the relation between \"inflation adjusted\" scoring and efficiency from players across the league.
            """)
            with st.spinner('Loading scatter plot'):
                st.write(gen_scoring_efficiency_plot(selected_year, best_players))
        # The NBA introduced the 3-point line in the 1979-80 season.
        if selected_year >= 1980:
            with st.expander(f'Shooting - {selected_year}'):
                st.markdown("""
                ### 3-Point Attempts x 3P% Scatter Plot
                The scatter plot is used to analyze the relation between 3-Point Field Goal attempts per 100 possessions and 3-Point Field Goal
                Percentage from players across the league as well as observe the evolution of shooting along the decades.
                """)
                with st.spinner('Loading scatter plot'):
                    st.write(gen_shooting_efficiency_plot(selected_year))
        with st.expander(f'Playmaking - {selected_year}'):
            st.markdown("""
            ### Offensive Load x Box Creation Scatter Plot
            The scatter plot is used to analyze the relation between a per 100 estimate of the number of true shots created for teammates and
            the percentage of possessions a player is directly or indirectly involved in a true shooting attempt, or commits a turnover.
            """)
            with st.spinner('Loading scatter plot'):
                st.write(gen_playmaking_plot(selected_year))
        with st.expander('Player Finder'):
            st.markdown("""
            ### Player Finder
            Player Finder is a tool to explore the database and see how specific players are performing relative to the league in 5 major categories
            **Scoring, Efficiency, Shooting, Creation and Load**. Try it out and see how your favorite NBA star is doing :triumph::basketball:.
            """)
            # Inject CSS to recolour Streamlit's progress bars.
            st.markdown(f"""
            <style>
            .st-g3 {{
                background-color: {PROGRESS_BAR_CUSTOM_COLOR};
            }}
            </style>
            """, unsafe_allow_html=True)
            advanced_box_score = get_advanced_metrics(selected_year)
            selected_player = st.selectbox('Player Name', advanced_box_score['Player'])
            showed_name = False
            if selected_player != '':
                with st.spinner('Loading player summary'):
                    for stat in ADVANCED_BOX_SCORE_COLS[3:]:
                        result = get_player_percentile_from_advanced_stat(advanced_box_score, selected_player, stat)
                        if result.empty:
                            break
                        if not showed_name:
                            player_name = result.iloc[0]['Player']
                            st.markdown(f'#### {player_name} {selected_year} Summary')
                            showed_name = True
                        player_stat = int(result.iloc[0][stat] * 100)
                        st.markdown(f'{stat} - {ordinal(player_stat)} Percentile')
                        st.progress(player_stat)
        # NOTE(review): presumably on/off data is only available from the
        # 1996-97 season onward — confirm against the data source.
        if selected_year >= 1997:
            with st.expander(f'Impact - {selected_year}'):
                st.markdown("""
                ### Impact metrics
                Impact metrics are used to measure a player's impact on the success of a given team. In this selection:
                * **On-Off**: Average difference between the Plus/Minus when player is on the court vs. off the court.
                * **OnCourt**: Plus/Minus Per 100 Possessions (On Court only).
                * **BPM**: A box score estimate of the points per 100 possessions a player contributed above a league-average player, translated to an average team.
                """)
                st.write(gen_on_off_plot(selected_year, best_players))
| tta13/NBA-Stats-Explorer | nba_app.py | nba_app.py | py | 9,140 | python | en | code | 2 | github-code | 36 |
30123704876 | from base.game_2048 import game_2048
import copy
class game_2048_power_state_custom_score(game_2048):
    """2048 variant whose state is the board encoded as powers of two and
    whose score reflects the best immediate gain achievable in one move."""

    def get_state(self):
        """Return the flattened board with each tile encoded as its base-2
        exponent (empty / value-1 tiles become 0)."""
        exponents = []
        for tile in self.board.flatten():
            power = 0
            while tile > 1:
                tile /= 2
                power += 1
            exponents.append(power)
        return exponents

    def get_score(self, with_potential=True):
        """Return the raw score, or (by default) the best one-move score gain.

        Each of the four moves is simulated on a shallow copy of the game;
        the largest score delta, halved, is returned.  Halving compensates
        for the same merge being counted for both opposite directions.
        NOTE(review): copy.copy is shallow — if do_game_round mutates
        self.board in place the simulations may affect the real board;
        confirm game_2048's copy semantics.
        """
        if not with_potential:
            return self.score
        best_gain = 0
        for move in range(4):
            simulation = copy.copy(self)
            simulation.do_game_round(move)
            gain = simulation.get_score(with_potential=False) - self.score
            best_gain = max(best_gain, gain)
        return best_gain // 2
| daneel95/Master_Homework | SecondYear/TextMining/Homework/2048/games/game_2048_power_state_custom_score.py | game_2048_power_state_custom_score.py | py | 1,306 | python | en | code | 0 | github-code | 36 |
24389916144 | import os.path
import warnings
from collections import defaultdict
from itertools import chain
from . import builtin
from .. import options as opts
from .file_types import static_file
from .path import relname
from ..backends.compdb import writer as compdb
from ..backends.make import writer as make
from ..backends.ninja import writer as ninja
from ..build_inputs import build_input, Edge
from ..exceptions import ToolNotFoundError
from ..file_types import *
from ..iterutils import (first, flatten, iterate, listify, slice_dict, uniques,
unlistify)
from ..languages import known_formats
from ..objutils import convert_each, convert_one
from ..platforms import known_native_object_formats
from ..shell import posix as pshell
# Register the default factory for the 'link_options' build input: one
# option list per linking mode.  NOTE(review): the defaultdicts appear to be
# keyed by language by this module's users — confirm before relying on it.
build_input('link_options')(lambda: {
    'dynamic': defaultdict(list), 'static': defaultdict(list)
})
class Link(Edge):
    """Build-graph edge that links object files and libraries into a binary.

    Subclasses choose the link mode (executable, shared library, static
    library) via class attributes; this base class resolves a suitable
    linker, merges options forwarded from libraries, and wires up the
    output file(s).
    """
    msbuild_output = True
    # Extra keyword arguments accepted by the subclass (consumed by callers
    # such as the builtin functions below).
    extra_kwargs = ()
    def __init__(self, context, name, files, libs, packages, link_options,
                 lang=None, extra_deps=None, description=None):
        """Resolve the linker and construct the output file(s) for *name*.

        :param context: build context (provides build data and environment)
        :param name: output name, made relative to the current directory
        :param files: object files to link (each file's extra_objects are
            linked as well)
        :param libs: libraries to link against; options from static libs
            are forwarded recursively
        :param packages: external packages to link against
        :param link_options: user-specified linker options
        :param lang: source-language override; deduced from the inputs
            when None
        :param extra_deps: additional explicit dependencies
        :param description: human-readable description for build output
        :raises ValueError: if there is nothing to link, the inputs mix
            object formats, or no language/linker can be determined
        """
        build = context.build
        name = relname(context, name)
        self.name = self.__name(name)
        self.user_libs = libs
        forward_opts = opts.ForwardOptions.recurse(self.user_libs)
        self.libs = self.user_libs + forward_opts.libs
        self.user_packages = packages
        self.packages = self.user_packages + forward_opts.packages
        self.user_files = files
        self.files = self.user_files + flatten(
            getattr(i, 'extra_objects', []) for i in self.user_files
        )
        # A whole-archive library can supply all the objects by itself.
        if ( len(self.files) == 0 and
             not any(isinstance(i, WholeArchive) for i in self.user_libs) ):
            raise ValueError('need at least one source file')
        self.user_options = link_options
        formats = uniques(i.format for i in chain(self.files, self.libs,
                                                  self.packages))
        if len(formats) > 1:
            raise ValueError('cannot link multiple object formats')
        self.format = formats[0]
        self.input_langs = uniques(chain(
            (i.lang for i in self.files if i.lang is not None),
            (j for i in self.libs for j in iterate(i.lang))
        ))
        if not lang and not self.input_langs:
            raise ValueError('unable to determine language')
        self.langs = [lang] if lang else self.input_langs
        self.linker = self.__find_linker(context.env, formats[0], self.langs)
        # Forward any necessary options to the compile step.
        if hasattr(self.linker, 'compile_options'):
            compile_opts = self.linker.compile_options(self)
        else:
            compile_opts = opts.option_list()
        compile_opts.extend(forward_opts.compile_options)
        for i in self.files:
            if hasattr(i.creator, 'add_extra_options'):
                i.creator.add_extra_options(compile_opts)
        extra_options = self.linker.pre_output(context, name, self)
        self._fill_options(context.env, extra_options, forward_opts)
        output = self.linker.output_file(name, self)
        primary = first(output)
        primary.package_deps.extend(self.packages)
        self._fill_output(output)
        options = self.options
        public_output = self.linker.post_output(context, options, output, self)
        primary.post_install = self.linker.post_install(options, output, self)
        super().__init__(build, output, public_output, extra_deps, description)
        build['defaults'].add(self.public_output)
    @classmethod
    def convert_args(cls, context, name, files, kwargs):
        """Normalize user-facing kwargs and compile *files* to object files.

        Returns the (files, kwargs) pair to pass to the constructor.
        """
        lang = kwargs.get('lang')
        convert_each(kwargs, 'libs', context['library'],
                     kind=cls._preferred_lib, lang=lang)
        convert_each(kwargs, 'packages', context['package'], lang=lang)
        kwargs['link_options'] = pshell.listify(kwargs.get('link_options'),
                                                type=opts.option_list)
        intdir = ('{}.int/'.format(cls.__name(name))
                  if context.build['project']['intermediate_dirs'] else None)
        intdir = kwargs.pop('intermediate_dir', intdir)
        files = context['object_files'](
            files, includes=kwargs.pop('includes', None),
            pch=kwargs.pop('pch', None),
            options=kwargs.pop('compile_options', None),
            libs=kwargs['libs'], packages=kwargs['packages'], lang=lang,
            directory=intdir,
            extra_deps=kwargs.pop('extra_compile_deps', None)
        )
        return files, kwargs
    def _get_linkers(self, env, langs):
        """Yield a linker for each language that provides one; if none do,
        fall back to the linker of the object format's default source
        language."""
        yielded = False
        for i in langs:
            try:
                linker = env.builder(i).linker(self.mode)
                if linker:
                    yielded = True
                    yield linker
            except ToolNotFoundError:
                pass
        if not yielded:
            fmt = ('native' if self.format in known_native_object_formats
                   else self.format)
            src_lang = known_formats[fmt].src_lang
            yield env.builder(src_lang).linker(self.mode)
    @classmethod
    def __name(cls, name):
        """Prepend the subclass's filename prefix (e.g. 'lib') to the basename."""
        head, tail = os.path.split(name)
        return os.path.join(head, cls._prefix + tail)
    def __find_linker(self, env, format, langs):
        """Return the first candidate linker able to link *format*/*langs*."""
        for linker in self._get_linkers(env, langs):
            if linker.can_link(format, langs):
                return linker
        raise ValueError('unable to find linker')
class DynamicLink(Link):
    """Link edge producing an executable; also the base for shared links."""
    desc_verb = 'link'
    base_mode = 'dynamic'
    mode = 'executable'
    msbuild_mode = 'Application'
    _preferred_lib = 'shared'
    _prefix = ''
    extra_kwargs = ('entry_point', 'module_defs')
    def __init__(self, *args, entry_point=None, module_defs=None, **kwargs):
        """Store the optional entry point and module-definition file, then
        defer to Link.__init__."""
        self.entry_point = entry_point
        self.module_defs = module_defs
        super().__init__(*args, **kwargs)
    @classmethod
    def convert_args(cls, context, name, files, kwargs):
        """Convert 'module_defs' to a module-def file, then defer to Link."""
        convert_one(kwargs, 'module_defs', context['module_def_file'])
        return super().convert_args(context, name, files, kwargs)
    @property
    def options(self):
        # Internally-derived options first, then the user's.
        return self._internal_options + self.user_options
    def flags(self, global_options=None):
        """Return the linker's command-line flags for this edge."""
        return self.linker.flags(self.options, global_options, self.raw_output)
    def lib_flags(self, global_options=None):
        """Return the linker's library-related command-line flags."""
        return self.linker.lib_flags(self.options, global_options)
    def _fill_options(self, env, extra_options, forward_opts):
        """Assemble the internal option list: entry point, module defs,
        libraries, package options and options forwarded from static libs."""
        self._internal_options = opts.option_list(
            opts.entry_point(self.entry_point) if self.entry_point else None,
            opts.module_def(self.module_defs) if self.module_defs else None
        )
        if self.linker.needs_libs:
            linkers = self._get_linkers(env, self.input_langs)
            self._internal_options.collect(
                (i.always_libs(i is self.linker) for i in linkers),
                (opts.lib(i) for i in self.libs)
            )
        if self.linker.needs_package_options:
            self._internal_options.collect(i.link_options(self.linker)
                                           for i in self.packages)
        self._internal_options.collect(extra_options,
                                       forward_opts.link_options)
    def _fill_output(self, output):
        """Record each library's runtime file as a runtime dependency."""
        first(output).runtime_deps.extend(
            i.runtime_file for i in self.libs if i.runtime_file
        )
class SharedLink(DynamicLink):
    """Link edge producing a shared library (adds version/soversion support)."""
    desc_verb = 'shared-link'
    mode = 'shared_library'
    msbuild_mode = 'DynamicLibrary'
    _prefix = 'lib'
    extra_kwargs = DynamicLink.extra_kwargs + ('version', 'soversion')

    def __init__(self, *args, version=None, soversion=None, **kwargs):
        self.version = version
        self.soversion = soversion
        # Library versioning only makes sense as a pair: both set or both
        # unset.
        have_version = self.version is not None
        have_soversion = self.soversion is not None
        if have_version != have_soversion:
            raise ValueError('specify both version and soversion or neither')
        super().__init__(*args, **kwargs)
class StaticLink(Link):
    """Link edge that archives object files into a static library.

    Most link options are not applied here; they are recorded in
    forward_opts and applied when a consumer links against the library.
    """
    desc_verb = 'static-link'
    base_mode = 'static'
    mode = 'static_library'
    msbuild_mode = 'StaticLibrary'
    _preferred_lib = 'static'
    _prefix = 'lib'
    extra_kwargs = ('static_link_options',)
    def __init__(self, *args, static_link_options=None, **kwargs):
        """Store the static-linker-only options, then defer to Link.__init__."""
        self.user_static_options = static_link_options
        super().__init__(*args, **kwargs)
    @classmethod
    def convert_args(cls, context, name, files, kwargs):
        """Normalize 'static_link_options' to an option list, then defer to Link."""
        kwargs['static_link_options'] = pshell.listify(
            kwargs.get('static_link_options'), type=opts.option_list
        )
        return super().convert_args(context, name, files, kwargs)
    @property
    def options(self):
        # Only internally-derived options plus the static-linker-specific ones.
        return self._internal_options + self.user_static_options
    def flags(self, global_options=None):
        # Only pass the static-link options to the static linker. The other
        # options are forwarded on to the dynamic linker when this library is
        # used.
        return self.linker.flags(self.options, global_options, self.raw_output)
    def _fill_options(self, env, extra_options, forward_opts):
        """Static links only use the linker's extra options directly."""
        self._internal_options = extra_options
    def _fill_output(self, output):
        """Attach the forwarded options/libs/packages to the primary output
        so consumers linking this library pick them up."""
        primary = first(output)
        primary.forward_opts = opts.ForwardOptions(
            link_options=self.user_options,
            libs=self.user_libs,
            packages=self.user_packages,
        )
        if hasattr(self.linker, 'forwarded_compile_options'):
            primary.forward_opts.compile_options.extend(
                self.linker.forwarded_compile_options(self)
            )
        primary.linktime_deps.extend(self.user_libs)
@builtin.function()
@builtin.type(Executable)
def executable(context, name, files=None, **kwargs):
    """Build an executable from *files*, or reference a pre-built one.

    With no source files and no `libs` kwarg, *name* is treated as an
    already-existing executable on disk.
    """
    if files is None and 'libs' not in kwargs:
        dist = kwargs.pop('dist', True)
        params = [('format', context.env.target_platform.object_format),
                  ('lang', context.build['project']['lang'])]
        return static_file(context, Executable, name, dist, params, kwargs)

    files, kwargs = DynamicLink.convert_args(context, name, files, kwargs)
    return DynamicLink(context, name, files, **kwargs).public_output
@builtin.function()
@builtin.type(SharedLibrary, extra_in_type=DualUseLibrary)
def shared_library(context, name, files=None, **kwargs):
    """Build a shared library, or reference a pre-built/dual-use one.

    A DualUseLibrary argument returns its shared half; with no source files
    and no `libs`, *name* is treated as an existing shared library on disk.
    """
    if isinstance(name, DualUseLibrary):
        if files is not None or not set(kwargs.keys()) <= {'format', 'lang'}:
            raise TypeError('unexpected arguments')
        return name.shared

    if files is None and 'libs' not in kwargs:
        # XXX: What to do for pre-built shared libraries for Windows, which has
        # a separate DLL file?
        dist = kwargs.pop('dist', True)
        params = [('format', context.env.target_platform.object_format),
                  ('lang', context.build['project']['lang'])]
        return static_file(context, SharedLibrary, name, dist, params, kwargs)

    files, kwargs = SharedLink.convert_args(context, name, files, kwargs)
    return SharedLink(context, name, files, **kwargs).public_output
@builtin.function()
@builtin.type(StaticLibrary, extra_in_type=DualUseLibrary)
def static_library(context, name, files=None, **kwargs):
    """Build a static library, or reference a pre-built/dual-use one.

    A DualUseLibrary argument returns its static half; with no source files
    and no `libs`, *name* is treated as an existing static library on disk.
    """
    if isinstance(name, DualUseLibrary):
        unexpected = set(kwargs.keys()) - {'format', 'lang'}
        if files is not None or unexpected:
            raise TypeError('unexpected arguments')
        return name.static

    if files is None and 'libs' not in kwargs:
        dist = kwargs.pop('dist', True)
        params = [('format', context.env.target_platform.object_format),
                  ('lang', context.build['project']['lang'])]
        return static_file(context, StaticLibrary, name, dist, params, kwargs)

    files, kwargs = StaticLink.convert_args(context, name, files, kwargs)
    return StaticLink(context, name, files, **kwargs).public_output
@builtin.function()
@builtin.type(Library, extra_in_type=DualUseLibrary)
def library(context, name, files=None, *, kind=None, **kwargs):
    """Build a library of the requested (or environment-default) kind.

    `kind` may be 'shared', 'static', or 'dual'; when omitted it is derived
    from the environment's enabled library modes. Handles three input forms:
    an existing DualUseLibrary, a pre-built file (no sources/libs), or a
    fresh build from source files.
    """
    explicit_kind = False
    if kind is not None:
        explicit_kind = True
    elif context.env.library_mode.shared and context.env.library_mode.static:
        kind = 'dual'
    elif context.env.library_mode.shared:
        kind = 'shared'
    elif context.env.library_mode.static:
        kind = 'static'

    if isinstance(name, DualUseLibrary):
        if files is not None or not set(kwargs.keys()) <= {'format', 'lang'}:
            raise TypeError('unexpected arguments')
        return name if kind == 'dual' else getattr(name, kind)

    if files is None and 'libs' not in kwargs:
        # Pre-built library on disk; default to static unless explicitly told
        # otherwise (dual-use can't come from a single existing file).
        dist = kwargs.pop('dist', True)
        params = [('format', context.env.target_platform.object_format),
                  ('lang', context.build['project']['lang'])]
        file_type = StaticLibrary
        if explicit_kind:
            if kind == 'shared':
                file_type = SharedLibrary
            elif kind == 'dual':
                raise ValueError(
                    "can't create dual-use libraries from an existing file"
                )

        # XXX: Try to detect if a string refers to a shared lib?
        return static_file(context, file_type, name, dist, params, kwargs)

    if kind is None:
        raise ValueError('unable to create library: both shared and static ' +
                         'modes disabled')

    # Split the kwargs into the shared- and static-specific subsets, keeping
    # the common kwargs in both.
    shared_kwargs = slice_dict(kwargs, SharedLink.extra_kwargs)
    static_kwargs = slice_dict(kwargs, StaticLink.extra_kwargs)
    shared_kwargs.update(kwargs)
    static_kwargs.update(kwargs)

    if kind == 'dual':
        shared_files, shared_kwargs = SharedLink.convert_args(
            context, name, files, shared_kwargs
        )
        shared = SharedLink(context, name, shared_files, **shared_kwargs)
        if not shared.linker.builder.can_dual_link:
            # Fall back to shared-only when the toolchain can't reuse the same
            # object files for both link modes.
            warnings.warn('dual linking not supported with {}'
                          .format(shared.linker.brand))
            return shared.public_output

        # Reuse the shared link's (already-converted) object files.
        static_files, static_kwargs = StaticLink.convert_args(
            context, name, shared_files, static_kwargs
        )
        static = StaticLink(context, name, static_files, **static_kwargs)
        return DualUseLibrary(shared.public_output,
                              static.public_output)
    elif kind == 'shared':
        files, kw = SharedLink.convert_args(context, name, files,
                                            shared_kwargs)
        return SharedLink(context, name, files, **kw).public_output
    else:  # kind == 'static'
        files, kw = StaticLink.convert_args(context, name, files,
                                            static_kwargs)
        return StaticLink(context, name, files, **kw).public_output
@builtin.function()
@builtin.type(WholeArchive, extra_in_type=StaticLibrary)
def whole_archive(context, name, *args, **kwargs):
    """Wrap a static library so that all of its objects are linked in.

    Accepts either an existing StaticLibrary (no extra arguments allowed) or
    the arguments for `static_library`, which is invoked first.
    """
    if not isinstance(name, StaticLibrary):
        return WholeArchive(context['static_library'](name, *args, **kwargs))

    if args or kwargs:
        raise TypeError('unexpected arguments')
    return WholeArchive(name)
@builtin.function()
def global_link_options(context, options, family='native', mode='dynamic'):
    """Append *options* to the global link options for each linker *family*.

    `options` is a shell-style string or list; `mode` selects the dynamic or
    static option bucket.
    """
    # Normalize once instead of re-parsing the same string per family
    # (listify is loop-invariant here).
    option_list = pshell.listify(options)
    for i in iterate(family):
        context.build['link_options'][mode][i].extend(option_list)
def _get_flags(backend, rule, build_inputs, buildfile):
    """Build the per-rule backend variables and command kwargs for a link.

    Returns (variables, cmd_kwargs): `variables` maps backend variables to
    their per-rule values (only when they differ from the globals), and
    `cmd_kwargs` names the variables the linker command should reference.
    """
    variables = {}
    cmd_kwargs = {}

    linker = rule.linker
    if hasattr(linker, 'flags_var') or hasattr(linker, 'libs_var'):
        gopts = build_inputs['link_options'][rule.base_mode][linker.family]

        if hasattr(linker, 'flags_var'):
            global_ldflags, ldflags = backend.flags_vars(
                linker.flags_var,
                linker.global_flags + linker.flags(gopts, mode='global'),
                buildfile
            )
            cmd_kwargs['flags'] = ldflags
            flags = rule.flags(gopts)
            if flags:
                # Rule-specific flags extend (not replace) the globals.
                variables[ldflags] = [global_ldflags] + flags

        if hasattr(linker, 'libs_var'):
            global_ldlibs, ldlibs = backend.flags_vars(
                linker.libs_var,
                linker.global_libs + linker.lib_flags(gopts, mode='global'),
                buildfile
            )
            cmd_kwargs['libs'] = ldlibs
            lib_flags = rule.lib_flags(gopts)
            if lib_flags:
                variables[ldlibs] = [global_ldlibs] + lib_flags

    if hasattr(rule, 'manifest'):
        var = backend.var('manifest')
        cmd_kwargs['manifest'] = var
        variables[var] = rule.manifest

    return variables, cmd_kwargs
@make.rule_handler(StaticLink, DynamicLink, SharedLink)
def make_link(rule, build_inputs, buildfile, env):
    """Emit the Make rules for a link step."""
    linker = rule.linker
    variables, cmd_kwargs = _get_flags(make, rule, build_inputs, buildfile)

    output_params = []
    if linker.num_outputs == 'all':
        # The linker produces all outputs at once; use Make's $@.
        output_vars = make.qvar('@')
    else:
        # One positional recipe argument ($2, $3, ...) per output; $1 is
        # reserved for the input files.
        output_vars = []
        for i in range(linker.num_outputs):
            v = make.var(str(i + 2))
            output_vars.append(v)
            output_params.append(rule.output[i])

    # Define the (shared) recipe for this linker once per buildfile.
    recipename = make.var('RULE_{}'.format(linker.rule_name.upper()))
    if not buildfile.has_variable(recipename):
        buildfile.define(recipename, [linker(
            make.var('1'), output_vars, **cmd_kwargs
        )])

    files = rule.files
    if hasattr(linker, 'transform_input'):
        files = linker.transform_input(files)

    package_build_deps = flatten(i.deps for i in rule.packages)
    module_defs = listify(getattr(rule, 'module_defs', None))
    manifest = listify(getattr(rule, 'manifest', None))
    make.multitarget_rule(
        build_inputs, buildfile,
        targets=rule.output,
        deps=(rule.files + rule.libs + package_build_deps + module_defs +
              manifest + rule.extra_deps),
        order_only=make.directory_deps(rule.output),
        recipe=make.Call(recipename, files, *output_params),
        variables=variables
    )
@ninja.rule_handler(StaticLink, DynamicLink, SharedLink)
def ninja_link(rule, build_inputs, buildfile, env):
    """Emit the Ninja rule and build statement for a link step."""
    linker = rule.linker
    variables, cmd_kwargs = _get_flags(ninja, rule, build_inputs, buildfile)

    if rule.description:
        variables['description'] = rule.description

    if linker.num_outputs == 'all':
        # Linker produces everything at once; use Ninja's $out.
        output_vars = ninja.var('out')
    elif linker.num_outputs == 1:
        output_vars = ninja.var('output')
        variables[output_vars] = rule.output[0]
    else:
        # One named variable ($output1, $output2, ...) per output.
        output_vars = []
        for i in range(linker.num_outputs):
            v = ninja.var('output{}'.format(i + 1))
            output_vars.append(v)
            variables[v] = rule.output[i]

    if hasattr(linker, 'transform_input'):
        # Linker wants transformed inputs; pass them via a custom variable
        # instead of Ninja's $in.
        input_var = ninja.var('input')
        variables[input_var] = linker.transform_input(rule.files)
    else:
        input_var = ninja.var('in')

    # Define the (shared) rule for this linker once per buildfile.
    if not buildfile.has_rule(linker.rule_name):
        buildfile.rule(name=linker.rule_name, command=linker(
            input_var, output_vars, **cmd_kwargs
        ), description=rule.desc_verb + ' => ' + first(output_vars))

    package_build_deps = flatten(i.deps for i in rule.packages)
    module_defs = listify(getattr(rule, 'module_defs', None))
    manifest = listify(getattr(rule, 'manifest', None))
    buildfile.build(
        output=rule.output,
        rule=linker.rule_name,
        inputs=rule.files,
        implicit=(rule.libs + package_build_deps + module_defs + manifest +
                  rule.extra_deps),
        variables=variables
    )
@compdb.rule_handler(StaticLink, DynamicLink, SharedLink)
def compdb_link(rule, build_inputs, buildfile, env):
    """Emit a compilation-database entry for a link step.

    Unlike the Make/Ninja handlers, the full flag lists are expanded inline
    here (compile_commands.json has no variables).
    """
    linker = rule.linker

    cmd_kwargs = {}
    if hasattr(linker, 'flags_var') or hasattr(linker, 'libs_var'):
        gopts = build_inputs['link_options'][rule.base_mode][linker.family]
        if hasattr(linker, 'flags_var'):
            cmd_kwargs['flags'] = (linker.global_flags +
                                   linker.flags(gopts, mode='global') +
                                   rule.flags(gopts))
        if hasattr(linker, 'libs_var'):
            cmd_kwargs['libs'] = (linker.global_libs +
                                  linker.lib_flags(gopts, mode='global') +
                                  rule.lib_flags(gopts))
    if hasattr(rule, 'manifest'):
        cmd_kwargs['manifest'] = rule.manifest

    # The entry's "file": first source if any, otherwise the first user lib
    # (e.g. a sourceless library wrapping a whole-archive).
    file = rule.files[0] if len(rule.files) else rule.user_libs[0]
    in_files = rule.files
    if hasattr(linker, 'transform_input'):
        in_files = linker.transform_input(in_files)

    output = unlistify(rule.output if linker.num_outputs == 'all'
                       else rule.output[0:linker.num_outputs])
    buildfile.append(
        arguments=linker(in_files, output, **cmd_kwargs),
        file=file, output=first(rule.public_output)
    )
# The MSBuild backend is optional; everything below is skipped when its
# writer (or the compile module) can't be imported.
try:
    from .compile import CompileHeader
    from ..backends.msbuild import writer as msbuild

    def _parse_compiler_cflags(compiler, global_options):
        # Global + per-language compile flags, parsed into MSBuild options.
        return compiler.parse_flags(msbuild.textify_each(
            compiler.global_flags +
            compiler.flags(global_options[compiler.lang], mode='global')
        ))

    def _parse_file_cflags(file, global_options, include_compiler=False):
        # Per-file compile flags; optionally prepend the compiler-wide flags
        # (used when files have heterogeneous compilers).
        compiler = file.creator.compiler
        gopts = global_options[compiler.lang]
        cflags = file.creator.flags(gopts)
        if include_compiler:
            cflags = (compiler.global_flags +
                      compiler.flags(gopts, mode='global') +
                      cflags)
        return compiler.parse_flags(msbuild.textify_each(cflags))

    def _parse_ldflags(rule, global_options):
        # Global + per-rule link flags (and lib flags if the linker takes
        # them), parsed into MSBuild link options.
        linker = rule.linker
        gopts = global_options[rule.base_mode][linker.family]
        primary = first(rule.output)

        ldflags = [linker.global_flags + linker.flags(gopts) +
                   rule.flags(gopts)]
        if hasattr(rule.linker, 'libs_var'):
            ldflags.append(linker.global_libs + linker.lib_flags(gopts) +
                           rule.lib_flags(gopts))

        link_options = linker.parse_flags(
            *[msbuild.textify_each(i) for i in ldflags]
        )
        if hasattr(primary, 'import_lib'):
            link_options['import_lib'] = primary.import_lib
        return link_options

    @msbuild.rule_handler(DynamicLink, SharedLink, StaticLink)
    def msbuild_link(rule, build_inputs, solution, env):
        """Emit an MSBuild .vcxproj for a link step (MSVC c/c++/rc only)."""
        if ( any(i not in ['c', 'c++', 'rc'] for i in rule.input_langs) or
             rule.linker.flavor != 'msvc' ):
            raise ValueError('msbuild backend currently only supports c/c++ ' +
                             'with msvc')

        global_compile_opts = build_inputs['compile_options']
        global_link_opts = build_inputs['link_options']

        # Parse compilation flags; if there's only one set of them (i.e. the
        # command_var is the same for every compiler), we can apply these to
        # all the files at once. Otherwise, we need to apply them to each file
        # individually so they all get the correct options.
        obj_creators = [i.creator for i in rule.files]
        compilers = uniques(i.compiler for i in obj_creators)

        if len(uniques(i.command_var for i in compilers)) == 1:
            common_compile_options = _parse_compiler_cflags(
                compilers[0], global_compile_opts
            )
        else:
            common_compile_options = None

        # Everything this project depends on: sources, headers, extra deps,
        # PCH sources, linked libs.
        deps = chain(
            (i.creator.file for i in rule.files),
            chain.from_iterable(i.creator.include_deps for i in rule.files),
            chain.from_iterable(i.creator.extra_deps for i in rule.files),
            filter(None, (getattr(i.creator, 'pch_source', None)
                          for i in rule.files)),
            rule.libs, rule.extra_deps
        )

        def get_source(file):
            # Get the source file for this compilation rule; it's either a
            # regular source file or a PCH source file.
            if isinstance(file.creator, CompileHeader):
                return file.creator.pch_source
            return file.creator.file

        # MSBuild doesn't build anything if it thinks there are no object files
        # to link. This is a problem for building libraries with no sources
        # that link to a whole-archive (a fairly-common way of making a shared
        # library out of a static one). To get around this, explicitly add the
        # whole-archive as an object file to link, in addition to passing
        # `/WHOLEARCHIVE:foo` as usual.
        objs = []
        if not rule.files:
            for i in rule.libs:
                if isinstance(i, WholeArchive):
                    objs.append(i.library)

        # Create the project file.
        project = msbuild.VcxProject(
            env, name=rule.name,
            mode=rule.msbuild_mode,
            output_file=first(rule.output),
            files=[{
                'name': get_source(i),
                'options': _parse_file_cflags(
                    i, global_compile_opts,
                    include_compiler=(common_compile_options is None)
                ),
            } for i in rule.files],
            objs=objs,
            compile_options=common_compile_options,
            link_options=_parse_ldflags(rule, global_link_opts),
            dependencies=solution.dependencies(deps),
        )
        solution[first(rule.public_output)] = project
except ImportError:  # pragma: no cover
    pass
| jimporter/bfg9000 | bfg9000/builtins/link.py | link.py | py | 25,450 | python | en | code | 73 | github-code | 36 |
10865887350 | #
# Python based OpenWhisk action that sleeps for the specified number
# of milliseconds before returning.
# The function actually sleeps slightly longer than requested.
#
# @param parm Object with Number property sleepTimeInMs
# @returns Object with String property msg describing how long the function slept
#
import sys
import time
def main(parm):
    """Sleep for the requested number of milliseconds, then report success.

    Arguments:
    parm -- dict with optional integer 'sleepTimeInMs' (default 1).

    Returns a dict with a 'msg' string describing how long we slept.
    """
    delay_ms = parm.get("sleepTimeInMs", 1)
    print(f"Specified sleep time is {delay_ms} ms.")
    result = {"msg": f"Terminated successfully after around {delay_ms} ms."}
    time.sleep(delay_ms / 1000.0)
    print(result["msg"])
    return result
| seashell-hb/incubator-openwhisk | tests/dat/actions/sleep.py | sleep.py | py | 642 | python | en | code | null | github-code | 36 |
21331900027 | import numpy as np
import pandas as pd
from os.path import join, dirname
from copy import deepcopy
from sostrades_core.tools.base_functions.exp_min import compute_dfunc_with_exp_min, compute_func_with_exp_min
from climateeconomics.core.core_resources.resource_model.resource_model import ResourceModel
from energy_models.core.stream_type.resources_models.resource_glossary import ResourceGlossary
from climateeconomics.core.tools.Hubbert_Curve import compute_Hubbert_regression
class OrderOfMagnitude():
    """Unit-prefix names and their multiplicative conversion factors."""

    KILO = 'k'
    # USD_PER_USton = 'USD/USton'
    # MILLION_TONNES='million_tonnes'

    # Maps each declared prefix to its numeric factor.
    magnitude_factor = {
        KILO: 10 ** 3
        # USD_PER_USton:1/0.907
        # MILLION_TONNES: 10**6
    }
class PlatinumResourceModel(ResourceModel):
    """
    Platinum resource model.

    Specializes ResourceModel for platinum: demand is augmented with an
    exogenous world-consumption series, predictable production follows a
    Hubbert regression, and price interpolates linearly between the initial
    price and `resource_max_price` based on the usable-stock / demand ratio.
    """

    resource_name=ResourceGlossary.Platinum['name']

    def configure_parameters(self, inputs_dict):
        # Model-specific inputs on top of the generic resource setup.
        super().configure_parameters(inputs_dict)
        self.regression_stop = inputs_dict['regression_stop']
        self.world_consumption = inputs_dict['world_consumption']
        self.resource_max_price = inputs_dict['resource_max_price']

    # Unit-conversion factor (identity for platinum; kept for parity with
    # other resource models, e.g. oil's 1E6-barrel-to-Mt conversion).
    conversion_factor = 1

    def convert_demand(self, demand):
        # Demand already in model units, so no conversion is applied.
        self.resource_demand=demand
        self.resource_demand[self.resource_name]=demand[self.resource_name]#*self.conversion_factor

    def get_global_demand(self, demand):
        # Total demand = model demand + exogenous world consumption.
        global_demand = self.world_consumption['total']
        self.resource_demand=demand
        self.resource_demand[self.resource_name]=demand[self.resource_name] + global_demand
        self.conversion_factor = 1

    def compute_predictable_production(self):
        '''
        For each resource_type inside the resource model, compute the
        predictable production through the Hubbert regression function.
        '''
        # Fit the regression only on historical data up to regression_stop.
        centered_resource_production_data = self.resource_production_data.loc[self.resource_production_data['years'] <= self.regression_stop]
        for resource_type in self.sub_resource_list:
            self.predictable_production[resource_type] = compute_Hubbert_regression(
                centered_resource_production_data, self.production_years, self.production_start, resource_type)

    def compute_price(self):
        """
        Price depends on the ratio use_stock/demand:
        price(ratio) = (price_max - price_min) * (1 - ratio) + price_min
        """
        # dataframe initialization: first year gets the reference price,
        # remaining years start at zero and are filled in the loop below.
        self.resource_price['price'] = np.insert(np.zeros(len(self.years)-1), 0, self.resource_price_data.loc[0, 'price'])
        resource_price_dict = self.resource_price.to_dict()
        self.resource_demand = self.resources_demand[['years', self.resource_name]]
        self.get_global_demand(self.resource_demand)
        demand = deepcopy(self.resource_demand[self.resource_name].values)
        # Smooth-clip demand away from zero to avoid division blow-ups.
        demand_limited = compute_func_with_exp_min(
            np.array(demand), 1.0e-10)
        self.ratio_usable_demand = np.maximum(self.use_stock[self.sub_resource_list[0]].values / demand_limited, 1E-15)
        # NOTE(review): `year_cost` holds year values and is used both as a
        # key into resource_price_dict['price'] and (offset by year_start) as
        # an array index -- this assumes the price dataframe is indexed by
        # year; confirm against ResourceModel's dataframe construction.
        for year_cost in self.years[1:] :
            resource_price_dict['price'][year_cost] = \
                (self.resource_max_price - self.resource_price_data.loc[0, 'price']) *\
                (1- self.ratio_usable_demand[year_cost - self.year_start]) + self.resource_price_data.loc[0, 'price']
        self.resource_price= pd.DataFrame.from_dict(resource_price_dict)

    def get_d_price_d_demand (self, year_start, year_end, nb_years, grad_use, grad_price):
        """
        Fill `grad_price` with the analytic gradient of price w.r.t. demand,
        using the quotient rule on price = cst * use / demand.
        Returns the filled `grad_price` matrix.
        """
        ascending_price_resource_list = list(
            self.resource_price_data.sort_values(by=['price'])['resource_type'])
        demand = deepcopy(self.resource_demand[self.resource_name].values)
        demand_limited = compute_func_with_exp_min(np.array(demand), 1.0e-10)
        # Derivative of the smooth-clipped demand (chain-rule factor below).
        grad_demand_limited = compute_dfunc_with_exp_min(np.array(demand), 1.0e-10)
        self.ratio_usable_demand = np.maximum(self.use_stock[self.sub_resource_list[0]].values / demand_limited, 1E-15)
        # # ------------------------------------------------
        # # price is cst *u/v function with u = use and v = demand
        # # price gradient is cst * (u'v - uv') / v^2
        for year_demand in range(year_start + 1, year_end + 1):
            for resource_type in ascending_price_resource_list:
                for year in range(year_start + 1, year_demand + 1):
                    #grad_price = cst * u'v / v^2 (cst < 0)
                    # Only contribute while the ratio is above the floor used
                    # in compute_price (np.maximum clamp at 1E-15).
                    if self.use_stock[self.sub_resource_list[0]][year_demand]/demand_limited[year_demand - year_start] > 1E-15 :
                        grad_price[year_demand - year_start, year - year_start] = \
                            - grad_use[resource_type][year_demand - year_start, year - year_start]
                    ## grad_price -= cst * uv' / v^2 (cst < 0)
                    if year == year_demand :
                        grad_price[year_demand - year_start, year - year_start] += self.use_stock[self.sub_resource_list[0]][year_demand]\
                            * self.conversion_factor / demand_limited[year_demand - year_start]
                    grad_price[year_demand - year_start, year - year_start] = grad_price[year_demand - year_start, year - year_start] *\
                        (self.resource_max_price - self.resource_price_data.loc[0, 'price']) *\
                        grad_demand_limited[year_demand - year_start]/ demand_limited[year_demand - year_start]
        return grad_price
| os-climate/witness-core | climateeconomics/core/core_resources/models/platinum_resource/platinum_resource_model.py | platinum_resource_model.py | py | 5,764 | python | en | code | 7 | github-code | 36 |
31347480186 | from PIL import Image
from gcbmanimation.util.tempfile import TempFileManager
Image.MAX_IMAGE_PIXELS = None
class Frame:
    '''
    Represents a presentation-format image that can be included in an animation.
    A frame usually applies to a particular year and points to an image file on disk.

    Arguments:
    'year' -- the year this Frame applies to.
    'path' -- the path to the image file this Frame represents.
    'scale' -- metres per pixel, or None if unknown / not applicable.
    '''

    def __init__(self, year, path, scale=None):
        self._year = year
        self._path = path
        self._scale = scale

    @property
    def year(self):
        '''The year this Frame applies to.'''
        return self._year

    @property
    def path(self):
        '''The path to the Frame's image file.'''
        return self._path

    @property
    def scale(self):
        '''
        The scale (in metres per pixel) of the image, where None means
        unknown or not applicable.
        '''
        return self._scale

    @property
    def size(self):
        '''The width and height of the image.'''
        return Image.open(self._path).size

    def composite(self, frame, send_to_bottom=False):
        '''
        Combines another RGBA Frame with this one using their alpha channels.

        Arguments:
        'frame' -- the frame to combine with this one.
        'send_to_bottom' -- use the other frame as the background instead of
            this one.

        Returns the merged image as a new Frame with the same year as this one.
        '''
        out_path = TempFileManager.mktmp(suffix=".png")
        this_image = Image.open(self._path)
        other_image = Image.open(frame.path)
        if send_to_bottom:
            Image.alpha_composite(other_image, this_image).save(out_path)
        else:
            Image.alpha_composite(this_image, other_image).save(out_path)

        return Frame(self._year, out_path, self._scale)

    def merge_horizontal(self, *frames):
        '''
        Merges one or more Frames horizontally with this one.

        Arguments:
        'frames' -- one or more Frames to merge horizontally.

        Returns the merged image as a new Frame with the same year as this one.
        '''
        images = [Image.open(self._path)] + [Image.open(frame.path) for frame in frames]
        widths, heights = zip(*(image.size for image in images))
        total_width = sum(widths)
        max_height = max(heights)

        # Opaque white canvas wide enough for all images side by side.
        merged_image = Image.new("RGBA", (total_width, max_height), color=(255, 255, 255, 255))
        x_offset = 0
        for image in images:
            merged_image.paste(image, (x_offset, 0))
            x_offset += image.size[0]

        out_path = TempFileManager.mktmp(suffix=".png")
        merged_image.save(out_path)

        # The merged image covers multiple extents, so no single scale applies.
        return Frame(self._year, out_path, scale=None)

    def resize(self, max_width, max_height):
        '''
        Resizes the image as closely as possible to the specified width and height
        while preserving the aspect ratio.

        Arguments:
        'max_width' -- the new maximum width.
        'max_height' -- the new maximum height.

        Returns the resized image as a new Frame with the same year as this one
        and updated scale reflecting the new pixel size in metres.
        '''
        original_width, original_height = self.size
        aspect_ratio = original_width / original_height
        if aspect_ratio > 1:
            # Landscape: fit the width first, then clamp height if needed.
            new_width = max_width
            new_height = int(new_width / aspect_ratio)
            if new_height > max_height:
                new_height = max_height
                new_width = int(new_height * aspect_ratio)
        else:
            # Portrait/square: fit the height first, then clamp width.
            new_height = max_height
            new_width = int(new_height * aspect_ratio)
            if new_width > max_width:
                new_width = max_width
                new_height = int(new_width / aspect_ratio)

        out_path = TempFileManager.mktmp(suffix=".png")
        # Image.ANTIALIAS was removed in Pillow 10; Image.LANCZOS is the same
        # high-quality resampling filter under its current name.
        Image.open(self.path).resize((new_width, new_height), Image.LANCZOS).save(out_path)

        new_scale = self._scale * (original_width / new_width) if self._scale else None

        return Frame(self._year, out_path, new_scale)
| moja-global/GCBM.Animation | gcbmanimation/animator/frame.py | frame.py | py | 4,162 | python | en | code | 1 | github-code | 36 |
8379980336 | from sys import argv
from panda3d.core import Vec3
from pandac.PandaModules import loadPrcFileData
loadPrcFileData('configurate', 'window-title Loading')
from direct.directbase import DirectStart
from direct.task import Task
from direct.actor.Actor import Actor
from direct.gui.DirectGui import *
from pandac.PandaModules import *
from direct.interval.IntervalGlobal import *
from direct.showbase.InputStateGlobal import inputState
from direct.controls.GravityWalker import GravityWalker
from direct.showbase import DirectObject
from direct.interval.IntervalGlobal import *
import urllib, os, __main__, random
from pandac.PandaModules import *
from random import choice
base.disableMouse()

# Show a splash image while the rest of the game loads; several renderFrame()
# calls are needed to actually flush it to the window before the heavy
# imports/loads below run.
title = OnscreenImage(image='phase_3/maps/Game_Toontown_Logo_1.jpg', pos=(0, 0, 0.0), parent=render2d)
base.graphicsEngine.renderFrame()
base.graphicsEngine.renderFrame()
base.graphicsEngine.renderFrame()
base.graphicsEngine.renderFrame()
title.destroy()

# Retitle the window (it was created as 'Loading' via the prc data above).
props = WindowProperties()
props.setTitle('League Of Toons')
base.win.requestProperties(props)
from panda3d.core import *
class guitools():
    """Helpers for building 2-D GUI widgets: textured cards and buttons."""

    def createFrame(self, filepath, resX = None, resY = None):
        """Create a textured card NodePath from an image file.

        resX/resY default to the texture's original file dimensions; the card
        is scaled down relative to a 600-pixel reference resolution.
        """
        yresolution = 600
        tex = loader.loadTexture(filepath)
        tex.setBorderColor(Vec4(0, 0, 0, 0))
        tex.setWrapU(Texture.WMRepeat)
        tex.setWrapV(Texture.WMRepeat)
        cm = CardMaker(filepath + ' card')
        if resX is None:
            resX = tex.getOrigFileXSize()
        if resY is None:
            # Bug fix: the Panda3D accessor is getOrigFileYSize (capital Y);
            # the old getOrigFileySize spelling raised AttributeError whenever
            # resY was omitted.
            resY = tex.getOrigFileYSize()
        cm.setFrame(-resX, resX, -resY, resY)
        card = NodePath(cm.generate())
        card.setTexture(tex)
        card.flattenLight()
        card.setScale(card.getScale() / yresolution)
        return card

    def createButton(self, cmd, position, hpr, model, buttonImgs, scale, colour = (1, 1, 1, 1), text = ''):
        """Create a DirectButton using up/down/rollover geometry found in
        *model* under the names given in *buttonImgs*."""
        ButtonImage = loader.loadModel(model)
        ButtonImageUp = ButtonImage.find('**/' + buttonImgs[0])
        ButtonImageDown = ButtonImage.find('**/' + buttonImgs[1])
        ButtonImageRollover = ButtonImage.find('**/' + buttonImgs[-1])
        return DirectButton(frameSize=None, image=(ButtonImageUp, ButtonImageDown, ButtonImageRollover), relief=None, command=cmd, geom=None, pad=(0.01, 0.01), text=text, suppressKeys=0, pos=position, hpr=hpr, text_fg=(1, 1, 1, 1), color=colour, text_scale=0.059, borderWidth=(0.13, 0.01), scale=scale)
class ClassicBook():
    """In-game book UI: a toggle button that opens/closes a full-screen book
    overlay and drives the local avatar's book animations."""

    # Shared GUI model containing the book icon geometry.
    Book = loader.loadModel('phase_3.5/models/gui/stickerbook_gui.bam')

    def __beingOpened__(self):
        self.BookClose.hide()
        # Avatar animation is best-effort: base.localAvatar may not exist yet.
        try:
            base.localAvatar.b_setAnimState('OpenBook')
        except:
            pass
        return True

    def __openBook__(self):
        # Hide the toggle button, wait for the open animation, then build the
        # book pages.
        seq = Sequence()
        seq.append(Func(self.__beingOpened__))
        seq.append(Wait(0.3))
        seq.append(Func(self.__addNavs__))
        seq.start()
        return True

    def __delNavs__(self):
        # Tear down the open-book UI and restore the closed-book button.
        self.bg[0].hide()
        self.bg[1].hide()
        self.BookOpen.removeNode()
        self.__addOnButton__()
        try:
            base.localAvatar.b_setAnimState('CloseBook')
            base.localAvatar.physControls.enableAvatarControls()
        except:
            pass
        return True

    def __addNavs__(self):
        self.BookClose.removeNode()
        self.bg = []
        # NOTE(review): each loop pass appends TWO frames, so self.bg ends up
        # with four entries, yet only bg[0]/bg[1] are reparented/positioned
        # and hidden in __delNavs__ -- confirm this is intentional.
        for b in range(2):
            self.bg.append(Guitools.createFrame('phase_3.5/maps/Book.jpg', base.win.getXSize() + 160, base.win.getYSize() + 125))
            self.bg.append(Guitools.createFrame('phase_3.5/maps/big_book.jpg',600,450))
            self.bg[b].reparentTo(aspect2d)
        self.bg[0].setPos(0, 0, 0)
        self.bg[1].setPos(0, 0, 0.1)
        self.BookOpen = DirectButton(frameSize=None, image=(self.Book.find('**/BookIcon_OPEN'), self.Book.find('**/BookIcon_CLSD'), self.Book.find('**/BookIcon_RLVR2')), relief=None, command=self.__delNavs__, text='', text_pos=(0, -0.015), geom=None, scale=0.305, pad=(0.01, 0.01), suppressKeys=0, pos=(1.16, 0, -0.83), hpr=(0, 0, 0), text_scale=0.06, borderWidth=(0.015, 0.01))
        try:
            base.localAvatar.b_setAnimState('ReadBook')
        except:
            pass
        return True

    def __addOnButton__(self):
        # Closed-book icon that re-opens the book when clicked.
        self.BookClose = DirectButton(frameSize=None, image=(self.Book.find('**/BookIcon_CLSD'), self.Book.find('**/BookIcon_OPEN'), self.Book.find('**/BookIcon_RLVR')), relief=None, command=self.__openBook__, text='', text_pos=(0, -0.015), geom=None, scale=0.305, pad=(0.01, 0.01), suppressKeys=0, pos=(1.16, 0, -0.83), hpr=(0, 0, 0), text_scale=0.06, borderWidth=(0.015, 0.01))
        return True

    def __init__(self):
        # Start in the closed state: only the book icon is visible.
        self.__addOnButton__()
# Module-level singletons: GUI helper, the book UI, and Panda's default
# mouse-driven camera disabled (we drive the camera from the avatar below).
Guitools = guitools()
BookGui = ClassicBook()
base.disableMouse()
# (animation name, phase number, file stem) shared by the legs and torso
# rigs.  The stem sometimes differs from the animation name (legacy asset
# file names), so both are listed explicitly.
_ANIM_SPECS = (
    ('right-hand-start', '3.5', 'right-hand-start'),
    ('firehose', '5', 'firehose'),
    ('rotateL-putt', '6', 'rotateL-putt'),
    ('slip-forward', '4', 'slip-forward'),
    ('catch-eatnrun', '4', 'eatnrun'),
    ('tickle', '5', 'tickle'),
    ('water-gun', '5', 'water-gun'),
    ('leverNeutral', '10', 'leverNeutral'),
    ('swim', '4', 'swim'),
    ('catch-run', '4', 'gamerun'),
    ('sad-neutral', '4', 'sad-neutral'),
    ('pet-loop', '4', 'petloop'),
    ('jump-squat', '3.5', 'jump-zstart'),
    ('wave', '3.5', 'wave'),
    ('reel-neutral', '4', 'reelneutral'),
    ('pole-neutral', '4', 'poleneutral'),
    ('bank', '5.5', 'jellybeanJar'),
    ('scientistGame', '4', 'scientistGame'),
    ('right-hand', '3.5', 'right-hand'),
    ('lookloop-putt', '6', 'lookloop-putt'),
    ('victory', '3.5', 'victory-dance'),
    ('lose', '5', 'lose'),
    ('cringe', '3.5', 'cringe'),
    ('right', '4', 'right'),
    ('headdown-putt', '6', 'headdown-putt'),
    ('conked', '3.5', 'conked'),
    ('jump', '3.5', 'jump'),
    ('into-putt', '6', 'into-putt'),
    ('fish-end', '4', 'fishEND'),
    ('running-jump-land', '3.5', 'leap_zend'),
    ('shrug', '3.5', 'shrug'),
    ('sprinkle-dust', '5', 'sprinkle-dust'),
    ('hold-bottle', '5', 'hold-bottle'),
    ('takePhone', '5.5', 'takePhone'),
    ('melt', '5', 'melt'),
    ('pet-start', '4', 'petin'),
    ('look-putt', '6', 'look-putt'),
    ('loop-putt', '6', 'loop-putt'),
    ('good-putt', '6', 'good-putt'),
    ('juggle', '5', 'juggle'),
    ('run', '3', 'run'),
    ('pushbutton', '3.5', 'press-button'),
    ('sidestep-right', '3.5', 'jump-back-right'),
    ('water', '5.5', 'water'),
    ('right-point-start', '3.5', 'right-point-start'),
    ('bad-putt', '6', 'bad-putt'),
    ('struggle', '5', 'struggle'),
    ('running-jump', '3.5', 'running-jump'),
    ('callPet', '5.5', 'callPet'),
    ('throw', '3.5', 'pie-throw'),
    ('catch-eatneutral', '4', 'eat_neutral'),
    ('tug-o-war', '4', 'tug-o-war'),
    ('bow', '4', 'bow'),
    ('swing', '4', 'swing'),
    ('climb', '5', 'climb'),
    ('scientistWork', '4', 'scientistWork'),
    ('think', '4', 'think'),
    ('catch-intro-throw', '4', 'gameThrow'),
    ('walk', '3.5', 'walk'),
    ('down', '4', 'down'),
    ('pole', '4', 'pole'),
    ('periscope', '3.5', 'periscope'),
    ('duck', '3.5', 'duck'),
    ('curtsy', '4', 'curtsy'),
    ('jump-land', '3.5', 'jump-zend'),
    ('loop-dig', '5.5', 'loop_dig'),
    ('angry', '3.5', 'angry'),
    ('bored', '4', 'bored'),
    ('swing-putt', '6', 'swing-putt'),
    ('pet-end', '4', 'petend'),
    ('spit', '5', 'spit'),
    ('right-point', '3.5', 'right-point'),
    ('start-dig', '5.5', 'into_dig'),
    ('castlong', '4', 'castlong'),
    ('confused', '4', 'confused'),
    ('neutral', '3', 'neutral'),
    ('jump-idle', '3.5', 'jump-zhang'),
    ('reel', '4', 'reel'),
    ('slip-backward', '4', 'slip-backward'),
    ('sound', '5', 'shout'),
    ('sidestep-left', '3.5', 'sidestep-left'),
    ('up', '4', 'up'),
    ('fish-again', '4', 'fishAGAIN'),
    ('cast', '4', 'cast'),
    ('phoneBack', '5.5', 'phoneBack'),
    ('phoneNeutral', '5.5', 'phoneNeutral'),
    ('scientistJealous', '4', 'scientistJealous'),
    ('battlecast', '4', 'fish'),
    ('sit-start', '4', 'intoSit'),
    ('toss', '5', 'toss'),
    ('happy-dance', '5', 'happy-dance'),
    ('running-jump-squat', '3.5', 'leap_zstart'),
    ('teleport', '3.5', 'teleport'),
    ('sit', '4', 'sit'),
    ('sad-walk', '4', 'losewalk'),
    ('give-props-start', '3.5', 'give-props-start'),
    ('book', '3.5', 'book'),
    ('running-jump-idle', '3.5', 'leap_zhang'),
    ('scientistEmcee', '4', 'scientistEmcee'),
    ('leverPull', '10', 'leverPull'),
    ('tutorial-neutral', '3.5', 'tutorial-neutral'),
    ('badloop-putt', '6', 'badloop-putt'),
    ('give-props', '3.5', 'give-props'),
    ('hold-magnet', '5', 'hold-magnet'),
    ('hypnotize', '5', 'hypnotize'),
    ('left-point', '3.5', 'left-point'),
    ('leverReach', '10', 'leverReach'),
    ('feedPet', '5.5', 'feedPet'),
    ('reel-H', '4', 'reelH'),
    ('applause', '4', 'applause'),
    ('smooch', '5', 'smooch'),
    ('rotateR-putt', '6', 'rotateR-putt'),
    ('fish-neutral', '4', 'fishneutral'),
    ('push', '9', 'push'),
    ('catch-neutral', '4', 'gameneutral'),
    ('left', '4', 'left'),
)


def _buildAnimDict(part):
    """Map animation names to .bam paths for the given body-part prefix.

    The two original hand-written dicts were exact mirrors of each other
    differing only in the part prefix; building both from one spec table
    keeps them from drifting apart.
    """
    return dict((name, 'phase_%s/models/char/tt_a_chr_%s_%s.bam' % (phase, part, stem))
                for name, phase, stem in _ANIM_SPECS)


# 'dgs' = short-legs assets, 'dgl' = long-torso assets (reused dog rigs).
legsAnimDict = _buildAnimDict('dgs_shorts_legs')
torsoAnimDict = _buildAnimDict('dgl_shorts_torso')
# Load the duck body part models and strip the variants we do not use.
DuckHead = loader.loadModel('phase_3/models/char/duck-heads-1000.bam')
# Remove the "long" head variants, keeping only the short-head geometry.
otherParts = DuckHead.findAllMatches('**/*long*')
for partNum in range(0, otherParts.getNumPaths()):
    otherParts.getPath(partNum).removeNode()
# Keep only the neutral muzzle; every other muzzle expression is removed.
ntrlMuzzle = DuckHead.find('**/*muzzle*neutral')
otherParts = DuckHead.findAllMatches('**/*muzzle*')
for partNum in range(0, otherParts.getNumPaths()):
    part = otherParts.getPath(partNum)
    if part != ntrlMuzzle:
        otherParts.getPath(partNum).removeNode()
DuckTorso = loader.loadModel('phase_3/models/char/tt_a_chr_dgl_shorts_torso_1000.bam')
DuckLegs = loader.loadModel('phase_3/models/char/tt_a_chr_dgs_shorts_legs_1000.bam')
# Drop boot and shoe geometry so the bare feet show.
otherParts = DuckLegs.findAllMatches('**/boots*') + DuckLegs.findAllMatches('**/shoes')
for partNum in range(0, otherParts.getNumPaths()):
    otherParts.getPath(partNum).removeNode()
# Assemble the multi-part avatar: head/torso/legs, animated via the two
# shared anim dicts (the head part has no animations of its own).
DuckBody = Actor({'head': DuckHead,
 'torso': DuckTorso,
 'legs': DuckLegs}, {'torso': torsoAnimDict,
 'legs': legsAnimDict})
DuckBody.attach('head', 'torso', 'def_head')
DuckBody.attach('torso', 'legs', 'joint_hips')
# Collect the body sub-part node collections for colouring/texturing below.
gloves = DuckBody.findAllMatches('**/hands')
ears = DuckBody.findAllMatches('**/*ears*')
head = DuckBody.findAllMatches('**/head-*')
sleeves = DuckBody.findAllMatches('**/sleeves')
shirt = DuckBody.findAllMatches('**/torso-top')
shorts = DuckBody.findAllMatches('**/torso-bot')
neck = DuckBody.findAllMatches('**/neck')
arms = DuckBody.findAllMatches('**/arms')
legs = DuckBody.findAllMatches('**/legs')
feet = DuckBody.findAllMatches('**/feet')
# Index map for bodyNodes: 0 gloves, 1 head, 2 ears, 3 sleeves, 4 shirt,
# 5 shorts, 6 neck, 7 arms, 8 legs, 9 feet.
bodyNodes = []
bodyNodes += [gloves]
bodyNodes += [head, ears]
bodyNodes += [sleeves, shirt, shorts]
bodyNodes += [neck,
 arms,
 legs,
 feet]
# Flat RGBA colours per part (sleeves/shirt/shorts get textures below).
bodyNodes[0].setColor(1, 1, 1, 1)
bodyNodes[1].setColor(1, 0.5, 0, 1)
bodyNodes[2].setColor(1, 0.5, 0, 1)
bodyNodes[3].setColor(0.264, 0.308, 0.676, 1)
bodyNodes[4].setColor(0.264, 0.308, 0.676, 1)
bodyNodes[5].setColor(1, 1, 1, 1)
bodyNodes[6].setColor(1, 0.5, 0, 1)
bodyNodes[7].setColor(1, 0.5, 0, 1)
bodyNodes[8].setColor(0.276, 0.872, 0.36, 1)
bodyNodes[9].setColor(0.276, 0.872, 0.36, 1)
# Clothing textures applied on top of the flat colours.
topTex = loader.loadTexture('phase_3/maps/desat_shirt_5.jpg')
botTex = loader.loadTexture('phase_4/maps/CowboyShorts1.jpg')
sleeveTex = loader.loadTexture('phase_3/maps/desat_sleeve_5.jpg')
bodyNodes[3].setTexture(sleeveTex, 1)
bodyNodes[4].setTexture(topTex, 1)
bodyNodes[5].setTexture(botTex, 1)
DuckBody.reparentTo(render)
# Shrink the avatar slightly on X and Z (first geom child only).
geom = DuckBody.getGeomNode()
geom.getChild(0).setSx(0.730000019073)
geom.getChild(0).setSz(0.730000019073)
# Vertical camera offset; the camera rides behind and above the avatar.
offset = 3.2375
base.camera.reparentTo(DuckBody)
base.camera.setPos(0, -9.0 - offset, offset)
# Collision categories: walls use bit 0, floors bit 1.
wallBitmask = BitMask32(1)
floorBitmask = BitMask32(2)
base.cTrav = CollisionTraverser()
base.camera.hide()
def getAirborneHeight():
    """Height above the floor at which the avatar counts as airborne."""
    # A small epsilon above the avatar's camera/vertical offset.
    epsilon = 0.025
    return offset + epsilon
# Gravity-based character controller driving the avatar from key input.
walkControls = GravityWalker(legacyLifter=True)
walkControls.setWallBitMask(wallBitmask)
walkControls.setFloorBitMask(floorBitmask)
# Speeds: presumably (forward, jump, reverse, rotate) — TODO confirm
# against the GravityWalker.setWalkSpeed signature.
walkControls.setWalkSpeed(16.0, 24.0, 8.0, 80.0)
walkControls.initializeCollisions(base.cTrav, DuckBody, floorOffset=0.025, reach=4.0)
walkControls.setAirborneHeightFunc(getAirborneHeight)
walkControls.enableAvatarControls()
# Expose the controller on the avatar node for the rest of the script.
DuckBody.physControls = walkControls
def setWatchKey(key, input, keyMapName):
    """Bind *key* so press/release mirrors into inputState and keyMap.

    NOTE(review): `input` shadows the builtin of the same name; kept to
    preserve the original signature.
    """
    def watchKey(active = True):
        # True on key-down, False on key-up (see the accept() calls below).
        pressed = (active == True)
        inputState.set(input, pressed)
        keyMap[keyMapName] = int(pressed)
    base.accept(key, watchKey, [True])
    base.accept(key + '-up', watchKey, [False])
# Key state shared with the animation task: 1 while the key is held.
keyMap = {'left': 0,
 'right': 0,
 'forward': 0,
 'backward': 0,
 'control': 0}
# Bind each arrow key alone and with every modifier, so movement keeps
# working while ctrl/alt/shift are held.  (Replaces 16 copy-pasted calls.)
for arrowKey, inputName, mapName in (('arrow_up', 'forward', 'forward'),
                                     ('arrow_down', 'reverse', 'backward'),
                                     ('arrow_left', 'turnLeft', 'left'),
                                     ('arrow_right', 'turnRight', 'right')):
    for modifier in ('', 'control-', 'alt-', 'shift-'):
        setWatchKey(modifier + arrowKey, inputName, mapName)
setWatchKey('control', 'jump', 'control')
# Current-animation flags consumed by setMovementAnimation/handleMovement.
movingNeutral, movingForward = (False, False)
movingRotation, movingBackward = (False, False)
movingJumping = False
def setMovementAnimation(loopName, playRate = 1.0):
    """Loop *loopName* on the avatar and record which state is playing.

    Exactly one of the moving* flags is set (or none for unknown loops);
    handleMovement uses them to avoid restarting the same animation.
    """
    global movingRotation
    global movingBackward
    global movingForward
    global movingNeutral
    global movingJumping
    # Reset every flag, then raise the one matching the requested loop.
    movingJumping = False
    movingForward = False
    movingNeutral = False
    movingRotation = False
    movingBackward = False
    if 'jump' in loopName:
        movingJumping = True
    elif loopName == 'run':
        movingForward = True
    elif loopName == 'walk':
        # Walking backwards is the walk loop played in reverse.
        if playRate == -1.0:
            movingBackward = True
        else:
            movingRotation = True
    elif loopName == 'neutral':
        movingNeutral = True
    ActorInterval(DuckBody, loopName, playRate=playRate).loop()
def handleMovement(task):
    """Per-frame task: pick the avatar animation matching the key state.

    Chooses between jump/run/walk/neutral loops based on keyMap, the
    moving* flags (so a playing loop is not restarted every frame) and
    whether the physics controller reports the avatar airborne.
    Returns Task.cont so the task manager keeps scheduling it.
    """
    if keyMap['control'] == 1:
        # Jump key held.
        if keyMap['forward'] or keyMap['backward'] or keyMap['left'] or keyMap['right']:
            if movingJumping == False:
                if DuckBody.physControls.isAirborne:
                    setMovementAnimation('running-jump-idle')
                elif keyMap['forward']:
                    if movingForward == False:
                        setMovementAnimation('run')
                elif keyMap['backward']:
                    if movingBackward == False:
                        setMovementAnimation('walk', playRate=-1.0)
                elif keyMap['left'] or keyMap['right']:
                    if movingRotation == False:
                        setMovementAnimation('walk')
            elif not DuckBody.physControls.isAirborne:
                if keyMap['forward']:
                    if movingForward == False:
                        setMovementAnimation('run')
                elif keyMap['backward']:
                    if movingBackward == False:
                        setMovementAnimation('walk', playRate=-1.0)
                elif keyMap['left'] or keyMap['right']:
                    if movingRotation == False:
                        setMovementAnimation('walk')
        elif movingJumping == False:
            if DuckBody.physControls.isAirborne:
                setMovementAnimation('jump-idle')
            elif movingNeutral == False:
                setMovementAnimation('neutral')
        elif not DuckBody.physControls.isAirborne:
            if movingNeutral == False:
                setMovementAnimation('neutral')
            # fsrun is the looping background music created later at module
            # level; the task only runs after it exists.
            fsrun.stop()
    elif keyMap['forward'] == 1:
        if movingForward == False:
            if not DuckBody.physControls.isAirborne:
                setMovementAnimation('run')
    elif keyMap['backward'] == 1:
        if movingBackward == False:
            if not DuckBody.physControls.isAirborne:
                setMovementAnimation('walk', playRate=-1.0)
    elif keyMap['left'] or keyMap['right']:
        if movingRotation == False:
            if not DuckBody.physControls.isAirborne:
                setMovementAnimation('walk')
        # (A loadSfx of the walk loop used to run here every frame; the
        # result was bound to an unused local, so it has been removed.)
    elif not DuckBody.physControls.isAirborne:
        if movingNeutral == False:
            setMovementAnimation('neutral')
    return Task.cont
# Drive the movement animation state machine every frame.
base.taskMgr.add(handleMovement, 'controlManager')
def collisionsOn():
    """Enable avatar collision handling and flag the avatar airborne."""
    controls = DuckBody.physControls
    controls.setCollisionsActive(True)
    controls.isAirborne = True
def collisionsOff():
    """Disable avatar collision handling and flag the avatar airborne."""
    controls = DuckBody.physControls
    controls.setCollisionsActive(False)
    controls.isAirborne = True
def toggleCollisions():
    """Flip avatar collision handling on/off (debug helper, bound to F1)."""
    controls = DuckBody.physControls
    controls.setCollisionsActive(not controls.getCollisionsActive())
    # Either way the avatar is re-flagged airborne so the lifter resettles.
    controls.isAirborne = True
# F1 toggles collisions; expose the collision helpers on the avatar node.
base.accept('f1', toggleCollisions)
DuckBody.collisionsOn = collisionsOn
DuckBody.collisionsOff = collisionsOff
DuckBody.toggleCollisions = toggleCollisions
# Loop the Toontown theme as background music.
fsrun = loader.loadSfx('phase_3/audio/bgm/tt_theme.mid')
fsrun.setLoop(True)
fsrun.play()
# Publish the duck as the local avatar and drop it onto the floor.
localAvatar = DuckBody
base.localAvatar = localAvatar
localAvatar.physControls.placeOnFloor()
# Debug overlay off by default (updateOnScreenDebug is defined below but
# never added as a task here).
onScreenDebug.enabled = False
def updateOnScreenDebug(task):
    """Push the avatar's transform to the on-screen debug overlay."""
    position = localAvatar.getPos()
    angle = localAvatar.getHpr()
    onScreenDebug.add('Avatar Position', position)
    onScreenDebug.add('Avatar Angle', angle)
    return Task.cont
# Display font used for in-world sign text (e.g. the Pet Shop sign).
MickeyFont = loader.loadFont('phase_3/models/fonts/MickeyFont.bam')
class EnvironmentTTC():
    def __init__(self):
        # name -> loaded model NodePath, filled by the model-loader helpers.
        self.modeldict = {}
        self.LoadTTC()
def LoadTTC(self):
self.modelloader('Sky', 'phase_3.5/models/props/TT_sky.bam', render, 0, 0, 0, 0, 0, 0, 5, 5, 5)
Clouds1 = self.modeldict['Sky'].find('**/cloud1')
Clouds2 = self.modeldict['Sky'].find('**/cloud2')
Clouds1.setScale(0.6, 0.6, 0.6)
Clouds2.setScale(0.9, 0.9, 0.9)
Clouds1Spin = Clouds1.hprInterval(360, Vec3(60, 0, 0))
Clouds1Spin.loop()
Clouds2Spin = Clouds2.hprInterval(360, Vec3(-60, 0, 0))
Clouds2Spin.loop()
self.modelloader('TTC', 'phase_4/models/neighborhoods/toontown_central.bam', render, 0, 0, 0, -90, 0, 0, 1, 1, 1)
self.modeldict['TTC'].setTransparency(TransparencyAttrib.MBinary, 1)
self.modelloader('ToonHQ', 'phase_3.5/models/modules/hqTT.bam', render, 24.6425, 24.8587, 4.00001, 135, 0, 0, 1, 1, 1)
self.modeldict['ToonHQ'].find('**/doorFrameHoleRight_0').hide()
self.modeldict['ToonHQ'].find('**/doorFrameHoleLeft_0').hide()
self.modeldict['ToonHQ'].find('**/doorFrameHoleRight_1').hide()
self.modeldict['ToonHQ'].find('**/doorFrameHoleLeft_1').hide()
self.modelloader('Partygate', 'phase_4/models/modules/partyGate_TT.bam', render, 77.935, -159.939, 2.70141, 195, 0, 0, 1, 1, 1)
self.modelloader('Petshop', 'phase_4/models/modules/PetShopExterior_TT.bam', render, -124.375, 74.3749, 0.5, 49, 0, 0, 1, 1, 1)
self.modelloaderanimate('PetshopFish', 'phase_4/models/props/exteriorfish-zero.bam', 'phase_4/models/props/exteriorfish-swim.bam', self.modeldict['Petshop'], 0, 0, 0, 0, 0, 0, 1, 1, 1, 'swim')
Petsign1 = self.modeldict['Petshop'].find('**/sign_origin')
self.textloader('Pettext2', 'Pettextnode2', 'Pettextname2', 'Pet Shop', MickeyFont, Petsign1, -5, -0.2, 0.2, 0, 0, 0, 2, 2, 2, 0.9, 0.88, 0.1)
Petdoor = self.modeldict['Petshop'].find('**/door_origin')
self.modelloadercopyto('Door1', 'phase_3.5/models/modules/doors_practical.bam', 'door_double_round_ur', Petdoor, 0, -0.1, 0, 0, 0, 0, 1, 1, 1)
self.modeldict['Door1'].setColor(1, 0.87, 0.38)
self.modelloader('Clothingshop', 'phase_4/models/modules/clothshopTT.bam', render, 106.265, 160.831, 3, -30, 0, 0, 1, 1, 1)
Clothingsign1 = self.modeldict['Clothingshop'].find('**/sign_origin')
self.textloader('Clothingtext2', 'Clothingtextnode2', 'Clothingtextname2', 'Clothing Shop', MickeyFont, Clothingsign1, -6.7, -0.2, 0.1, 0, 0, 0, 1.5, 1.5, 1.5, 0.88, 0.45, 0.38)
Clothingdoor = self.modeldict['Clothingshop'].find('**/door_origin')
self.modelloadercopyto('Door2', 'phase_3.5/models/modules/doors_practical.bam', 'door_double_clothshop', Clothingdoor, 0, -0.1, 0, 0, 0, 0, 1, 1, 1)
self.modeldict['Door2'].setColor(0.88, 0.45, 0.38)
self.modelloader('Toonhall', 'phase_4/models/modules/toonhall.bam', render, 116.66, 24.29, 4, -90, 0, 0, 1, 1, 1)
Hallsign = self.modeldict['Toonhall'].find('**/sign_origin')
self.textloader('Halltext1', 'Halltextnode1', 'Halltextname1', 'Mickey', MickeyFont, Hallsign, -5, -0.2, -0.5, 0, 0, 0, 2.5, 2.5, 2.5, 0.9, 0.88, 0.1)
self.textloader('Halltext2', 'Halltextnode2', 'Halltextname2', 'Toon Hall', MickeyFont, Hallsign, -7, -0.2, -3, 0, 0, 0, 2.5, 2.5, 2.5, 0.9, 0.88, 0.1)
Halldoor = self.modeldict['Toonhall'].find('**/toonhall_door_origin')
self.modelloadercopyto('Door3', 'phase_3.5/models/modules/doors_practical.bam', 'door_double_round_ur', Halldoor, 0, -0.1, 0, 0, 0, 0, 1, 1, 1)
self.modeldict['Door3'].setColor(0.88, 0.45, 0.38)
self.modelloader('Schoolhouse', 'phase_4/models/modules/school_house.bam', render, 129.919, -138.445, 2.4997, -140, 0, 0, 1, 1, 1)
Schoolsign = self.modeldict['Schoolhouse'].find('**/sign_origin')
self.modelloadercopyto('Schoolsign', 'phase_4/models/props/signs_TTC.bam', 'TTC_sign3', Schoolsign, 1, -0.05, 3.7, 0, 0, 0, 1, 1, 1)
self.textloader('Schooltext1', 'Schooltextnode1', 'Schooltextname1', 'Toontown', MickeyFont, Schoolsign, -2.5, -0.07, 4.8, 0, 0, 0, 1, 1, 1, 0.9, 0.88, 0.4)
self.textloader('Schooltext2', 'Schooltextnode2', 'Schooltextname2', 'School House', MickeyFont, Schoolsign, -4.8, -0.07, 3, 0, 0, 0, 1.4, 1.4, 1.4, 0.9, 0.5, 0.1)
Schooldoor = self.modeldict['Schoolhouse'].find('**/school_door_origin')
self.modelloadercopyto('Door4', 'phase_3.5/models/modules/doors_practical.bam', 'door_double_square_ul', Schooldoor, 0, -0.1, 0, 0, 0, 0, 1, 1, 1)
self.modeldict['Door4'].setColor(1, 0.63, 0.38)
self.modelloader('Bank', 'phase_4/models/modules/bank.bam', render, 57.1796, 38.6656, 4, 0, 0, 0, 1, 1, 1)
Banksign = self.modeldict['Bank'].find('**/sign_origin')
self.textloader('Banktext1', 'Banktextnode1', 'Banktextname1', 'Bank', MickeyFont, Banksign, -3.1, -0.2, -1, 0, 0, 0, 2.5, 2.5, 2.5, 0.9, 0.6, 0.1)
Bankdoor = self.modeldict['Bank'].find('**/bank_door_origin')
self.modelloadercopyto('Door5', 'phase_3.5/models/modules/doors_practical.bam', 'door_double_round_ur', Bankdoor, 0, -0.1, 0, 0, 0, 0, 1, 1, 1)
self.modeldict['Door5'].setColor(0.88, 0.45, 0.38)
self.modelloader('Library', 'phase_4/models/modules/library.bam', render, 91.4475, -44.9255, 4, 180, 0, 0, 1, 1, 1)
Librarysign = self.modeldict['Library'].find('**/sign_origin')
self.modelloadercopyto('Librarysign', 'phase_4/models/props/signs_TTC.bam', 'TTC_sign3', Librarysign, 1.7, -0.05, 3.7, 0, 0, 0, 1, 1, 1)
self.textloader('Librarytext1', 'Librarytextnode1', 'Librarytextname1', 'Toontown', MickeyFont, Librarysign, -1.5, -0.07, 4.8, 0, 0, 0, 1, 1, 1, 0.9, 0.88, 0.4)
self.textloader('Librarytext2', 'Librarytextnode2', 'Librarytextname2', 'Library', MickeyFont, Librarysign, -2.8, -0.07, 3, 0, 0, 0, 1.9, 1.9, 1.9, 0.9, 0.5, 0.1)
Librarydoor = self.modeldict['Library'].find('**/library_door_origin')
self.modelloadercopyto('Door6', 'phase_3.5/models/modules/doors_practical.bam', 'door_double_round_ur', Librarydoor, 0, 0, 0, 0, 0, 0, 1, 1, 1)
self.modeldict['Door6'].setColor(0.88, 0.45, 0.38)
self.modelloader('Gagshop', 'phase_4/models/modules/gagShop_TT.bam', render, -86.6848, -90.5693, 0.500015, 0, 0, 0, 1, 1, 1)
Gagdoor = self.modeldict['Gagshop'].find('**/building_front')
self.modelloadercopyto('Door7', 'phase_3.5/models/modules/doors_practical.bam', 'door_double_square_ur', Gagdoor, 3, 0.1, 0, 180, 0, 0, 1, 1, 1)
self.modeldict['Door7'].setColor(1, 0.63, 0.38)
self.modelloader('GoofyTunnel', 'phase_4/models/modules/Speedway_Tunnel.bam', render, 20.9205, 172.683, 3.24925, -150, -0.083787, 0.0101321, 1, 1, 1)
Goofysign = self.modeldict['GoofyTunnel'].find('**/sign_origin')
self.textloader('Goofytext1', 'Goofytextnode1', 'Goofytextname1', 'Goofy', MickeyFont, Goofysign, -2, -0.07, 0.7, 0, 0, 0, 2.2, 2.2, 2.2, 0.1, 0.1, 0.7)
self.textloader('Goofytext2', 'Goofytextnode2', 'Goofytextname2', 'Speed Way', MickeyFont, Goofysign, -6.1, -0.07, -2.8, 0, 0, 0, 2.6, 2.6, 2.6, 0.9, 0.5, 0.1)
self.modelloader('FirstTunnel', 'phase_4/models/modules/safe_zone_tunnel_TT.bam', render, -239.67, 64.08, -6.18, -90, 0, 0, 1, 1, 1)
SignOrigin1 = self.modeldict['FirstTunnel'].find('**/sign_origin')
self.modelloader('Orangesign1', 'phase_3.5/models/props/tunnel_sign_orange.bam', SignOrigin1, 0, -0.05, 0, 0, 0, 0, 1.5, 1.5, 1.5)
self.textloader('Tunnel1text1', 'Tunnel1textnode1', 'Tunnel1textname1', 'Loopy Lane', MickeyFont, SignOrigin1, -5.5, -0.07, -1.8, 0, 0, 0, 1.6, 1.6, 1.6, 0.0, 0.6, 0.1)
self.textloader('Tunnel1text2', 'Tunnel1textnode2', 'Tunnel1textname2', 'Toontown Central', MickeyFont, SignOrigin1, -5.7, -0.7, -2.9, 0, 0, 0, 1, 1, 1, 0.0, 0.6, 0.0)
self.modelloader('MickeyLogo1', 'phase_3.5/models/props/mickeySZ.bam', SignOrigin1, 0, -0.07, 2, 0, 0, 0, 4.5, 4.5, 4.5)
self.modelloader('SecondTunnel', 'phase_4/models/modules/safe_zone_tunnel_TT.bam', render, -68.38, -202.64, -3.58, -31, 0, 0, 1, 1, 1)
SignOrigin2 = self.modeldict['SecondTunnel'].find('**/sign_origin')
self.textloader('Tunnel2text1', 'Tunnel2textnode1', 'Tunnel2textname1', 'Silly Street', MickeyFont, SignOrigin2, -5.9, -0.07, -1.8, 0, 0, 0, 1.6, 1.6, 1.6, 0.0, 0.6, 0.1)
self.textloader('Tunnel2text2', 'Tunnel2textnode2', 'Tunnel2textname2', 'Toontown Central', MickeyFont, SignOrigin2, -5.7, -0.7, -2.9, 0, 0, 0, 1, 1, 1, 0.0, 0.6, 0.0)
self.modelloader('Orangesign2', 'phase_3.5/models/props/tunnel_sign_orange.bam', SignOrigin2, 0, -0.05, 0, 0, 0, 0, 1.5, 1.5, 1.5)
self.modelloader('MickeyLogo2', 'phase_3.5/models/props/mickeySZ.bam', SignOrigin2, 0, -0.07, 2, 0, 0, 0, 4.5, 4.5, 4.5)
self.modelloader('ThirdTunnel', 'phase_4/models/modules/safe_zone_tunnel_TT.bam', render, 27.6402, 176.475, -6.18, 171, 0, 0, 1, 1, 1)
SignOrigin3 = self.modeldict['ThirdTunnel'].find('**/sign_origin')
self.textloader('Tunnel3text1', 'Tunnel3textnode1', 'Tunnel3textname1', 'Punchline Place', MickeyFont, SignOrigin3, -7.7, -0.07, -1.8, 0, 0, 0, 1.6, 1.6, 1.6, 0.0, 0.6, 0.1)
self.textloader('Tunnel3text2', 'Tunnel3textnode2', 'Tunnel3textname2', 'Toontown Central', MickeyFont, SignOrigin3, -5.7, -0.7, -2.9, 0, 0, 0, 1, 1, 1, 0.0, 0.6, 0.0)
self.modelloader('Orangesign3', 'phase_3.5/models/props/tunnel_sign_orange.bam', SignOrigin3, 0, -0.05, 0, 0, 0, 0, 1.5, 1.5, 1.5)
self.modelloader('MickeyLogo3', 'phase_3.5/models/props/mickeySZ.bam', SignOrigin3, 0, -0.07, 2, 0, 0, 0, 4.5, 4.5, 4.5)
self.modelloader('Fishingdock1', 'phase_4/models/props/piers_tt.bam', render, -63.5335, 41.648, -3.36708, 120, 0, 0, 1, 1, 1)
self.modelloader('Fishingdock2', 'phase_4/models/props/piers_tt.bam', render, -90.2253, 42.5202, -3.3105, -135, 0, 0, 1, 1, 1)
self.modelloader('Fishingdock3', 'phase_4/models/props/piers_tt.bam', render, -94.9218, 31.4153, -3.20083, -105, 0, 0, 1, 1, 1)
self.modelloader('Fishingdock4', 'phase_4/models/props/piers_tt.bam', render, -77.5199, 46.9817, -3.28456, -180, 0, 0, 1, 1, 1)
self.modelloader('DDSign1', 'phase_4/models/props/neighborhood_sign_DD.bam', render, -59.1768, 92.9836, 0.499824, -9, 0, 0, 1, 1, 1)
self.modelloader('DDSign2', 'phase_4/models/props/neighborhood_sign_DD.bam', render, -33.749, 88.9499, 0.499825, 170, 0, 0, 1, 1, 1)
self.modelloader('MMSign1', 'phase_4/models/props/neighborhood_sign_MM.bam', render, -143.503, -8.9528, 0.499987, 90, 0, 0, 1, 1, 1)
self.modelloader('MMSign2', 'phase_4/models/props/neighborhood_sign_MM.bam', render, -143.242, 16.9541, 0.499977, -90, 0, 0, 1, 1, 1)
self.modelloader('DGSign1', 'phase_4/models/props/neighborhood_sign_DG.bam', render, 21.3941, -144.665, 2.99998, -30, 0, 0, 1, 1, 1)
self.modelloader('DGSign2', 'phase_4/models/props/neighborhood_sign_DG.bam', render, 44.1038, -157.906, 2.99998, 148, 0, 0, 1, 1, 1)
self.modelloader('Gazebo', 'phase_4/models/modules/gazebo.bam', render, -60.44, -11.4, -2, -178, 0, 0, 1, 1, 1)
self.modelloader('Fountain', 'phase_4/models/props/toontown_central_fountain.bam', render, 93.2057, -106.482, 2.50002, 0, 0, 0, 1, 1, 1)
self.modelloader('Mickeyhorse', 'phase_4/models/props/mickey_on_horse.bam', render, 73.6829, 121.026, 2.49996, 0, 0, 0, 1, 1, 1)
self.modelloader('FlowerPlant1', 'phase_3.5/models/props/big_planter.bam', render, 18.9496, -48.977, 4.95856, 135, 0, 0, 1, 1, 1)
self.modelloader('FlowerPlant2', 'phase_3.5/models/props/big_planter.bam', render, 19.2327, 52.5553, 4.95837, -135, 0, 0, 1, 1, 1)
self.modelloader('Fence1', 'phase_3.5/models/modules/wood_fence.bam', render, -148, -23, 0.5, 90, 0, 0, 1, 1, 1)
self.modelloader('Fence2', 'phase_3.5/models/modules/wood_fence.bam', render, -147, -32.8, 0.5, 96, 0, 0, 1, 1, 1)
self.modelloader('Fence3', 'phase_3.5/models/modules/wood_fence.bam', render, -144.1, -41.9, 0.5, 107, 0, 0, 1, 1, 1)
self.modelloader('Fence4', 'phase_3.5/models/modules/wood_fence.bam', render, -95, -95.5, 0.5, 160, 0, 0, 1, 1, 1)
self.modelloader('Fence5', 'phase_3.5/models/modules/wood_fence.bam', render, -104, -92.2, 0.5, 150, 0, 0, 1, 1, 1)
self.modelloader('Fence6', 'phase_3.5/models/modules/wood_fence.bam', render, -112.5, -87.3, 0.5, 148, 0, 0, 1, 1, 1)
self.modelloader('Fence7', 'phase_3.5/models/modules/wood_fence.bam', render, -140.73, -53, 0.5, 107, 0, 0, 1.16, 1, 1.0)
self.modelloaderstreetlight('Streetlight1', 'phase_3.5/models/props/streetlight_TT.bam', render, -125, 60, 0.5, 1500, 0, 0, 1, 1, 1)
self.modelloaderstreetlight('Streetlight2', 'phase_3.5/models/props/streetlight_TT.bam', render, 58.8, 93.6, 3, -90, 0, 0, 1, 1, 1)
self.modelloaderstreetlight('Streetlight3', 'phase_3.5/models/props/streetlight_TT.bam', render, 95, 93.6, 3, -90, 0, 0, 1, 1, 1)
self.modelloaderstreetlight('Streetlight4', 'phase_3.5/models/props/streetlight_TT.bam', render, 134, -126, 3, -130, 0, 0, 1, 1, 1)
self.modelloaderstreetlight('Streetlight5', 'phase_3.5/models/props/streetlight_TT.bam', render, 108, -28, 4, -90, 0, 0, 1, 1, 1)
self.modelloaderstreetlight('Streetlight6', 'phase_3.5/models/props/streetlight_TT.bam', render, 108, 32, 4, -90, 0, 0, 1, 1, 1)
self.modelloaderstreetlight('Streetlight7', 'phase_3.5/models/props/streetlight_TT.bam', render, 32, 61, 4, -90, 0, 0, 1, 1, 1)
self.modelloaderstreetlight('Streetlight8', 'phase_3.5/models/props/streetlight_TT.bam', render, 28, -57, 4, -90, 0, 0, 1, 1, 1)
self.modelloaderstreetlight('Streetlight9', 'phase_3.5/models/props/streetlight_TT.bam', render, -101, -70, 0.5, 80, 0, 0, 1, 1, 1)
self.modelloaderstreetlight('Streetlight10', 'phase_3.5/models/props/streetlight_TT.bam', render, -129, -42.5, 0.5, 90, 0, 0, 1, 1, 1)
self.modelloaderstreetlight('Streetlight11', 'phase_3.5/models/props/streetlight_TT.bam', render, 3.8, 118, 3, -110, 0, 0, 1, 1, 1)
self.modelloaderstreetlight('Streetlight12', 'phase_3.5/models/props/streetlight_TT.bam', render, 116, 146, 3, 145, 0, 0, 1, 1, 1)
self.modelloaderstreetlight('Streetlight13', 'phase_3.5/models/props/streetlight_TT.bam', render, 86, 164, 3, -95, 0, 0, 1, 1, 1)
self.modelloaderstreetlight('Streetlight14', 'phase_3.5/models/props/streetlight_TT.bam', render, 45.5, -88, 3, -2, 0, 0, 1, 1, 1)
self.modelloaderstreetlight('Streetlight15', 'phase_3.5/models/props/streetlight_TT.bam', render, 78.3, -88, 3, -2, 0, 0, 1, 1, 1)
self.modelloaderstreetlight('Streetlight16', 'phase_3.5/models/props/streetlight_TT.bam', render, 100, -157, 3, 30, 0, 0, 1, 1, 1)
self.modelloadercopyto('Tree1', 'phase_3.5/models/props/trees.bam', 'prop_tree_large_no_box_ul', render, -80.9143, 79.7948, 0.2, 1, 1, 1, 1, 1, 1)
self.modelloadercopyto('Tree2', 'phase_3.5/models/props/trees.bam', 'prop_tree_large_no_box_ul', render, -26.1169, 73.7975, 0.2, 1, 1, 1, 1, 1, 1)
self.modelloadercopyto('Tree3', 'phase_3.5/models/props/trees.bam', 'prop_tree_fat_no_box_ul', render, 7.14367, 100.346, 2.725, 1, 1, 1, 1, 1, 1)
self.modelloadercopyto('Tree4', 'phase_3.5/models/props/trees.bam', 'prop_tree_fat_no_box_ul', render, 55.8308, 153.977, 2.725, 1, 1, 1, 1, 1, 1)
self.modelloadercopyto('Tree5', 'phase_3.5/models/props/trees.bam', 'prop_tree_fat_no_box_ul', render, 102.359, 81.1646, 2.725, 1, 1, 1, 1, 1, 1)
self.modelloadercopyto('Tree6', 'phase_3.5/models/props/trees.bam', 'prop_tree_fat_no_box_ul', render, 114.09, 57.3141, 2.725, 1, 1, 1, 1, 1, 1)
self.modelloadercopyto('Tree7', 'phase_3.5/models/props/trees.bam', 'prop_tree_fat_no_box_ul', render, 143.598, 110.178, 2.725, 1, 1, 1, 1, 1, 1)
self.modelloadercopyto('Tree8', 'phase_3.5/models/props/trees.bam', 'prop_tree_large_no_box_ul', render, -128.41, 32.9562, 0.2, 1, 1, 1, 1, 1, 1)
self.modelloadercopyto('Tree9', 'phase_3.5/models/props/trees.bam', 'prop_tree_large_no_box_ul', render, -128.708, -23.9096, 0.2, 1, 1, 1, 1, 1, 1)
self.modelloadercopyto('Tree10', 'phase_3.5/models/props/trees.bam', 'prop_tree_large_no_box_ul', render, -52.4323, -73.2793, 0.2, 1, 1, 1, 1, 1, 1)
self.modelloadercopyto('Tree11', 'phase_3.5/models/props/trees.bam', 'prop_tree_fat_no_box_ul', render, 7.00708, -99.2181, 2.725, 1, 1, 1, 1, 1, 1)
self.modelloadercopyto('Tree12', 'phase_3.5/models/props/trees.bam', 'prop_tree_small_no_box_ul', render, 96.5467, -145.522, 2.725, 1, 1, 1, 1, 1, 1)
self.modelloadercopyto('Tree13', 'phase_3.5/models/props/trees.bam', 'prop_tree_small_no_box_ul', render, 119.57, -127.05, 2.725, 1, 1, 1, 1, 1, 1)
self.modelloadercopyto('Tree14', 'phase_3.5/models/props/trees.bam', 'prop_tree_small_no_box_ul', render, 128.064, -60.4145, 2.725, 1, 1, 1, 1, 1, 1)
self.modelloadercopyto('Tree15', 'phase_3.5/models/props/trees.bam', 'prop_tree_small_no_box_ul', render, 121.146, -45.0892, 2.725, 1, 1, 1, 1, 1, 1)
self.modelloadercopyto('Tree16', 'phase_3.5/models/props/trees.bam', 'prop_tree_small_no_box_ul', render, 113.503, -57.8055, 2.725, 1, 1, 1, 1, 1, 1)
def modelloader(self, nodename, modelpath, renderparent, x, y, z, h, p, r, scale1, scale2, scale3):
self.modeldict[nodename] = loader.loadModel(modelpath)
self.modeldict[nodename].reparentTo(renderparent)
self.modeldict[nodename].setPos(x, y, z)
self.modeldict[nodename].setHpr(h, p, r)
self.modeldict[nodename].setScale(scale1, scale2, scale3)
def modelloadercopyto(self, nodename, modelpath, findmodel, renderparent, x, y, z, h, p, r, scale1, scale2, scale3):
self.modeldict[nodename] = loader.loadModel(modelpath)
self.modeldict[nodename] = self.modeldict[nodename].find('**/' + findmodel).copyTo(renderparent)
self.modeldict[nodename].setPos(x, y, z)
self.modeldict[nodename].setHpr(h, p, r)
self.modeldict[nodename].setScale(scale1, scale2, scale3)
def modelloaderanimate(self, nodename, modelpath, animatepath, renderparent, x, y, z, h, p, r, scale1, scale2, scale3, animation):
self.modeldict[nodename] = Actor(modelpath, {animation: animatepath})
self.modeldict[nodename].reparentTo(renderparent)
self.modeldict[nodename].setPos(x, y, z)
self.modeldict[nodename].setHpr(h, p, r)
self.modeldict[nodename].setScale(scale1, scale2, scale3)
self.modeldict[nodename].loop(animation)
def textloader(self, nodename, Textnodename, Textname, Textdata, Fonttype, renderparent, x, y, z, h, p, r, scale1, scale2, scale3, color1, color2, color3):
Textname = TextNode(Textnodename)
Textname.setText(Textdata)
Textname.setFont(Fonttype)
self.modeldict[nodename] = renderparent.attachNewNode(Textname)
self.modeldict[nodename].setPos(x, y, z)
self.modeldict[nodename].setHpr(h, p, r)
self.modeldict[nodename].setScale(scale1, scale2, scale3)
self.modeldict[nodename].setColor(color1, color2, color3)
def modelloaderstreetlight(self, nodename, modelpath, renderparent, x, y, z, h, p, r, scale1, scale2, scale3):
self.modeldict[nodename] = loader.loadModel(modelpath)
self.modeldict[nodename].reparentTo(renderparent)
self.modeldict[nodename].setPos(x, y, z)
self.modeldict[nodename].setHpr(h, p, r)
self.modeldict[nodename].setScale(scale1, scale2, scale3)
self.modeldict[nodename].find('**/prop_post_light_base').hide()
self.modeldict[nodename].find('**/p1').hide()
self.modeldict[nodename].find('**/prop_post_one_light').hide()
self.modeldict[nodename].find('**/p13').hide()
# Font and on-screen text object used to echo submitted chat messages.
BTFont = loader.loadFont('phase_3/models/fonts/MickeyFont.bam')
bk_text = ' '
textObject = OnscreenText(text=bk_text, pos=(0.95, -0.95), scale=0.07, fg=(1,
 0.5,
 0.5,
 1), align=TextNode.ACenter, mayChange=1)
textObject.setFont(BTFont)
def setText(textEntered):
    """Chat submit callback: show the text, collapse the entry, show the button."""
    textObject.setText(textEntered)
    if b: b.hide()
    ImgBtn2.show()
def clearText():
    """Focus-in callback: wipe the placeholder text from the chat entry."""
    if b: b.enterText('')
def openChatGui():
    """Chat-button callback: reveal the entry box and hide the button itself."""
    if b: b.show()
    ImgBtn2.hide()
# Chat input UI: DirectEntry for typing plus a button that opens it.
# NOTE(review): chat_input_gui.bam is loaded four times in this span and
# ImgBtn2/b.setPos are each assigned twice; presumably only the last
# assignments matter, but the first DirectButton is never destroyed and may
# remain rendered — verify before cleaning up.  Left byte-identical here.
chatGui = loader.loadModel('phase_3.5/models/gui/chat_input_gui.bam')
b = DirectEntry(text='', scale=0.05, command=setText, initialText='Type Something', numLines=3, focus=1, focusInCommand=clearText)
b.hide()
chatGui = loader.loadModel('phase_3.5/models/gui/chat_input_gui.bam')
chatGui = loader.loadModel('phase_3.5/models/gui/chat_input_gui.bam')
ImgBtn2 = DirectButton(frameSize=None, text=' ', image=(chatGui.find('**/ChtBx_ChtBtn_UP'), chatGui.find('**/ChtBx_ChtBtn_DN'), chatGui.find('**/ChtBx_ChtBtn_RLVR')), relief=None, command=openChatGui, text_pos=(2, -0.325), geom=None, pad=(0.01, 0.01), suppressKeys=0, pos=(-1.21, -1, 0.9), text_scale=1, borderWidth=(0.015, 0.01))
b.setPos(-1.21, -2, 5)
chatGui = loader.loadModel('phase_3.5/models/gui/chat_input_gui.bam')
ImgBtn2 = DirectButton(frameSize=None, text=' ', image=(chatGui.find('**/ChtBx_ChtBtn_UP'), chatGui.find('**/ChtBx_ChtBtn_DN'), chatGui.find('**/ChtBx_ChtBtn_RLVR')), relief=None, command=openChatGui, text_pos=(2, -0.325), geom=None, pad=(0.01, 0.01), suppressKeys=0, pos=(-1.1, -1, 0.9), text_scale=0.06, color=(0, 1, 0), borderWidth=(0.015, 0.01))
b.setPos(-1.21, -2, 0.75)
# Floating billboard nametag over the avatar's head.
Font = loader.loadFont('phase_3/models/fonts/Courier.bam')
tag = OnscreenText(scale=0.5, text='Smirky Superchomp', bg=(0.9,
 0.9,
 0.9,
 0.3), fg=(0.35,
 0.35,
 0.95,
 1), decal=True)
tag.wrtReparentTo(DuckBody)
tag.setBillboardAxis()
# Two-argument NodePath.setPos overload: sets X and Y, leaves Z unchanged.
tag.setPos(0, 0)
tag.setDepthTest(True)
tag.setDepthWrite(True)
tag.reparentTo(DuckBody)
# Raise the tag relative to the head joint, then attach it to the head so it
# follows head animation.
tag.setZ(tag, DuckBody.find('**/__Actor_head').getZ(DuckBody) + -1)
tag.reparentTo(DuckBody.find('**/def_head'))
tag.setFont(BTFont)
# Spinning Toon Resistance fist icon above the head (party hat removed).
fist = loader.loadModel('phase_3.5/models/gui/tt_m_gui_gm_toonResistance_fist.bam')
fist.reparentTo(DuckBody.find('**/def_head'))
fist.setPos(0, 0, 0)
fist.setScale(2.3)
fist.find('**/gmPartyHat').remove()
ttHatSpin = fist.find('**/fistIcon').hprInterval(3, Vec3(360, 0, 0))
ttHatSpin.loop()
# Cosmetic accessories: fedora and glasses parented to the head joint.
fedora = loader.loadModel('phase_4/models/accessories/tt_m_chr_avt_acc_hat_fedora.bam')
fedora.reparentTo(DuckBody.find('**/def_head'))
fedora.setScale(0.35)
fedora.setZ(0.75)
fedora.setH(180)
nerdglasses = loader.loadModel('phase_4/models/accessories/tt_m_chr_avt_acc_msk_squareRims.bam')
nerdglasses.reparentTo(DuckBody.find('**/def_head'))
nerdglasses.setH(180)
nerdglasses.setScale(0.45)
nerdglasses.setZ(0.2)
nerdglasses.setY(0.05)
# Two cog-style street buildings placed at the playground edge.
CS1 = loader.loadModel('phase_5/models/modules/TT_A2.bam')
CS1.reparentTo(render)
CS1.setH(138.37)
CS1.setX(-109.07)
CS1.setY(-92.27)
CS2 = loader.loadModel('phase_5/models/modules/TT_A3.bam')
CS2.reparentTo(render)
CS2.setH(104.93)
CS2.setX(-132.65)
CS2.setY(-74.96)
def TP1():
    """Teleport the avatar up to the Goofy Speedway area (built around Z=500)."""
    DuckBody.setPos(-1.00264, 59.6964, 500.89)
# On-screen "SpeedWay" button that triggers the TP1 teleport.
ButtonImage = loader.loadModel('phase_3/models/gui/quit_button.bam')
ImgBtn11 = DirectButton(frameSize=None, text='SpeedWay', image=(ButtonImage.find('**/QuitBtn_UP'), ButtonImage.find('**/QuitBtn_DN'), ButtonImage.find('**/QuitBtn_RLVR')), relief=None, command=TP1, text_pos=(0, -0.015), geom=None, pad=(0.01, 0.01), suppressKeys=0, pos=(-0.05, 0, 0.95), text_scale=0.059, borderWidth=(0.13, 0.01), scale=0.7, color=(0, 1, 0))
# --- Goofy Speedway scene, built ~500 units above TTC so both coexist ---
# NOTE(review): `environ` is rebound to EnvironmentTTC() later in the file;
# this NodePath stays parented to render regardless — confirm intended.
environ = loader.loadModel('phase_6/models/karting/GasolineAlley_TT.bam')
environ.reparentTo(render)
environ.setZ(500)
# Entry tunnel back to the safe zone, with orange sign and Goofy SZ logo.
tunnel = loader.loadModel('phase_4/models/modules/safe_zone_tunnel_TT.bam')
tunnel.reparentTo(render)
tunnel.setPos(60, 175, 493)
tunnel.setHpr(180, 0, 0)
tunnel.setScale(1)
tunnelsign = loader.loadModel('phase_3.5/models/props/tunnel_sign_orange.bam')
tunnelsign.reparentTo(tunnel)
tunnelsign.setPos(60, 95.01, 523.7)
tunnelsign.setHpr(180, 0, 0)
tunnelsign.setScale(1.6)
SZsign = loader.loadModel('phase_4/models/props/goofySZ.bam')
SZsign.reparentTo(tunnel)
SZsign.setPos(60, 95.025, 523.7)
SZsign.setHpr(180, 0, 0)
SZsign.setScale(4)
# Major speedway structures.
kartshop = loader.loadModel('phase_6/models/karting/kartShop.bam')
kartshop.reparentTo(render)
kartshop.setPos(0, 10, 500)
scoreboard = loader.loadModel('phase_6/models/karting/tt_m_ara_gfs_leaderBoardCrashed.bam')
scoreboard.reparentTo(render)
scoreboard.setPos(1, -111, 500)
scoreboard.setHpr(180, 0, 0)
wrench = loader.loadModel('phase_6/models/karting/KartArea_WrenchJack.bam')
wrench.reparentTo(render)
wrench.setPos(-33, 5, 500)
wrench.setHpr(180, 0, 0)
tires = loader.loadModel('phase_6/models/karting/KartArea_Tires.bam')
tires.reparentTo(render)
tires.setPos(33, 5, 500)
# Decorative trees flanking the kart shop and track.
trees1 = loader.loadModel('phase_6/models/karting/GoofyStadium_TreeBase.bam')
trees1.reparentTo(render)
trees1.setPos(-13, 58, 499.7)
trees1.setScale(12)
trees2 = loader.loadModel('phase_6/models/karting/GoofyStadium_TreeBase.bam')
trees2.reparentTo(render)
trees2.setPos(13, 58, 499.7)
trees2.setScale(12)
trees3 = loader.loadModel('phase_6/models/karting/GoofyStadium_TreeBase.bam')
trees3.reparentTo(render)
trees3.setPos(-13, -35, 499.7)
trees3.setScale(12)
trees4 = loader.loadModel('phase_6/models/karting/GoofyStadium_TreeBase.bam')
trees4.reparentTo(render)
trees4.setPos(13, -35, 499.7)
trees4.setScale(12)
trees5 = loader.loadModel('phase_6/models/karting/GoofyStadium_TreeBase.bam')
trees5.reparentTo(render)
trees5.setPos(-10, -76, 499.7)
trees5.setScale(12)
trees6 = loader.loadModel('phase_6/models/karting/GoofyStadium_TreeBase.bam')
trees6.reparentTo(render)
trees6.setPos(10, -76, 499.7)
trees6.setScale(12)
# Lampposts, mailbox, start flags and signage.
light1 = loader.loadModel('phase_6/models/karting/GoofyStadium_Lamppost_Base1.bam')
light1.reparentTo(render)
light1.setPos(-10, -52, 499.3)
light1.setScale(14)
light2 = loader.loadModel('phase_6/models/karting/GoofyStadium_Lamppost_Base1.bam')
light2.reparentTo(render)
light2.setPos(10, -52, 499.3)
light2.setScale(14)
box = loader.loadModel('phase_6/models/karting/GoofyStadium_Mailbox.bam')
box.reparentTo(render)
box.setPos(16, -50, 500)
box.setHpr(210, 0, 0)
box.setScale(10)
flag1 = loader.loadModel('phase_6/models/karting/flag.bam')
flag1.reparentTo(render)
flag1.setPos(-18, 6, 499.8)
flag2 = loader.loadModel('phase_6/models/karting/flag.bam')
flag2.reparentTo(render)
flag2.setPos(18, 6, 499.8)
sign = loader.loadModel('phase_6/models/karting/KartShowBlockSign.bam')
sign.reparentTo(render)
sign.setPos(-16, -50, 500)
sign.setHpr(-120, 0, 0)
sign.setScale(26)
# Announcer stands around the leaderboard.
announcer1 = loader.loadModel('phase_6/models/karting/announcer.bam')
announcer1.reparentTo(render)
announcer1.setPos(25, -150, 499.3)
announcer1.setHpr(-140, 0, 0)
announcer2 = loader.loadModel('phase_6/models/karting/announcer.bam')
announcer2.reparentTo(render)
announcer2.setPos(-26, -149, 499.3)
announcer2.setHpr(-212, 0, 0)
announcer3 = loader.loadModel('phase_6/models/karting/announcer.bam')
announcer3.reparentTo(render)
announcer3.setPos(-38, -135, 499.3)
announcer3.setHpr(-212, 0, 0)
announcer4 = loader.loadModel('phase_6/models/karting/announcer.bam')
announcer4.reparentTo(render)
announcer4.setPos(37, -137.5, 499.3)
announcer4.setHpr(-140, 0, 0)
# Scattered traffic cones, some deliberately tipped over via HPR.
cone1 = loader.loadModel('phase_6/models/karting/cone.bam')
cone1.reparentTo(render)
cone1.setPos(13, -4, 499.7)
cone2 = loader.loadModel('phase_6/models/karting/cone.bam')
cone2.reparentTo(render)
cone2.setPos(13, 20, 499.7)
cone3 = loader.loadModel('phase_6/models/karting/cone.bam')
cone3.reparentTo(render)
cone3.setPos(-14, 18, 499.7)
cone4 = loader.loadModel('phase_6/models/karting/cone.bam')
cone4.reparentTo(render)
cone4.setPos(-14, -3, 499.7)
cone5 = loader.loadModel('phase_6/models/karting/cone.bam')
cone5.reparentTo(render)
cone5.setPos(-23, 9, 499.7)
cone6 = loader.loadModel('phase_6/models/karting/cone.bam')
cone6.reparentTo(render)
cone6.setPos(45, -138, 499.4)
cone7 = loader.loadModel('phase_6/models/karting/cone.bam')
cone7.reparentTo(render)
cone7.setPos(25, -109, 500)
cone8 = loader.loadModel('phase_6/models/karting/cone.bam')
cone8.reparentTo(render)
cone8.setPos(24, -111, 500)
cone8.setHpr(45, 0, 0)
cone9 = loader.loadModel('phase_6/models/karting/cone.bam')
cone9.reparentTo(render)
cone9.setPos(75, -106, 500)
cone9.setHpr(0, 0, -120)
cone10 = loader.loadModel('phase_6/models/karting/cone.bam')
cone10.reparentTo(render)
cone10.setPos(76.5, -107.5, 500)
cone10.setHpr(0, 120, 0)
cone11 = loader.loadModel('phase_6/models/karting/cone.bam')
cone11.reparentTo(render)
cone11.setPos(26, -154, 499.3)
cone11.setHpr(42, 0, 0)
cone12 = loader.loadModel('phase_6/models/karting/cone.bam')
cone12.reparentTo(render)
cone12.setPos(1, -187, 501.22)
cone12.setHpr(42, 0, 0)
# Stacked crates near the announcer area.
krate1 = loader.loadModel('phase_6/models/karting/krate.bam')
krate1.reparentTo(render)
krate1.setPos(1, -187, 499.3)
krate1.setScale(1.2)
krate2 = loader.loadModel('phase_6/models/karting/krate.bam')
krate2.reparentTo(render)
krate2.setPos(-48, -115, 499.3)
krate2.setScale(1.2)
krate3 = loader.loadModel('phase_6/models/karting/krate.bam')
krate3.reparentTo(render)
krate3.setPos(-50, -113, 499.3)
krate3.setHpr(45, 0, 0)
krate3.setScale(1.2)
krate4 = loader.loadModel('phase_6/models/karting/krate.bam')
krate4.reparentTo(render)
krate4.setPos(-49, -114, 501.22)
krate4.setHpr(60, 0, 0)
krate4.setScale(1.2)
def TP2():
    """Teleport the avatar back to Toontown Central at the scene origin."""
    DuckBody.setPos(0, 0, 0)
# On-screen "TTC" button that triggers the TP2 teleport back down.
# NOTE(review): this rebinds ImgBtn11; the earlier 'SpeedWay' button object
# is not destroyed — both buttons are created with command TP1/TP2.
ButtonImage = loader.loadModel('phase_3/models/gui/quit_button.bam')
ImgBtn11 = DirectButton(frameSize=None, text='TTC', image=(ButtonImage.find('**/QuitBtn_UP'), ButtonImage.find('**/QuitBtn_DN'), ButtonImage.find('**/QuitBtn_RLVR')), relief=None, command=TP2, text_pos=(0, -0.015), geom=None, pad=(0.01, 0.01), suppressKeys=0, pos=(1, 0, 0.95), text_scale=0.059, borderWidth=(0.13, 0.01), scale=0.7, color=(0, 1, 0))
# Animated Mickey NPC patrolling near Toon Hall, with a collision sphere.
lord = Actor('phase_3/models/char/mickey-1200.bam', {'walk': 'phase_3/models/char/mickey-walk.bam'})
lord.reparentTo(render)
lord.loop('walk')
lord.setX(106.58)
lord.setY(-1.37)
lord.setZ(4.46)
lord.setH(104.62)
cs = CollisionSphere(0, 0, 1, 3)
cnodePath = lord.attachNewNode(CollisionNode('cnode'))
cnodePath.node().addSolid(cs)
# Patrol route as alternating position/heading lerps.
# NOTE(review): the hprInterval targets below are position-like Point3s
# (e.g. (96.33, 0.55, 4.02)) rather than heading angles — presumably a
# copy/paste of the pos tuples; verify the intended facing.
pandaPosInterval1 = lord.posInterval(3, Point3(96.3312, 0.553801, 4.025), startPos=Point3(96.3312, 0.553801, 4.025))
pandaHprInterval1 = lord.hprInterval(3, Point3(96.3312, 0.553801, 4.025), startHpr=Point3(96.3312, 0.553801, 4.025))
pandaPosInterval2 = lord.posInterval(3, Point3(54.1032, 10.1371, 4.025), startPos=Point3(96.3312, 0.553801, 4.025))
pandaHprInterval2 = lord.hprInterval(3, Point3(172.798, 0, 0), startHpr=Point3(96.3312, 0.553801, 4.025))
pandaPosInterval3 = lord.posInterval(3, Point3(62.9905, -21.4791, 6.05112), startPos=Point3(54.1032, 10.1371, 4.025))
pandaHprInterval3 = lord.hprInterval(3, Point3(438.492, 0, 0), startHpr=Point3(172.798, 0, 0))
lord.pandaPace = Sequence(pandaPosInterval1, pandaHprInterval1, pandaPosInterval2, pandaHprInterval2, pandaPosInterval3, pandaHprInterval3)
lord.pandaPace.loop()
# Build the whole TTC playground and start the debug-overlay task.
environ = EnvironmentTTC()
base.taskMgr.add(updateOnScreenDebug, 'UpdateOSD')
# Billboard nametag for the Mickey NPC.
tag2 = OnscreenText(scale=2, text='Mickey', bg=(0.9,
 0.9,
 0.9,
 0.3), fg=(0.35,
 0.35,
 0.95,
 1), decal=True)
tag2.wrtReparentTo(lord)
tag2.setBillboardAxis()
tag2.setPos(0, 0)
tag2.setDepthTest(True)
tag2.setDepthWrite(True)
tag2.reparentTo(lord)
# NOTE(review): this passes `tag` (the avatar's nametag) as the relative
# node while positioning tag2 — presumably `tag2` was intended; confirm.
tag2.setZ(tag, lord.find('**/joint_pupilL').getZ(lord) + 1)
tag2.reparentTo(lord.find('**/joint_pupilL'))
tag2.setFont(BTFont)
tag2.setColor(1, 0.1, 0.1, 1.0)
# Tear down the title screen, enable free camera, and enter the main loop.
title.destroy()
base.oobe()
run()
| ronanwow1001/Toontown-1 | Landwalker Example.py | Landwalker Example.py | py | 63,531 | python | en | code | 0 | github-code | 36 |
26014460878 | from ..h2o import random_forest
from .estimator_base import H2OEstimator
class H2ORandomForestEstimator(H2OEstimator):
  """Scikit-learn-style wrapper around H2O's Distributed Random Forest.

  Python 2 code (uses dict.iteritems).  Constructor keyword arguments are
  captured wholesale and forwarded to h2o.random_forest() at fit time.
  """
  def __init__(self,mtries=None,sample_rate=None,build_tree_one_node=None,ntrees=None,
               max_depth=None,min_rows=None,nbins=None,nbins_cats=None,
               binomial_double_trees=None,balance_classes=None,max_after_balance_size=None,
               seed=None,offset_column=None,weights_column=None):
    super(H2ORandomForestEstimator, self).__init__()
    # Capture every constructor argument via locals(), then drop 'self' so
    # the dict can be splatted into random_forest() unchanged.
    self.parms = locals()
    self.parms = {k:v for k,v in self.parms.iteritems() if k!="self"}
    # Defaults to regression; fit() flips this when y is categorical.
    self._estimator_type="regressor"

  def fit(self,X,y=None,**params):
    # Train on frame X (and optional response y); adopts the fitted model's
    # state by copying its __dict__ onto this estimator.
    # NOTE(review): **params is accepted but never used — confirm intended.
    if y is not None:
      if y.isfactor(): self._estimator_type="classifier"
    self.__dict__=random_forest(x=X,y=y,**self.parms).__dict__.copy() | tomasgreif/h2o-3 | h2o-py/h2o/estimators/random_forest.py | random_forest.py | py | 807 | python | en | code | null | github-code | 36 |
12478884070 | import json
import os
import cv2
import numpy as np
import numpy.matlib
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import matplotlib.animation as animation
# change IDs to your IDs.
ID1 = '206299463'
ID2 = '312497084'
ID = "HW3_{0}_{1}".format(ID1, ID2)
# Output directory for tracking results (created if missing).
RESULTS = 'results'
os.makedirs(RESULTS, exist_ok=True)
IMAGE_DIR_PATH = "Images"
# SET NUMBER OF PARTICLES
N = 100
# Initial Settings: starting bounding-box state for the tracked target.
s_initial = [297,  # x center
             139,  # y center
             16,   # half width
             43,   # half height
             0,    # velocity x
             0]    # velocity y
# state index constants for readability because I keep forgetting
X_ind = 0
Y_ind = 1
W_ind = 2
H_ind = 3
VX_ind = 4
VY_ind = 5
# Frame dimensions of the input sequence, in pixels.
WIDTH = 576
HEIGHT = 352
# set this to True if you want to generate a video of the tracking process
GENERATE_VIDEO = False
def predict_particles(s_prior: np.ndarray) -> np.ndarray:
    """Advance the prior particle states one frame and perturb them with noise.

    Motion-model rationale: the video shows a human running mostly on a
    horizontal plane. With ~25 fps and an estimated ~65 px/m scale, that
    gives about 6-7 px of horizontal and up to ~3 px of vertical
    displacement between frames; velocity noise models sudden
    acceleration (a runner starting/stopping).

    Args:
        s_prior: np.ndarray. The prior state, shape (6, N).

    Return:
        np.ndarray. The drifted state after the motion model plus uniform noise.
    """
    prior = s_prior.astype(float)
    predicted = prior.copy()
    # drift: advance each particle's centre by its prior velocity
    predicted[[X_ind, Y_ind]] += prior[[VX_ind, VY_ind]]
    # a fast target near the border could push the box outside the frame,
    # so clamp the centre back into the image
    predicted[[X_ind, Y_ind]] = np.clip(predicted[[X_ind, Y_ind]].T, [0, 0], [WIDTH-1, HEIGHT-1]).T
    # uniform-noise amplitudes per state component (px and px/frame);
    # width/height are assumed constant
    x_lim, y_lim = 7, 3
    vx_lim, vy_lim = 4, 2
    h_lim = w_lim = 0
    bounds = np.vstack(np.array([x_lim, y_lim, w_lim, h_lim, vx_lim, vy_lim]))
    predicted += np.random.uniform(-1*bounds, bounds, size=predicted.shape)
    # keep velocities inside the physically plausible range described above
    predicted[[VX_ind, VY_ind]] = np.clip(predicted[[VX_ind, VY_ind]].T, [-1.2*vx_lim, -0.8*vy_lim], [1.2*vx_lim, 0.8*vy_lim]).T
    return predicted
def compute_normalized_histogram(image: np.ndarray, state: np.ndarray) -> np.ndarray:
    """Crop the bounding box described by *state* out of *image* and return
    its normalized, flattened 16x16x16 colour histogram.

    Args:
        image: np.ndarray. The image we want to crop the rectangle from.
        state: np.ndarray. State candidate (x, y, half-width, half-height, ...).

    Return:
        np.ndarray. Flattened histogram of quantized colours, summing to 1.
    """
    cx, cy, half_w, half_h = state.astype(int)[:4]
    crop = image[cy - half_h:cy + half_h, cx - half_w:cx + half_w]
    hist = cv2.calcHist([crop], [0, 1, 2], None, [16, 16, 16], [0, 256, 0, 256, 0, 256])
    # normalize so histograms of different box sizes are comparable
    return (hist / hist.sum()).flatten()
def sample_particles(previous_state: np.ndarray, cdf: np.ndarray) -> np.ndarray:
    """Resample particles from the previous state via inverse-CDF sampling.

    For each of the N particles a uniform draw r in [0, 1) is made and the
    particle whose CDF entry is the first one strictly greater than r is
    selected (multinomial resampling).

    Args:
        previous_state: np.ndarray. previous state, shape: (6, N)
        cdf: np.ndarray. cumulative distribution function: (N, )

    Return:
        s_next: np.ndarray. Sampled particles. shape: (6, N)
    """
    draws = np.random.random(size=cdf.shape)
    # searchsorted(side='right') returns, for each draw r, the first index j
    # with cdf[j] > r -- an O(N log N) replacement for the original O(N^2)
    # pairwise-difference / argmin formulation with identical selections.
    new_indices = np.searchsorted(cdf, draws, side='right')
    # guard against a draw landing at/above cdf[-1] when the weights do not
    # sum to exactly 1.0 due to floating-point error
    new_indices = np.minimum(new_indices, cdf.size - 1)
    s_next = previous_state[:, new_indices]
    # velocity is deliberately not recomputed after resampling; the motion
    # model in predict_particles supplies velocity noise instead
    return s_next
def bhattacharyya_distance(p: np.ndarray, q: np.ndarray) -> float:
    """Turn the Bhattacharyya coefficient of histograms *p* and *q* into a
    particle weight.

    Note: despite the name this is not the Bhattacharyya *distance*; it
    returns exp(20 * BC(p, q)), a similarity score that grows with the
    overlap of the two histograms and is used directly as an unnormalized
    particle weight.

    Args:
        p: np.ndarray. first histogram.
        q: np.ndarray. second histogram.

    Return:
        float. exp(20 * sum(sqrt(p * q))).
    """
    coefficient = np.sqrt(p * q).sum()
    return np.exp(20 * coefficient)
def create_image_with_boundingbox(image: np.ndarray,
                                  mean_bbox: tuple,
                                  max_bbox: tuple,
                                  current_bbox: tuple
                                  ) -> np.ndarray:
    """Return a copy of *image* with three bounding boxes drawn on it:
    the max-weight particle (red), the weighted-mean particle (green) and
    the current-frame estimate (blue).

    Used for rendering the optional tracking video (see GENERATE_VIDEO);
    very helpful for tuning the tracker parameters visually.
    """
    annotated = image.copy()
    boxes_and_colors = (
        (max_bbox, (0, 0, 255)),      # max-weight particle: red (BGR)
        (mean_bbox, (0, 255, 0)),     # weighted mean: green
        (current_bbox, (255, 0, 0)),  # current-frame estimate: blue
    )
    # draw in the same order as before so overlapping boxes layer identically
    for bbox, color in boxes_and_colors:
        x, y, w, h = (int(round(v)) for v in bbox)
        annotated = cv2.rectangle(annotated, (x-w, y-h), (x+w, y+h), color, 2)
    return annotated
def show_particles(image: np.ndarray, state: np.ndarray, W: np.ndarray, frame_index: int, ID: str,
                   frame_index_to_mean_state: dict, frame_index_to_max_state: dict,
                   ) -> tuple:
    """Plot the frame with the weighted-mean (green) and max-weight (red)
    particle boxes, save the figure under RESULTS, and record both boxes.

    Args:
        image: current frame in BGR channel order (flipped to RGB for matplotlib).
        state: particle matrix, shape (6, N).
        W: normalized particle weights, shape (N,).
        frame_index: frame counter used in the plot title and filename.
        ID: submission identifier used in the title/filename (shadows the
            module-level ID).
        frame_index_to_mean_state / frame_index_to_max_state: dicts updated
            in place with [x, y, half_w, half_h] for this frame.

    Returns:
        The two (mutated) dicts as a tuple.
    """
    fig, ax = plt.subplots(1)
    image = image[:,:,::-1]  # BGR -> RGB so matplotlib shows true colours
    plt.imshow(image)
    plt.title(ID + " - Frame number = " + str(frame_index))
    # Avg particle box: weighted mean of all particles
    avg_state = np.average(state, axis=1, weights=W)
    (x_avg, y_avg, w_avg, h_avg, _, _) = avg_state
    rect = patches.Rectangle((x_avg-w_avg, y_avg-h_avg), 2*w_avg, 2*h_avg, linewidth=2, edgecolor='g', facecolor='none')
    ax.add_patch(rect)
    # calculate Max particle box: single particle with the highest weight
    max_state = state[:, np.argmax(W)]
    (x_max, y_max, w_max, h_max, _, _) = max_state
    rect = patches.Rectangle((x_max-w_max, y_max-h_max), 2*w_max, 2*h_max, linewidth=2, edgecolor='r', facecolor='none')
    ax.add_patch(rect)
    plt.show(block=False)
    fig.savefig(os.path.join(RESULTS, ID + "-" + str(frame_index) + ".png"))
    frame_index_to_mean_state[frame_index] = [float(x) for x in [x_avg, y_avg, w_avg, h_avg]]
    frame_index_to_max_state[frame_index] = [float(x) for x in [x_max, y_max, w_max, h_max]]
    return frame_index_to_mean_state, frame_index_to_max_state
def main():
    """Run the particle-filter tracker over all frames in IMAGE_DIR_PATH.

    The target histogram q is computed once from s_initial on the first
    frame; each later frame is resampled / predicted / re-weighted, every
    10th frame is plotted via show_particles, and the collected mean/max
    bounding boxes are dumped to JSON under RESULTS (plus two AVI videos
    when GENERATE_VIDEO is set).
    """
    state_at_first_frame = np.matlib.repmat(s_initial, N, 1).T
    S = predict_particles(state_at_first_frame)
    # LOAD FIRST IMAGE
    image = cv2.imread(os.path.join(IMAGE_DIR_PATH, "001.png"))
    # COMPUTE NORMALIZED HISTOGRAM (reference appearance of the target)
    q = compute_normalized_histogram(image, np.array(s_initial))
    # COMPUTE NORMALIZED WEIGHTS (W) AND PREDICTOR CDFS (C)
    weights = np.array([bhattacharyya_distance(compute_normalized_histogram(image, s), q) for s in S.T])
    weights /= weights.sum()
    # Initialize the variable W with the computed weights
    W = weights
    # COMPUTE CDF
    cdf = np.cumsum(weights)
    images_processed = 1
    # MAIN TRACKING LOOP
    image_name_list = os.listdir(IMAGE_DIR_PATH)
    image_name_list.sort()
    images_paths = [os.path.join(IMAGE_DIR_PATH, image_name) for image_name in image_name_list]
    frame_index_to_avg_state = {}
    frame_index_to_max_state = {}
    if GENERATE_VIDEO:
        # VideoWriter expects (width, height); image.shape is (height, width)
        dimensions = image.shape[:2][::-1]
        slowed_down_vw = cv2.VideoWriter(os.path.join(RESULTS, "slowed_down_video.avi"),
                                         fourcc=cv2.VideoWriter_fourcc(*'XVID'),
                                         fps=10,
                                         frameSize=dimensions,
                                         isColor=True)
        real_time_vw = cv2.VideoWriter(os.path.join(RESULTS, "normal_speed_video.avi"),
                                       fourcc=cv2.VideoWriter_fourcc(*'XVID'),
                                       fps=25,
                                       frameSize=dimensions,
                                       isColor=True)
        mean_bbox = s_initial[:4]
        max_bbox = s_initial[:4]
    for image_path in images_paths[1:]:
        S_prev = S
        # LOAD NEW IMAGE FRAME
        current_image = cv2.imread(image_path)
        # SAMPLE THE CURRENT PARTICLE FILTERS
        S_next_tag = sample_particles(S_prev, cdf)
        # PREDICT THE NEXT PARTICLE FILTERS (YOU MAY ADD NOISE)
        S = predict_particles(S_next_tag)
        # COMPUTE NORMALIZED WEIGHTS (W) AND PREDICTOR CDFS (C)
        # YOU NEED TO FILL THIS PART WITH CODE:
        weights = np.array([bhattacharyya_distance(compute_normalized_histogram(current_image, s), q) for s in S.T])
        weights /= weights.sum()
        W = weights
        # COMPUTE CDF
        cdf = np.cumsum(weights)
        # CREATE DETECTOR PLOTS (every 10th frame only)
        images_processed += 1
        if 0 == images_processed%10:
            frame_index_to_avg_state, frame_index_to_max_state = show_particles(
                current_image, S, W, images_processed, ID, frame_index_to_avg_state, frame_index_to_max_state)
            if GENERATE_VIDEO:
                mean_bbox = frame_index_to_avg_state[images_processed]
                max_bbox = frame_index_to_max_state[images_processed]
        if GENERATE_VIDEO:
            current_frame_bbox = np.average(S[[X_ind, Y_ind, W_ind, H_ind]], axis=1, weights=W)
            bounded_frame = create_image_with_boundingbox(current_image, mean_bbox, max_bbox, current_frame_bbox)
            slowed_down_vw.write(bounded_frame)
            real_time_vw.write(bounded_frame)
    if GENERATE_VIDEO:
        slowed_down_vw.release()
        real_time_vw.release()
    with open(os.path.join(RESULTS, 'frame_index_to_avg_state.json'), 'w') as f:
        json.dump(frame_index_to_avg_state, f, indent=4)
    with open(os.path.join(RESULTS, 'frame_index_to_max_state.json'), 'w') as f:
        json.dump(frame_index_to_max_state, f, indent=4)
| StudentYuval/VP2023 | ex3/particle_filter.py | particle_filter.py | py | 11,564 | python | en | code | 0 | github-code | 36 |
def add_piece(music_list, song, information):
    """Add *song* to the collection; *information* is the split command
    ["Add", piece, composer, key]. Prints the outcome and returns the
    (mutated) collection."""
    composer, key = information[2], information[3]
    if song in music_list:
        print(f"{song} is already in the collection!")
        return music_list
    music_list[song] = {'composer': composer, 'key': key}
    print(f'{song} by {composer} in {key} added to the collection!')
    return music_list
def remove_piece(music_list, song):
    """Delete *song* from the collection if present, printing the result;
    returns the (possibly mutated) collection."""
    if song in music_list:
        del music_list[song]
        print(f"Successfully removed {song}!")
    else:
        print(f"Invalid operation! {song} does not exist in the collection.")
    return music_list
def change_key(music_list, song, information):
    """Set the key of *song* to information[2] if the song exists; prints
    the outcome and returns the collection."""
    target_key = information[2]
    if song in music_list:
        music_list[song]['key'] = target_key
        print(f"Changed the key of {song} to {target_key}!")
    else:
        print(f"Invalid operation! {song} does not exist in the collection.")
    return music_list
# initial collection: n lines of "piece|composer|key"; first entry for a
# piece wins, duplicates are silently ignored
discography = {}
number_of_pieces = int(input())
for song in range(number_of_pieces):
    piece, composer, key = input().split('|')
    if piece not in discography:
        discography[piece] = {'composer': composer, 'key': key}
# process "Command|piece[|arg]" lines until the "Stop" sentinel
while True:
    input_data = input()
    if input_data == 'Stop':
        break
    data = input_data.split('|')
    command = data[0]
    piece = data[1]
    if command == 'Add':
        discography = add_piece(discography, piece, data)
    elif command == 'Remove':
        discography = remove_piece(discography, piece)
    elif command == 'ChangeKey':
        discography = change_key(discography, piece, data)
# final report (dicts preserve insertion order)
for piece, info in discography.items():
    composer = info['composer']
    key = info['key']
    print(f"{piece} -> Composer: {composer}, Key: {key}")
#################################### TASK CONDITION ############################
"""
Problem 3 - The Pianist
Problem for exam preparation for the Programming Fundamentals Course @SoftUni.
Submit your solutions in the SoftUni judge system at https://judge.softuni.org/Contests/Practice/Index/2525#2.
You are a pianist, and you like to keep a list of your favorite piano pieces.
Create a program to help you organize it and add, change, remove pieces from it!
On the first line of the standard input, you will receive an integer n – the number
of pieces you will initially have. On the next n lines, the pieces themselves will
follow with their composer and key, separated by "|" in the following format: "{piece}|{composer}|{key}".
Then, you will be receiving different commands, each on a new line, separated by "|",
until the "Stop" command is given:
• "Add|{piece}|{composer}|{key}":
o You need to add the given piece with the information about it to the other pieces and print:
"{piece} by {composer} in {key} added to the collection!"
o If the piece is already in the collection, print:
"{piece} is already in the collection!"
• "Remove|{piece}":
o If the piece is in the collection, remove it and print:
"Successfully removed {piece}!"
o Otherwise, print:
"Invalid operation! {piece} does not exist in the collection."
• "ChangeKey|{piece}|{new key}":
o If the piece is in the collection, change its key with the given one and print:
"Changed the key of {piece} to {new key}!"
o Otherwise, print:
"Invalid operation! {piece} does not exist in the collection."
Upon receiving the "Stop" command, you need to print all pieces in your collection in the following format:
"{Piece} -> Composer: {composer}, Key: {key}"
Input/Constraints
• You will receive a single integer at first – the initial number of pieces in the collection
• For each piece, you will receive a single line of text with information about it.
• Then you will receive multiple commands in the way described above until the command "Stop".
Output
• All the output messages with the appropriate formats are described in the problem description.
____________________________________________________________________________________________
Example_01
Input
3
Fur Elise|Beethoven|A Minor
Moonlight Sonata|Beethoven|C# Minor
Clair de Lune|Debussy|C# Minor
Add|Sonata No.2|Chopin|B Minor
Add|Hungarian Rhapsody No.2|Liszt|C# Minor
Add|Fur Elise|Beethoven|C# Minor
Remove|Clair de Lune
ChangeKey|Moonlight Sonata|C# Major
Stop
Output
Sonata No.2 by Chopin in B Minor added to the collection!
Hungarian Rhapsody No.2 by Liszt in C# Minor added to the collection!
Fur Elise is already in the collection!
Successfully removed Clair de Lune!
Changed the key of Moonlight Sonata to C# Major!
Fur Elise -> Composer: Beethoven, Key: A Minor
Moonlight Sonata -> Composer: Beethoven, Key: C# Major
Sonata No.2 -> Composer: Chopin, Key: B Minor
Hungarian Rhapsody No.2 -> Composer: Liszt, Key: C# Minor
Explanation
After we receive the initial pieces with their info,
we start receiving commands. The first two commands are to
add a piece to the collection, and since the pieces are not
already added, we manage to add them. The third add command,
however, attempts to add a piece, which is already in the collection,
so we print a special message and don't add the piece.
After that, we receive the remove command, and since the piece
is in the collection, we remove it successfully.
Finally, the last command says to change the key of a piece.
Since the key is present in the collection, we modify its key.
We receive the Stop command, print the information about the pieces, and the program ends.
____________________________________________________________________________________________
Example_02
Input
4
Eine kleine Nachtmusik|Mozart|G Major
La Campanella|Liszt|G# Minor
The Marriage of Figaro|Mozart|G Major
Hungarian Dance No.5|Brahms|G Minor
Add|Spring|Vivaldi|E Major
Remove|The Marriage of Figaro
Remove|Turkish March
ChangeKey|Spring|C Major
Add|Nocturne|Chopin|C# Minor
Stop
Output
Spring by Vivaldi in E Major added to the collection!
Successfully removed The Marriage of Figaro!
Invalid operation! Turkish March does not exist in the collection.
Changed the key of Spring to C Major!
Nocturne by Chopin in C# Minor added to the collection!
Eine kleine Nachtmusik -> Composer: Mozart, Key: G Major
La Campanella -> Composer: Liszt, Key: G# Minor
Hungarian Dance No.5 -> Composer: Brahms, Key: G Minor
Spring -> Composer: Vivaldi, Key: C Major
Nocturne -> Composer: Chopin, Key: C# Minor
""" | qceka88/Fundametals-Module | 003 - Previous final Exams/01 Final Exam - Retake/03the_pianist.py | 03the_pianist.py | py | 6,378 | python | en | code | 8 | github-code | 36 |
12545797664 | #!/usr/bin/env python
def check_path(fp):
    """Raise FileNotFoundError if path *fp* does not exist on disk."""
    import os
    if os.path.exists(fp):
        return
    raise FileNotFoundError(f"Could not find the file {fp}")
def main(sew_file, orb_file, hdf_file, symmetry, index):
    """Print a Mulliken AO-weight analysis for one molecular orbital.

    Parses the (Open)Molcas seward output for the basis-set table and
    symmetry information, reads the requested MO coefficients from the
    orbital file and the AO overlap matrix from the HDF5 file, then prints
    every AO whose weight sum_j c_i * S_ij * c_j exceeds 1%.

    Args:
        sew_file: seward/gateway output file (print level 3).
        orb_file: orbital file (GssOrb/ScfOrb/RASOrb/...) with MO coefficients.
        hdf_file: HDF5 file containing the 'AO_OVERLAP_MATRIX' dataset.
        symmetry: 1-based symmetry (irrep) label of the MO of interest.
        index: 1-based index of the MO within that irrep.
    """
    import re
    import numpy as np
    import h5py
    print(' ')
    print(' M U L L I K E N A N A L Y S I S')
    print(' of ')
    print(' Molecular Orbital in terms of Atomic Orbital wt.%')
    print(' ')
    print('For questions/suggestions contact Gaurab Ganguly')
    print(' gaurabganguly1989@gmail.com')
    print(' ')
    print('Molecular Orbital of interest:')
    print('------------------------------')
    print('Symmetry label=', symmetry,', Index=', index)
    print(' ')
    print('Files from Molcas/OpenMolcas Calculation:')
    print('-----------------------------------------')
    print('Seward file :', sew_file)
    print('Orbital file :', orb_file)
    print('HDF5 file :', hdf_file)
    print(' ')
    print(' ')
    count = [] # index of basis fn (all irreps combined)
    basisfn = [] # total number of basis fn (all irreps combined)
    irrep = [] # irreps of the point group
    sym_label = [] # indexing the irreps 1,2,3,...
    sym_bas = [] # number of basis fn in each irrep
    sym_block = [] # elements of AO overlap matrix in each irrep block
    coeff = [] # store the MO coefficients of the requested MO in a list
    # fail early if any of the three input files is missing
    check_path(sew_file)
    check_path(orb_file)
    check_path(hdf_file)
    #Reading basis information from the provided SEWARD file:
    with open(sew_file, 'r') as sfile:
        for line in sfile:
            if re.search(r'Basis Label Type Center', line):
                for line in sfile:
                    if re.search(r'Basis set specifications \:', line):
                        break
                    if re.search(r'\W\d', line):
                        count.append(int(line.split()[0]))
                        basisfn.append(line.split()[1] + "-" + (line.split()[2]))
    if len(count) == 0 and len(basisfn) == 0:
        raise ValueError("Could not find basis set table in seward output file {}".format(sew_file))
    with open(sew_file, 'r') as sfile:
        lines = sfile.readlines()
        try:
            point_group = [x for x in lines if 'Character Table' in x][0].split()[3]
            symmetry_species = [x for x in lines if 'Symmetry species' in x][0]
            basis_functions = [x for x in lines if 'Basis functions' in x][-1]
            #print("BAS", basis_functions)
        except IndexError:
            raise IndexError("Could not find 'Character Table', 'Symmetry species', or 'Basis functions' " \
                             +"search strings in seward output file {}".format(sew_file))
    num_of_irreps = len(re.findall(r'\d+', basis_functions))
    if num_of_irreps == 0:
        raise ValueError("Did not find any Irreps. in seward output file {}".format(sew_file))
    for i in range(num_of_irreps):
        sym_label.append(i+1)
        irrep.append(symmetry_species.split()[i+2])
        sym_bas.append(int(basis_functions.split()[i+2]))
        sym_block.append(int(basis_functions.split()[i+2])**2)
    # Reading orbitals from GssOrb/ScfOrb/RASOrb/PT2Orb/SONOrb or any orbitals file:
    search_string = r'\* ORBITAL{:>5d}{:>5d}'
    with open(orb_file, 'r') as ofile:
        for line in ofile:
            if re.search(search_string.format(symmetry, index), line):
                for line in ofile:
                    if re.search(search_string.format(symmetry, index+1), line):
                        break
                    if re.search(r'\s', line):
                        for item in line.strip().split():
                            coeff.append(float(item))
    if len(coeff) == 0 and re.search(search_string.format(symmetry, index), line):
        # found the search string
        raise ValueError("Did not find orbitals in orbital file {}".format(orb_file))
    elif re.search(search_string.format(symmetry, index), line) is not None:
        # did not find the search string
        raise RuntimeError("Something else went wrong.......Help me PLS. :(")
    # Reading AO overlap integrals from the provided '.h5' file:
    with h5py.File(hdf_file, 'r') as hdf:
        overlap = np.array(hdf.get('AO_OVERLAP_MATRIX'))
    print(' POINT GROUP =', point_group)
    print('-------------------------------------------------------------')
    print('Symm. label Irrep. No. of MOs')
    print('-------------------------------------------------------------')
    template = ' {:>10d} {:<10s} {:>10d}'
    for i in range(num_of_irreps):
        print(template.format(i+1, symmetry_species.split()[i+2], int(basis_functions.split()[i+2])))
    print('-------------------------------------------------------------')
    # slice the requested irrep block out of the flat overlap array
    start_bas = 0
    start_block = 0
    end_bas = 0
    end_block = 0
    try:
        if symmetry == 1:
            end_bas = start_bas + sym_bas[0]
            end_block = start_block + sym_block[0]
            bas = np.array(basisfn[start_bas:end_bas])
            block = np.reshape(overlap[start_block:end_block], (sym_bas[0], sym_bas[0]))
        else:
            for i in range(symmetry-1):
                start_bas += sym_bas[i]
                start_block += sym_block[i]
            for i in range(symmetry):
                end_bas += sym_bas[i]
                end_block += sym_block[i]
            bas = np.array(basisfn[start_bas:end_bas])
            block = np.reshape(overlap[start_block:end_block], (sym_bas[symmetry-1],
                                                                sym_bas[symmetry-1]))
    # TODO: find out what exception it raises that you would have to deal with.
    # having a general Exception is not good to do.
    # python has the great ability to handle different error cases separately and you can give the
    # user valuable information as to what went wrong when you raise the appropriate error.
    except Exception:
        print("Error Exit:")
        print("Symmetry label", symmetry, "is not possible for", point_group, "point group!")
        print("Check the table and re run.")
    # Multiplying coeff*overlap*coeff (CSC) to get MO wt%
    if symmetry == 0 or index == 0:
        # TODO: here a raise ValueError would be more appropriate also it will terminate the program
        print("Error Exit:")
        print("Symmetry or Index can't be 0!")
        print("Check the Symmetry label for Irreps in the table and re run.")
    elif symmetry not in sym_label:
        pass
    elif index > sym_bas[symmetry-1]:
        # TODO: here a raise ValueError would be more appropriate also it will terminate the program
        raise ValueError("Error Exit: Index", index, "is beyond range for", irrep[symmetry-1], \
            "Irrep! Check the table and re run.")
        #print("Error Exit:")
        #print("Index", index, "is beyond range for", irrep[symmetry-1], "Irrep!")
        #print("Check the table and re run.")
    elif symmetry in sym_label and index <= sym_bas[symmetry-1]:
        print('')
        print('Mulliken Analysis of:')
        template = "n-th ('n ={:>3}') MO in '{}' Symmetry (symm. label = '{}')."
        print(template.format(index, irrep[symmetry - 1], symmetry))
        print('All AO function with > 1.0% weight in the MO is printed.')
        print('-------------------------------------------------------------')
        print('AO-func. wt.% ')
        print('-------------------------------------------------------------')
        for i in range(len(coeff)):
            tmp = []
            for j in range(len(coeff)):
                tmp.append(coeff[i] * block[i][j] * coeff[j])
            if abs(sum(tmp))*100 > 1.0 : # user can change the thresold
                print('{:<10s} {:>10.1f}%'.format(bas[i], sum(tmp)*100))
        print('-------------------------------------------------------------')
        print('')
    else:
        # TODO: here a raise ValueError would be more appropriate also it will terminate the program
        raise ValueError("Error Exit: Symmetry label and Index is not possible! Check and re run.")
        #print("Error Exit: Symmetry label and Index is not possible! Check and re run.")
if __name__ == "__main__":
    # CLI entry point: three positional input files plus the MO's irrep and index
    import argparse, pathlib
    parser = argparse.ArgumentParser(description="This program calculates AO wt% in a given MO.")
    parser.add_argument('sew_file', type=pathlib.Path, metavar='1) file.out',
                        help="Gateway/Seward output file with print level = 3.")
    parser.add_argument('orb_file', type=pathlib.Path, metavar='2) file.SCF/RAS/SONOrb',
                        help="Orbital file with MO co-efficients.")
    parser.add_argument('hdf_file', type=pathlib.Path, metavar='3) file.h5',
                        help="HDF5 file for AO overlap matrix.")
    parser.add_argument('-s', '--symmetry', type=int, metavar='MO_symmetry', required=True,
                        help="Symmetry/Irrep of the orbital of interest.")
    parser.add_argument('-i', '--index', type=int, metavar='MO_Index', required=True,
                        help="Orbital index in the particular Symmetry/Irrep.")
    args = parser.parse_args()
    main(args.sew_file, args.orb_file, args.hdf_file, args.symmetry, args.index)
| gaurabganguly1989/molcas_mo2ao_weights | molcas_ao_weights.py | molcas_ao_weights.py | py | 9,516 | python | en | code | 0 | github-code | 36 |
30722628061 | #baekjoon_1072_게임
#=== import module ===#
#=== variable declare ===#
#=== Function define ===#
#=== main function ===#
x,y = map(int,input().split()); # total games played (x), games won (y)
currentRate = y*100 // x; # current win rate, floored to whole percent
# from 99% on, the floored percentage can never increase -> answer is -1
if currentRate >= 99: print(-1); exit(0);
high = 1000000000; # upper bound on the number of extra games to try
row = 0; # lower bound of the search window
result = -1;
# binary search for the smallest k such that winning k more games in a row
# raises the floored win percentage above currentRate
while(row <= high):
    mid = (high + row) // 2;
    rate = (y + mid) * 100 // (x + mid); # rate after mid additional wins
    if currentRate >= rate:
        row = mid + 1;
        result = mid + 1;
    else:
        high = mid - 1;
print(result);
18873944718 | from decimal import Decimal
from background.celery_app import app as celery_app
from services.csmoney import DotaDataProcessor
from settings.config import SERVICES, CSMONEY
from models.dota.item import DotaItem
from models.dota.hero import Hero
from models.dota.service_price import ServicePrice
from models.dota.specification import Rarity, Quality, ItemType, Slot
from extentions import db
@celery_app.task(name="update_service_price", queue="items_base_queue")
def update_service_price(
        game_id: int,
        item_hash_name: str,
        price: Decimal,
        service: str,
        hero_name: str = None,
        rarity: str = None,
        item_type: str = None,
        slot: str = None,
        quality: str = None
):
    """Create or update the price of an item for one marketplace service.

    For Dota 2 (game_id 570) the item row is looked up by its market hash
    name; a missing item is only created from CS.MONEY data (the service
    that supplies the full specification), together with its Hero row when
    a hero name is given. The per-service ServicePrice row is then
    inserted or updated and the session committed.

    Raises:
        ValueError: if `service` is unknown or `game_id` is unsupported.
    """
    if service not in SERVICES:
        # the original raised the f-string itself, which is a TypeError at
        # runtime ("exceptions must derive from BaseException"); raise a
        # proper exception type instead
        raise ValueError(f"No such service {service}")
    if game_id == 570:
        item = DotaItem.query.filter_by(name=item_hash_name).one_or_none()
        if not item and service == CSMONEY:
            item = DotaItem(
                name=item_hash_name,
                quality=Quality(quality).name,
                rarity=Rarity(rarity).name,
                item_type=ItemType(item_type).name,
                slot=Slot(slot).name
            )
            hero = Hero.query.filter_by(name=hero_name).one_or_none()
            if not hero and hero_name:
                hero = Hero(
                    name=hero_name
                )
                hero.items.append(item)
                db.session.add(hero)
            else:
                if hero:
                    item.hero_id = hero.id
                db.session.add(item)
        if not item:
            # item unknown and this service cannot create it
            return {
                "error": "Cant update with not cs money"
            }
        service_price = ServicePrice.query.filter_by(service=service, item=item).one_or_none()
        if not service_price:
            service_price = ServicePrice(
                service=service,
                price=price
            )
            item.prices.append(service_price)
        else:
            service_price.price = price
        if service_price:
            db.session.add(service_price)
        db.session.commit()
    elif game_id == 730:
        # CS:GO: not implemented yet
        pass
    else:
        # ValueError is a subclass of Exception, so existing handlers still match
        raise ValueError("No such a game")
| NeKadgar/game_market_items_base | background/tasks/item_base.py | item_base.py | py | 2,243 | python | en | code | 0 | github-code | 36 |
16627412673 | '''
*****************************************************************************************
*
* =================================================
* Pharma Bot Theme (eYRC 2022-23)
* =================================================
*
* This script is intended for implementation of Task 4A
* of Pharma Bot (PB) Theme (eYRC 2022-23).
*
* Filename: task_4a.py
* Created:
* Last Modified: 02/01/2023
* Author: e-Yantra Team
*
* This software is made available on an "AS IS WHERE IS BASIS".
* Licensee/end user indemnifies and will keep e-Yantra indemnified from
* any and all claim(s) that emanate from the use of the Software or
* breach of the terms of this agreement.
*
*****************************************************************************************
'''
# Team ID: [ 3004 ]
# Author List: [ Aryan Bawankar, Advait Dhamorikar ]
# Filename: task_4a.py
# Functions: [ place_packages, place_traffic_signals, place_start_end_nodes, place_horizontal_barricade, place_vertical_barricade]
#
####################### IMPORT MODULES #######################
## You are not allowed to make any changes in this section. ##
##############################################################
import numpy as np
import cv2
from zmqRemoteApi import RemoteAPIClient
import zmq
import os
import time
##############################################################
################# ADD UTILITY FUNCTIONS HERE #################
##############################################################
def place_packages(medicine_package_details, sim, all_models):
    """
    Purpose:
    ---
    Places the detected medicine packages on the virtual arena. Each package
    is inserted into the designated drop-off strip of its shop, 0.09 m apart
    in the order the packages appear for that shop.

    Input Arguments:
    ---
    `medicine_package_details` : [ list ]
            nested list containing details of the medicine packages present.
            Each element of this list will contain
            - Shop number as Shop_n
            - Color of the package as a string
            - Shape of the package as a string
            - Centroid co-ordinates of the package
    `sim` : [ object ]
            ZeroMQ RemoteAPI object
    `all_models` : [ list ]
            list containing handles of all the models imported into the scene

    Returns:
    `all_models` : [ list ]
            list containing handles of all the models imported into the scene

    Example call:
    ---
    all_models = place_packages(medicine_package_details, sim, all_models)
    """
    models_directory = os.getcwd()
    packages_models_directory = os.path.join(models_directory, "package_models")
    arena = sim.getObject('/Arena')

    ####################### ADD YOUR CODE HERE #########################
    # CoppeliaSim model primitive for each detected 2-D shape
    shape_for = {"Circle": "cylinder", "Square": "cube", "Triangle": "cone"}
    # x of the first drop-off slot of each shop (shop edge + 0.044 m offset)
    base_x = {
        "Shop_1": -0.9 + 0.044,
        "Shop_2": -0.54 + 0.044,
        "Shop_3": -0.18 + 0.044,
        "Shop_4": 0.18 + 0.044,
        "Shop_5": 0.54 + 0.044,
    }
    placed_count = {}  # packages already placed, per shop

    for package in medicine_package_details:
        shop, colour = package[0], package[1]
        shape = shape_for[package[2]]
        # slot k of a shop sits 0.09 m to the right of slot k-1; a per-shop
        # counter (instead of the original shared running x plus one flag
        # per shop) keeps placement correct even when the input list is not
        # grouped by shop
        slot = placed_count.get(shop, 0)
        placed_count[shop] = slot + 1
        x = base_x[shop] + 0.09 * slot

        model_name = colour + "_" + shape
        model_path = os.path.join(packages_models_directory, model_name + ".ttm")
        medicine = sim.loadModel(model_path)
        sim.setObjectParent(medicine, arena, False)
        sim.setObjectAlias(medicine, model_name)
        sim.setObjectPosition(medicine, arena, [x, 0.65, 0.015])
        all_models.append(medicine)
    ####################################################################
    return all_models
def place_traffic_signals(traffic_signals, sim, all_models):
    """
    Purpose:
    ---
    Places a traffic-signal model at every node listed in `traffic_signals`
    on the virtual arena.

    Input Arguments:
    ---
    `traffic_signals` : [ list ]
            list containing nodes (e.g. "C3") in which traffic signals are present
    `sim` : [ object ]
            ZeroMQ RemoteAPI object
    `all_models` : [ list ]
            list containing handles of all the models imported into the scene

    Returns:
    `all_models` : [ list ]
            list containing handles of all the models imported into the scene

    Example call:
    ---
    all_models = place_traffic_signals(traffic_signals, sim, all_models)
    """
    models_directory = os.getcwd()
    traffic_sig_model = os.path.join(models_directory, "signals", "traffic_signal.ttm" )
    arena = sim.getObject('/Arena')

    ####################### ADD YOUR CODE HERE #########################
    # node letters A-F index the columns and digits 1-6 the rows of the
    # 6x6 arena grid (0.36 m pitch); lookup tables replace the two if/elif
    # ladders of the original (an unknown node now raises KeyError instead
    # of being silently placed at the origin)
    column_x = {'A': -0.9, 'B': -0.54, 'C': -0.18, 'D': 0.18, 'E': 0.54, 'F': 0.9}
    row_y = {'1': 0.9, '2': 0.54, '3': 0.18, '4': -0.18, '5': -0.54, '6': -0.9}
    for node in traffic_signals:
        position = [column_x[node[0]], row_y[node[1]], 0.15588]
        signal = sim.loadModel(traffic_sig_model)
        sim.setObjectParent(signal, arena, False)
        sim.setObjectAlias(signal, "Signal_" + node)
        sim.setObjectPosition(signal, arena, position)
        all_models.append(signal)
    ####################################################################
    return all_models
def place_start_end_nodes(start_node, end_node, sim, all_models):
    """
    Purpose:
    ---
    Places the start-node and end-node marker models at their grid nodes on
    the virtual arena, parented under '/Arena'.

    Input Arguments:
    ---
    `start_node` : [ string ]
            node name (e.g. 'A1') of the start marker
    `end_node` : [ string ]
            node name (e.g. 'F6') of the end marker
    `sim` : [ object ]
            ZeroMQ RemoteAPI object
    `all_models` : [ list ]
            list containing handles of all the models imported into the scene

    Returns:
    ---
    `all_models` : [ list ]
            list containing handles of all the models imported into the scene

    Example call:
    ---
    all_models = place_start_end_nodes(start_node, end_node, sim, all_models)
    """
    models_directory = os.getcwd()
    start_node_model = os.path.join(models_directory, "signals", "start_node.ttm")
    end_node_model = os.path.join(models_directory, "signals", "end_node.ttm")
    arena = sim.getObject('/Arena')
    ####################### ADD YOUR CODE HERE #########################
    # Shared 6x6 grid mapping: columns A..F left-to-right, rows 1..6
    # top-to-bottom, 0.36 m apart; unknown characters fall back to 0.
    x_of_column = {'A': -0.9, 'B': -0.54, 'C': -0.18, 'D': 0.18, 'E': 0.54, 'F': 0.9}
    y_of_row = {'1': 0.9, '2': 0.54, '3': 0.18, '4': -0.18, '5': -0.54, '6': -0.9}
    # Both markers are placed identically apart from the model and alias,
    # so a single loop replaces the duplicated start/end code.
    placements = ((start_node, "Start_Node", start_node_model),
                  (end_node, "End_Node", end_node_model))
    for node, alias, model_path in placements:
        x = x_of_column.get(node[0], 0)
        y = y_of_row.get(node[1], 0)
        handle = sim.loadModel(model_path)
        sim.setObjectParent(handle, arena, False)
        sim.setObjectAlias(handle, alias)
        # z = 0.15588 rests the marker on the arena surface.
        sim.setObjectPosition(handle, arena, [x, y, 0.15588])
        all_models.append(handle)
    ####################################################################
    return all_models
def place_horizontal_barricade(horizontal_roads_under_construction, sim, all_models):
    """
    Purpose:
    ---
    Places a horizontal barricade model over every missing horizontal road
    (a link such as 'A1-B1'), midway between the two adjacent nodes.

    Input Arguments:
    ---
    `horizontal_roads_under_construction` : [ list ]
            list of missing horizontal link strings, e.g. 'A1-B1'
    `sim` : [ object ]
            ZeroMQ RemoteAPI object
    `all_models` : [ list ]
            list containing handles of all the models imported into the scene

    Returns:
    ---
    `all_models` : [ list ]
            list containing handles of all the models imported into the scene

    Example call:
    ---
    all_models = place_horizontal_barricade(horizontal_roads_under_construction, sim, all_models)
    """
    models_directory = os.getcwd()
    horiz_barricade_model = os.path.join(models_directory, "barricades", "horizontal_barricade.ttm")
    arena = sim.getObject('/Arena')
    ####################### ADD YOUR CODE HERE #########################
    # Grid mapping (columns A..F, rows 1..6, 0.36 m apart). Using .get with
    # a default of 0 also fixes a latent NameError: the original if/elif
    # chains never initialised x/y before use for unrecognised characters.
    x_of_column = {'A': -0.9, 'B': -0.54, 'C': -0.18, 'D': 0.18, 'E': 0.54, 'F': 0.9}
    y_of_row = {'1': 0.9, '2': 0.54, '3': 0.18, '4': -0.18, '5': -0.54, '6': -0.9}
    for link in horizontal_roads_under_construction:
        # The barricade sits midway between two horizontally adjacent nodes,
        # hence the +0.18 m (half a grid step) offset from the left node.
        x = x_of_column.get(link[0], 0) + 0.18
        y = y_of_row.get(link[1], 0)
        name = "Horizontal_missing_road_" + link[0] + link[1] + "_" + link[3] + link[4]
        barricade = sim.loadModel(horiz_barricade_model)
        sim.setObjectParent(barricade, arena, False)
        sim.setObjectAlias(barricade, name)
        # z = 0.027 rests the barricade on the road surface.
        sim.setObjectPosition(barricade, arena, [x, y, 0.027])
        all_models.append(barricade)
    ####################################################################
    return all_models
def place_vertical_barricade(vertical_roads_under_construction, sim, all_models):
    """
    Purpose:
    ---
    Places a vertical barricade model over every missing vertical road
    (a link such as 'A1-A2'), midway between the two adjacent nodes.

    Input Arguments:
    ---
    `vertical_roads_under_construction` : [ list ]
            list of missing vertical link strings, e.g. 'A1-A2'
    `sim` : [ object ]
            ZeroMQ RemoteAPI object
    `all_models` : [ list ]
            list containing handles of all the models imported into the scene

    Returns:
    ---
    `all_models` : [ list ]
            list containing handles of all the models imported into the scene

    Example call:
    ---
    all_models = place_vertical_barricade(vertical_roads_under_construction, sim, all_models)
    """
    models_directory = os.getcwd()
    vert_barricade_model = os.path.join(models_directory, "barricades", "vertical_barricade.ttm")
    arena = sim.getObject('/Arena')
    ####################### ADD YOUR CODE HERE #########################
    # Grid mapping (columns A..F, rows 1..6, 0.36 m apart). Using .get with
    # a default of 0 also fixes a latent NameError: the original if/elif
    # chains never initialised x/y before use for unrecognised characters.
    x_of_column = {'A': -0.9, 'B': -0.54, 'C': -0.18, 'D': 0.18, 'E': 0.54, 'F': 0.9}
    y_of_row = {'1': 0.9, '2': 0.54, '3': 0.18, '4': -0.18, '5': -0.54, '6': -0.9}
    for link in vertical_roads_under_construction:
        x = x_of_column.get(link[0], 0)
        # The barricade sits midway between two vertically adjacent nodes,
        # hence the -0.18 m (half a grid step) offset below the upper node.
        y = y_of_row.get(link[1], 0) - 0.18
        name = "Vertical_missing_road_" + link[0] + link[1] + "_" + link[3] + link[4]
        barricade = sim.loadModel(vert_barricade_model)
        sim.setObjectParent(barricade, arena, False)
        sim.setObjectAlias(barricade, name)
        # z = 0.027 rests the barricade on the road surface.
        sim.setObjectPosition(barricade, arena, [x, y, 0.027])
        all_models.append(barricade)
    ####################################################################
    return all_models
if __name__ == "__main__":
client = RemoteAPIClient()
sim = client.getObject('sim')
# arena = sim.getObject('/Arena')
aruco_handle = sim.getObject('/aruco_3')
arena = sim.getObject('/Arena')
# sim.setObjectParent(aruco_handle, arena, False)
# sim.setObjectAlias(aruco_handle, "marker")
sim.setObjectPosition(aruco_handle, -1, [0.15, 0.15, 0.15])
sim.setObjectOrientation(aruco_handle, -1, [0, 0, 45])
# path directory of images in test_images folder
img_dir = os.getcwd() + "/test_imgs/"
i = 0
config_img = cv2.imread(img_dir + 'maze_' + str(i) + '.png')
print('\n============================================')
print('\nFor maze_0.png')
# object handles of each model that gets imported to the scene can be stored in this list
# at the end of each test image, all the models will be removed
all_models = []
# import task_1a.py. Make sure that task_1a.py is in same folder as task_4a.py
task_1 = __import__('task_1a')
detected_arena_parameters = task_1.detect_arena_parameters(config_img)
# obtain required arena parameters
medicine_package_details = detected_arena_parameters["medicine_packages"]
traffic_signals = detected_arena_parameters['traffic_signals']
start_node = detected_arena_parameters['start_node']
end_node = detected_arena_parameters['end_node']
horizontal_roads_under_construction = detected_arena_parameters[
'horizontal_roads_under_construction']
vertical_roads_under_construction = detected_arena_parameters[
'vertical_roads_under_construction']
print("[1] Setting up the scene in CoppeliaSim")
all_models = place_packages(medicine_package_details, sim, all_models)
all_models = place_traffic_signals(traffic_signals, sim, all_models)
all_models = place_horizontal_barricade(
horizontal_roads_under_construction, sim, all_models)
all_models = place_vertical_barricade(
vertical_roads_under_construction, sim, all_models)
all_models = place_start_end_nodes(start_node, end_node, sim, all_models)
print("[2] Completed setting up the scene in CoppeliaSim")
# wait for 10 seconds and then remove models
time.sleep(10)
print("[3] Removing models for maze_0.png")
for i in all_models:
sim.removeModel(i)
choice = input(
'\nDo you want to run your script on all test images ? => "y" or "n": ')
if choice == 'y':
for i in range(1, 5):
print('\n============================================')
print('\nFor maze_' + str(i) + '.png')
config_img = cv2.imread(img_dir + 'maze_' + str(i) + '.png')
# object handles of each model that gets imported to the scene can be stored in this list
# at the end of each test image, all the models will be removed
all_models = []
# import task_1a.py. Make sure that task_1a.py is in same folder as task_4a.py
task_1 = __import__('task_1a')
detected_arena_parameters = task_1.detect_arena_parameters(
config_img)
# obtain required arena parameters
medicine_package_details = detected_arena_parameters["medicine_packages"]
traffic_signals = detected_arena_parameters['traffic_signals']
start_node = detected_arena_parameters['start_node']
end_node = detected_arena_parameters['end_node']
horizontal_roads_under_construction = detected_arena_parameters[
'horizontal_roads_under_construction']
vertical_roads_under_construction = detected_arena_parameters[
'vertical_roads_under_construction']
print("[1] Setting up the scene in CoppeliaSim")
place_packages(medicine_package_details, sim, all_models)
place_traffic_signals(traffic_signals, sim, all_models)
place_horizontal_barricade(
horizontal_roads_under_construction, sim, all_models)
place_vertical_barricade(
vertical_roads_under_construction, sim, all_models)
place_start_end_nodes(start_node, end_node, sim, all_models)
print("[2] Completed setting up the scene in CoppeliaSim")
# wait for 10 seconds and then remove models
time.sleep(10)
print("[3] Removing models for maze_" + str(i) + '.png')
for i in all_models:
sim.removeModel(i)
| advait-0/eyrc22_PB_3004 | Task 4/Task 4A/task_4a.py | task_4a.py | py | 20,080 | python | en | code | 3 | github-code | 36 |
26493496083 |
from django.urls import path
from .import views
# Route table for the store app; the `name` values are referenced by
# {% url %} template tags and reverse() lookups.
urlpatterns = [
    path("", views.index, name="storehome"),
    path("about/", views.about, name="aboutus"),
    path("contact/", views.contact, name="contactus"),
    path("Seller/", views.seller, name="sellerid"), #Seller/ is the html file name not the function name
    path("search/", views.search, name="searchbar"),
    # <int:getid> captures the product primary key for the detail view.
    path("productview/<int:getid>", views.prodView, name="productview1"),
    path("checkout/", views.checkout, name="checkout"),
    path("signup/", views.signup, name="signup"),
    path("handlelogin/", views.handlelogin, name="handlelogin"),
    path("handlelogout/", views.handlelogout, name="handlelogout")
] | princegupta003005/E-commerce-Website | Anapp/store/urls.py | urls.py | py | 697 | python | en | code | 0 | github-code | 36 |
5213223570 | from queue import Queue
class MovingAverage:
    """Moving average of the last `size` streamed values, O(1) per update."""

    def __init__(self, size: int):
        """
        Initialize your data structure here.

        Args:
            size: maximum number of most-recent values averaged by next().
        """
        # A deque gives O(1) append/popleft without the locking overhead of
        # queue.Queue, which is a thread-synchronization primitive and not
        # meant to be a plain FIFO container.
        from collections import deque
        self.size = size
        self.window = deque()
        self.rolling_sum = 0

    def next(self, val: int) -> float:
        """Record `val` and return the average of the last `size` values."""
        self.window.append(val)
        self.rolling_sum += val
        if len(self.window) > self.size:
            # Evict the oldest value so the window never exceeds `size`.
            self.rolling_sum -= self.window.popleft()
        return self.rolling_sum / len(self.window)
# Your MovingAverage object will be instantiated and called as such:
# obj = MovingAverage(size)
# param_1 = obj.next(val) | tugloo1/leetcode | problem_346.py | problem_346.py | py | 605 | python | en | code | 0 | github-code | 36 |
40857469466 | import requests
import show_route
import serial
class Navi_auto:
    """Builds AMap (Gaode) REST API requests for POI search and route
    planning, and feeds the resulting route polyline into show_route."""

    def __init__(self):
        # NOTE(review): the web-service key is hard-coded; consider moving it
        # to configuration so it is not committed to source control.
        self.key = '1b1779b2176bc8d85a93f9aef22b8a53'
        # Indices into the [lon, lat] coordinate lists below.
        self.latitude = 1
        self.longitude = 0
        self.start_coordinate = [116.481028, 39.989643]
        self.desti_coordinate = [116.434446, 39.90816]
        self.res_url = ""

    def get_destination(self, destination, region='320111'):
        """Search POIs named `destination` inside `region` (an adcode) and
        return a dict of {poi name: 'lon,lat' location string}."""
        # BUG FIX: the query string previously contained the mis-encoded
        # sequence '(R)ion=' instead of '&region=', so the region parameter
        # never reached the API.
        url = f"https://restapi.amap.com/v5/place/text?key={self.key}&keywords={destination}&region={region}&city_limit=true&show_fields=children"
        data_dict = requests.get(url).json()
        pos_dict = {}
        for poi in data_dict["pois"]:
            pos_dict[poi['name']] = poi['location']
        return pos_dict

    def get_coordinate(self, start_longitude, start_latitude, desti_longitude, desti_latitude):
        """Store the start and destination coordinates as [lon, lat]."""
        self.start_coordinate[self.longitude] = start_longitude
        self.start_coordinate[self.latitude] = start_latitude
        self.desti_coordinate[self.longitude] = desti_longitude
        self.desti_coordinate[self.latitude] = desti_latitude

    def _as_param(self, coordinate):
        """Render a [lon, lat] list as the 'lon,lat' string the API expects."""
        return str(coordinate).strip('[').strip(']').replace(' ', '')

    def get_walking_url(self):
        """Prepare `res_url` for a walking-route request."""
        start_pos = self._as_param(self.start_coordinate)
        desti_pos = self._as_param(self.desti_coordinate)
        self.res_url = f"https://restapi.amap.com/v3/direction/walking?key={self.key}&origin={start_pos}&destination={desti_pos}"

    def get_bike_url(self):
        """Prepare `res_url` for a cycling-route request."""
        start_pos = self._as_param(self.start_coordinate)
        desti_pos = self._as_param(self.desti_coordinate)
        self.res_url = f"https://restapi.amap.com/v4/direction/bicycling?key={self.key}&origin={start_pos}&destination={desti_pos}"

    def get_drive_url(self):
        """Prepare `res_url` for a driving-route request."""
        start_pos = self._as_param(self.start_coordinate)
        desti_pos = self._as_param(self.desti_coordinate)
        self.res_url = f"https://restapi.amap.com/v3/direction/driving?origin={start_pos}&destination={desti_pos}&key={self.key}"

    def make_navi_data(self):
        """Fetch the planned route and push its polyline into show_route."""
        points = []
        data = requests.get(self.res_url).json()
        try:
            # v3 endpoints nest the paths under 'route'.
            paths = data["route"]["paths"]
            polyline = paths[0]['steps']  # list
        except Exception:
            # The v4 bicycling endpoint nests them under 'data' instead.
            paths = data["data"]["paths"]
            polyline = paths[0]['steps']  # list
        for i in range(0, len(polyline)):
            points.extend(polyline[i]['polyline'].split(';'))
        show_route.gps_lon_lat.clear()
        for i in range(0, len(points)):
            # Each point is 'lon,lat'; show_route expects lat then lon.
            x, y = map(float, points[i].split(","))
            show_route.gps_lon_lat.append(y)
            show_route.gps_lon_lat.append(x)
        show_route.create_pic_data()
class device:
    """Auto-detects a serial GPS receiver on COM0..COM99 and reads fixes."""

    def __init__(self):
        self.location = []       # last [lat, lon] fix, in degrees
        self.baud_rate = 115200
        self.port = ''
        self.interface = ''      # serial.Serial instance once detected
        self.GPS_Data = ''       # last raw NMEA line read from the port
        self.create_interface()

    def create_interface(self):
        # Probe COM ports in order until one yields plausible GPS output.
        for i in range(0, 100):
            self.port = 'COM'
            self.port = self.port + str(i)
            # NOTE(review): COM7/COM9 are skipped unconditionally -- presumably
            # occupied by other devices on the development machine; confirm.
            if i == 7 or i == 9:
                continue
            try:
                self.interface = serial.Serial(self.port, self.baud_rate, timeout=1)
                self.GPS_Data = self.interface.readline().decode('utf-8')
                # Heuristic device check: a real line is at least 8 chars and
                # does not start with '2' (TODO confirm which non-GPS device
                # emits lines starting with '2' here).
                if len(self.GPS_Data) >= 8 and self.GPS_Data[0] != '2':
                    print(self.GPS_Data)
                    print("Successfully find the device!")
                    print("Port:{}".format(self.port))
                    break
                else:
                    print(self.GPS_Data)
                    print("Connected to {},but it is not the device".format(self.port))
            except Exception as e:
                print("{} is not the device".format(self.port))
                print("error msg:{}".format(e))

    def get_location(self):
        # Block until a $GNRMC sentence arrives, then return [lat, lon].
        while True:
            self.GPS_Data = self.interface.readline().decode('utf-8')
            if self.GPS_Data.startswith('$GNRMC'):
                fields = self.GPS_Data.split(',')
                self.location = []
                # Fields 3 and 5 hold latitude/longitude in NMEA ddmm.mmmm
                # form; DegreeConvert turns them into decimal degrees.
                self.location.append(show_route.DegreeConvert(float(fields[3])))
                self.location.append(show_route.DegreeConvert(float(fields[5])))
                return self.location
if __name__ == '__main__':
    # Smoke test: probe for the GPS device and block until one fix arrives.
    a = device()
    a.get_location()
| haiboCode233/KivyPlusAR | GPSAPI.py | GPSAPI.py | py | 4,415 | python | en | code | 0 | github-code | 36 |
12151884978 | import torch # noqa
from model import softmax_classifier
from model import softmax_classifier_backward
from model import cross_entropy
from utils import Metric, accuracy # noqa
# Public API of this module, honoured by `from train import *`.
# BUG FIX: 'test_epoch' was listed twice and 'train_epoch' was missing,
# so star-imports did not export train_epoch.
__all__ = ['create_model', 'train_epoch', 'test_epoch', 'train_loop']
#################################################
# create_model
#################################################
def create_model():
    """Creates a Softmax Classifier model `(w, b)`.

    Both tensors are initialized i.i.d. uniform on (-sqrt(k), sqrt(k)) with
    k = 1/in_dim, matching torch.nn.Linear's default initialization
    (https://pytorch.org/docs/stable/generated/torch.nn.Linear.html).

    Returns:
        w (torch.Tensor): The weight tensor, has shape `(num_classes, in_dim)`.
        b (torch.Tensor): The bias tensor, has shape `(num_classes,)`.
    """
    # BEGIN SOLUTION
    num_classes = 10
    in_dim = 28 * 28
    sqrt_k = (1 / in_dim) ** 0.5
    # uniform_ samples U(-sqrt_k, sqrt_k) directly, replacing the manual
    # scale-and-shift of torch.rand onto that interval.
    w = torch.empty(num_classes, in_dim).uniform_(-sqrt_k, sqrt_k)
    b = torch.empty(num_classes).uniform_(-sqrt_k, sqrt_k)
    # END SOLUTION
    return w, b
#################################################
# train_epoch
#################################################
def train_epoch(w, b, lr, loader):
    """Trains over an epoch, and returns the accuracy and loss over the epoch.

    Note: The accuracy and loss are average over the epoch. That's different from
    running the classifier over the data again at the end of the epoch, as the
    weights changed over the iterations. However, it's a common practice, since
    iterating over the training set (again) is time and resource exhustive.

    Args:
        w (torch.Tensor): The weight tensor, has shape `(num_classes, in_dim)`.
        b (torch.Tensor): The bias tensor, has shape `(num_classes,)`.
        lr (float): The learning rate.
        loader (torch.utils.data.DataLoader): A data loader. An iterator over the dataset.

    Returns:
        acc_metric (Metric): The accuracy metric over the epoch.
        loss_metric (Metric): The loss metric over the epoch.
    """
    device = w.device
    loss_metric = Metric()
    acc_metric = Metric()
    for x, y in loader:
        # Move the batch to the same device as the parameters.
        x, y = x.to(device=device), y.to(device=device)
        # BEGIN SOLUTION
        # NOTE: In your solution you MUST keep the loss in a tensor called `loss`
        # NOTE: In your solution you MUST keep the acurracy in a tensor called `acc`
        num_classes, in_dim = w.shape
        batch_size = x.shape[0]
        # Reshape the input x
        # Flatten each sample into a (batch_size, in_dim) row vector.
        x = x.reshape(batch_size, in_dim)
        # Run the model to get a prediction
        pred = softmax_classifier(x, w, b)
        # Compute the cross-entropy loss
        loss = cross_entropy(pred, y)
        acc = accuracy(pred, y)
        # Compute the gradients of the weights
        softmax_classifier_backward(x, w, b, pred, y)
        # Update the weights
        # Plain SGD step. Assumes softmax_classifier_backward OVERWRITES
        # w.grad/b.grad on every call (there is no explicit grad zeroing
        # here) -- TODO confirm it does not accumulate like autograd.
        w -= lr * w.grad
        b -= lr * b.grad
        # END SOLUTION
        # Metrics are weighted by batch size so the epoch averages are exact.
        loss_metric.update(loss.item(), x.size(0))
        acc_metric.update(acc.item(), x.size(0))
    return loss_metric, acc_metric
#################################################
# test_epoch
#################################################
def test_epoch(w, b, loader):
    """Evaluating the model at the end of the epoch.

    Args:
        w (torch.Tensor): The weight tensor, has shape `(num_classes, in_dim)`.
        b (torch.Tensor): The bias tensor, has shape `(num_classes,)`.
        loader (torch.utils.data.DataLoader): A data loader. An iterator over the dataset.

    Returns:
        acc_metric (Metric): The accuracy metric over the epoch.
        loss_metric (Metric): The loss metric over the epoch.
    """
    device = w.device
    loss_metric = Metric()
    acc_metric = Metric()
    for x, y in loader:
        x, y = x.to(device=device), y.to(device=device)
        # BEGIN SOLUTION
        # NOTE: In your solution you MUST keep the loss in a tensor called `loss`
        # NOTE: In your solution you MUST keep the acurracy in a tensor called `acc`
        num_classes, in_dim = w.shape
        batch_size = x.shape[0]
        # Reshape the input x
        # Flatten each sample into a (batch_size, in_dim) row vector.
        x = x.reshape(batch_size, in_dim)
        # Run the model to get a prediction
        pred = softmax_classifier(x, w, b)
        # Compute the cross-entropy loss
        # Evaluation only: no backward pass and no weight update here.
        loss = cross_entropy(pred, y)
        acc = accuracy(pred, y)
        # END SOLUTION
        loss_metric.update(loss.item(), x.size(0))
        acc_metric.update(acc.item(), x.size(0))
    return loss_metric, acc_metric
#################################################
# PROVIDED: train
#################################################
def train_loop(w, b, lr, train_loader, test_loader, epochs, test_every=1):
    """Trains the Softmax Classifier model and report the progress.

    Args:
        w (torch.Tensor): The weight tensor, has shape `(num_classes, in_dim)`.
        b (torch.Tensor): The bias tensor, has shape `(num_classes,)`.
        lr (float): The learning rate.
        train_loader (torch.utils.data.DataLoader): The training set data loader.
        test_loader (torch.utils.data.DataLoader): The test set data loader.
        epochs (int): Number of training epochs.
        test_every (int): How frequently to report progress on test data.
    """
    for epoch in range(1, epochs + 1):
        train_loss, train_acc = train_epoch(w, b, lr, train_loader)
        print('Train', f'Epoch: {epoch:03d} / {epochs:03d}',
              f'Loss: {train_loss.avg:7.4g}',
              f'Accuracy: {train_acc.avg:.3f}',
              sep='  ')
        # Evaluate on the held-out set only every `test_every` epochs.
        if epoch % test_every == 0:
            test_loss, test_acc = test_epoch(w, b, test_loader)
            print(' Test', f'Epoch: {epoch:03d} / {epochs:03d}',
                  f'Loss: {test_loss.avg:7.4g}',
                  f'Accuracy: {test_acc.avg:.3f}',
                  sep='  ')
| antebi-itai/Weizmann | DL for CV/HW1/Solution/Code/train.py | train.py | py | 5,534 | python | en | code | 0 | github-code | 36 |
17232778502 | import cv2
import numpy as np
def bitmap(n):
    """Build a 256-entry lookup table for bit plane `n` (1-based, LSB = 1).

    table[v] is 255 if bit (n-1) of pixel value v is set, else 0.
    """
    mask = 1 << (n - 1)
    # Equivalent to the old `i & pow(2, n-1) == pow(2, n-1)` test; the debug
    # print of the whole table was removed, and `map` no longer shadows the
    # builtin of the same name.
    return [255 if value & mask else 0 for value in range(256)]
def bitimage(x, n):
    """Return a copy of grayscale image `x` thresholded on bit plane `n`.

    Pixels whose bit (n-1) is set become 255, all others 0. The result has
    the same shape and dtype as `x`.
    """
    mask = 1 << (n - 1)
    # Vectorised replacement for the per-pixel Python double loop through
    # the bitmap(n) lookup table: one C-level pass over the array.
    return np.where((x & mask) != 0, 255, 0).astype(x.dtype)
if __name__ == '__main__':
    # NOTE(review): the directory is spelled 'imgae' -- likely a typo for
    # 'image'; a raw string would also be safer for Windows-style paths.
    original = cv2.imread('imgae\Fig0314(a)(100-dollars).tif',
                          cv2.IMREAD_GRAYSCALE)
    cv2.imshow('a', original)
    """
    b = bitimage(original,1)
    cv2.imshow('bitmap1',b)
    c = bitimage(original,2)
    cv2.imshow('bitmap2',c)
    d = bitimage(original,3)
    cv2.imshow('bitmap3',d)
    e = bitimage(original,4)
    cv2.imshow('bitmap4',e)
    f = bitimage(original,5)
    cv2.imshow('bitmap5',f)
    g = bitimage(original,6)
    cv2.imshow('bitmap6',g)
    h = bitimage(original,7)
    cv2.imshow('bitmap7',h)
    i = bitimage(original,8)
    cv2.imshow('bitmap8',i)
    """
    # Reconstruct the image from its top bit planes only: each 0/255 plane
    # is mapped back to its binary weight (16/32/64/128) and the weighted
    # planes are summed.
    bit5 = bitimage(original, 5)
    bit6 = bitimage(original, 6)
    bit7 = bitimage(original, 7)
    bit8 = bitimage(original, 8)
    bit5 = np.where(bit5 == 255, 16, 0)
    bit6 = np.where(bit6 == 255, 32, 0)
    bit7 = np.where(bit7 == 255, 64, 0)
    bit8 = np.where(bit8 == 255, 128, 0)
    re_7_8 = np.uint8(bit7 + bit8)
    re_6_7_8 = np.uint8(bit6 + bit7 + bit8)
    re_5_6_7_8 = np.uint8(bit5 + bit6 + bit7 + bit8)
    cv2.imshow('re_7_8', re_7_8)
    cv2.imshow('re_6_7_8', re_6_7_8)
    cv2.imshow('re_5_6_7_8', re_5_6_7_8)
    '''
    a = np.array([1,3,4,5,6,1,1,1])
    a = np.where(a==1,0,255)
    '''
cv2.waitKey() | VJaGG/digital-image-processing | chapter2/3.2.4.2、bitmaplayer.py | 3.2.4.2、bitmaplayer.py | py | 1,805 | python | en | code | 0 | github-code | 36 |
1071457395 | from Module import getattendance,sendmail,gettime
# Maps a user's display name to [roll_no, password, email].
users={'Prajay':['160117735101','160117735101','mprajay999@gmail.com']}

if __name__=='__main__':
    for name in users:
        try:
            # Scrape the attendance figure and mail it with a timestamp.
            attendance = getattendance(users[name][0], users[name][1])
            print(attendance)
            sendmail(users[name][2], name, attendance,gettime())
        except Exception as exc:
            # BUG FIX: a bare `except:` swallowed everything (including
            # KeyboardInterrupt) and hid the failure reason; report it
            # before giving up on the remaining users.
            print('Server Error:', exc)
            break
| mprajay999/Attendance-Notifier | Main.py | Main.py | py | 438 | python | en | code | 1 | github-code | 36 |
34781259647 | import h5py
import numpy as np
# Dump every weight tensor in the HDF5 checkpoint to a flat one-column CSV,
# named after its group path, for consumption by non-Python tooling.
filename = 'weight_imgnet_ker5_h5/ResNet_18_ker5.h5'
h5f = h5py.File(filename, 'r')
cvsfmt = '%.18e' # covers upto float128
# get a List of data sets in group 'dd48'
# print('h5f:', h5f.shape)
# Get the data
lv0_keys = list(h5f.keys())
print("lv0: ", lv0_keys)
for keys0 in lv0_keys:
    lv1_keys = list(h5f[keys0].keys())
    print("lv1: ",lv1_keys)
    for keys1 in lv1_keys:
        lv2_keys = list(h5f[keys0][keys1].keys())
        print("lv2: ",lv2_keys)
        for keys2 in lv2_keys:
            # bn/conv0/fully_connected store datasets two levels deep;
            # the residual-block groups have one extra level of nesting.
            if (keys0 == 'bn') or (keys0 == 'conv0') or (keys0 == 'fully_connected'):
                data = h5f[keys0][keys1][keys2]
                np.savetxt('weight_imgnet_ker5_h5/w-'+str(keys0)+'-'+str(keys1)+'-'+str(keys2)+'.csv', np.reshape(data, [-1]), fmt=cvsfmt, delimiter=',')
            else:
                lv3_keys = list(h5f[keys0][keys1][keys2].keys())
                for keys3 in lv3_keys:
                    data = h5f[keys0][keys1][keys2][keys3]
                    np.savetxt('weight_imgnet_ker5_h5/w-'+str(keys0)+'-'+str(keys1)+'-'+str(keys2)+'-'+str(keys3)+'.csv', np.reshape(data, [-1]), fmt=cvsfmt, delimiter=',')
| dwkim606/lattigo_conv | imgnet_read_h5.py | imgnet_read_h5.py | py | 1,175 | python | en | code | 0 | github-code | 36 |
28522686817 | from urbansim.abstract_variables.abstract_distance_to_SSS_dataset import abstract_distance_to_SSS_dataset
class distance_to_SSS_dataset(abstract_distance_to_SSS_dataset):
    """distance of parcel centroid to nearest SSS dataset point,
    id name = dataset name_id, e.g. for busstop dataset, busstop_id
    x coordinate field name = point_x
    y coordinate field name = point_y"""
    # Distances are truncated to int32 (units follow the state-plane
    # projection of the coordinates -- confirm against the project GIS setup).
    _return_type = "int32"
    # Column names of the point coordinates in the target (SSS) dataset.
    dataset_x_coord = "point_x"
    dataset_y_coord = "point_y"
    # Column names of the parcel centroid coordinates (state-plane).
    my_x_coord = "x_coord_sp"
    my_y_coord = "y_coord_sp"
    package = "urbansim_parcel"
    from_dataset = "parcel"
from opus_core.tests import opus_unittest
from numpy import array
from opus_core.tests.utils.variable_tester import VariableTester
class Tests(opus_unittest.OpusTestCase):
    def test_my_inputs(self):
        """Distance from each parcel centroid to its nearest busstop point."""
        parcel_table = {
            "parcel_id": array([1, 2, 3, 4, 5]),
            "x_coord_sp": array([1, 2, 3, 3, 1]),
            "y_coord_sp": array([1, 1, 1, 2, 4]),
        }
        busstop_table = {
            "busstop_id": array([1, 2, 3, 4, 5, 6, 7]),
            "point_x": array([1, 2, 3, 2, 2, 1, 3]),
            "point_y": array([1, 1, 1, 2, 2, 1, 3]),
        }
        tester = VariableTester(
            __file__,
            package_order=['urbansim_parcel', 'urbansim'],
            test_data={'parcel': parcel_table, 'busstop': busstop_table})
        # Parcels 1-3 sit exactly on a busstop; parcels 4 and 5 are 1 and 2
        # units away from their nearest stop respectively.
        expected = array([0, 0, 0, 1, 2])
        tester.test_is_equal_for_family_variable(
            self, expected,
            'urbansim_parcel.parcel.distance_to_busstop_dataset')
if __name__=='__main__':
    # Run the Tests class above through the opus unittest runner.
    opus_unittest.main()
| psrc/urbansim | urbansim_parcel/parcel/distance_to_SSS_dataset.py | distance_to_SSS_dataset.py | py | 1,645 | python | en | code | 4 | github-code | 36 |
38930132887 | """
from pytube import Playlist
import pytube
itemlist = {}
playlist = Playlist("")
for item in playlist:
j = pytube.YouTube(item).title.title()
oi = pytube.YouTube(item).metadata.metadata
print(oi)
print(j)
itemlist[j] = [item, oi]
print(itemlist)
from mutagen.easyid3 import EasyID3
audio = EasyID3("example.mp3")
audio['title'] = u"Example Title"
audio['artist'] = u"Me"
audio['album'] = u"My album"
audio['composer'] = u"" # clear
audio.save()
"""
import re
import os
from mutagen.easyid3 import EasyID3
universal_folder = r"D:\Pycharm\PycharmProjects\AHHHHHHHH\DM"
nld = []
os.chdir(universal_folder)
for song in os.listdir(universal_folder):
audio = EasyID3(song)
audio['title'] = song.replace(".mp3", "")
audio['artist'] = u"Panic! At The Disco"
audio['album'] = u"A Fever You Can't Sweat Out"
audio['composer'] = u""
audio.save()
print(song)
| Suave101/pytube-AHHHHHH | Tags.py | Tags.py | py | 903 | python | en | code | 0 | github-code | 36 |
17217695574 | from extras import *
from common import Common
from indexer import Indexer
import math
import re
import nltk
import operator
from collections import defaultdict, OrderedDict
from nltk.stem import SnowballStemmer
from nltk.corpus import wordnet
class Query_Expansion:
    """Implements two query-expansion strategies: WordNet-synonym expansion
    over stemmed query terms, and pseudo-relevance feedback whose candidate
    terms are scored with Dice's coefficient."""

    def __init__(self):
        """
        Constructor: Used to initialize all the class variables
        """
        self.utility = Utility()
        self.frequency_map = defaultdict()
        self.synonyms_map = defaultdict()
        self.file_handling = FileHandling()
        self.common = Common()
        self.indexer = Indexer()

    def generate_expected_words_for_expansion(self, queries):
        """For each query (keyed 1-based in the returned map), collect the
        stem/lemma candidates that co-occur with the original term."""
        stopWords = self.utility.get_stop_list()
        stemmer = SnowballStemmer("english")
        for i in range(0, len(queries)):
            query = queries[i]
            listofwords = []
            words = query.split()
            for word in words:
                word = word.lower()
                stem = stemmer.stem(word)
                expected = self.fetch_expected_words(word, stem)
                if expected not in stopWords:
                    frequency = self.generate_frequency_map(word, expected)
                    if frequency > 0:
                        listofwords.append(expected)
            self.frequency_map[i + 1] = listofwords
        return self.frequency_map

    def generate_frequency_map(self, word, stem):
        """Return 1 if some document contains `word` and `stem` "close"
        together, else 0.

        NOTE(review): the posting lists appear to be delta-encoded, so
        summing a list recovers its last absolute position; the
        abs(...) <= 12 test therefore compares final positions only, and the
        break stops after the first matching document -- confirm this is the
        intended proximity measure.
        """
        occurrences = 0
        if stem in self.positional_index and word in self.positional_index:
            dict_stem = self.positional_index[stem]
            dict_word = self.positional_index[word]
            for doc in dict_word:
                if doc in dict_stem:
                    list1 = dict_word[doc]
                    list2 = dict_stem[doc]
                    pos1 = 0
                    for i in range(0, len(list1)):
                        pos1 = pos1 + list1[i]
                    pos2 = 0
                    for j in range(0, len(list2)):
                        pos2 = pos2 + list2[j]
                    if abs(pos1 - pos2) <= 12:
                        occurrences = occurrences + 1
                        break
        return occurrences

    def fetch_expected_words(self, word, stem):
        """Prefer the stem when it is a dictionary word, else the lemma."""
        if self.utility.check_word_exist(stem):
            return stem
        else:
            return nltk.stem.WordNetLemmatizer().lemmatize(word)

    def expand_queries_using_stemming(self, queries):
        """Return queries expanded with up to 5 WordNet synonyms each."""
        self.positional_index = self.indexer.read_index(index_type=True)
        print('\n' + self.utility.line_break + '\n' +
              'Running Query Expansion using Stemming..')
        stem_map = self.generate_expected_words_for_expansion(queries)
        updated_query_map = defaultdict(set)
        for i in range(len(queries)):
            stop_words = self.utility.get_stop_list()
            listofwords = stem_map[i + 1]
            for word in listofwords:
                for syn in wordnet.synsets(word):
                    for l in syn.lemmas():
                        # BUG FIX: Lemma.name is a method; the old code used
                        # str(l.name), which yields a bound-method repr, so
                        # these filters could never match real lemma text.
                        lemma = l.name()
                        if lemma not in queries[i] and '_' not in lemma and lemma not in stop_words:
                            updated_query_map[i + 1].add(lemma)
                        if (len(updated_query_map[i + 1])) > 4:
                            break
                    if len(updated_query_map[i + 1]) > 4:
                        break
        new_queries = []
        for i in range(len(queries)):
            old_query = queries[i]
            new_query = old_query
            for word in updated_query_map[i + 1]:
                new_query = new_query + " " + str(word)
            new_queries.append(new_query)
        return new_queries

    def create_tf(self, inverted_index):
        """Total the corpus frequency of every term, then return candidate
        expansion terms derived from those counts."""
        tf = {}
        for term in inverted_index:
            c = 0
            doc_to_frequency = inverted_index[term]
            for doc in doc_to_frequency:
                c = c + doc_to_frequency[doc]
            tf[term] = c
        return self.generatePotentialQuery(tf)

    # generating potential query words by evaluating term frequency and removing stop words
    def generatePotentialQuery(self, tf):
        """Keep non-stopword terms longer than 4 characters as candidates."""
        potentialList = []
        for key, value in tf.items():
            if key not in self.utility.get_stop_list() and len(key) > 4:
                potentialList.append(key)
        return potentialList

    # calculating dice's co-efficient for different terms
    def diceCoff(self, list1, list2, invertedIndex):
        """Score (query term, candidate) pairs with Dice's coefficient over
        their document lists; a pair must share more than 10 documents.
        Returns an OrderedDict sorted by score, best first."""
        associationDict = {}
        for i in list1:
            if i != "in" and i in invertedIndex:
                docList = invertedIndex[i]
                for j in list2:
                    docList2 = invertedIndex[j]
                    shared = 0
                    for k in docList2:
                        if k in docList:
                            shared = shared + 1
                    if shared > 10:
                        associationDict[i + " " + j] = shared * 1.0 / (len(docList) + len(docList2))
        # BUG FIX: the result was named sorted_dict but was never sorted;
        # ordering matters because the caller keeps only the first few
        # suggestions.
        sorted_dict = OrderedDict(sorted(associationDict.items(),
                                         key=operator.itemgetter(1),
                                         reverse=True))
        return sorted_dict

    def expand_queries_using_pseduo_relevance(self, queries):
        """Pseudo-relevance feedback: index the top-ranked documents, pick
        frequent candidate terms, and append the best-scoring ones (by
        Dice's coefficient) to each query."""
        print('\n' + self.utility.line_break + '\n' +
              'Running Query Expansion using Pseduo Relevance..')
        docs = self.common.read_top_documents_for_score(top=40)
        relevant_docs = []
        for record in docs:
            # BUG FIX: dict.values() is not subscriptable in Python 3; take
            # the first value via an iterator instead of values()[0].
            relevant_docs.append(next(iter(record.values())))
        self.indexer.create_save_indexer_with_relevant_docs(relevant_docs)
        inverted_index = self.indexer.read_simple_index()
        potential_list = self.create_tf(inverted_index)
        updated_query_list = []
        for i in range(len(queries)):
            query = queries[i]
            query = query.lower()
            words_from_query = []
            word_array = query.split()
            for word in word_array:
                word = re.sub(r'\W+', ' ', word)
                if word not in self.utility.get_stop_list():
                    words_from_query.append(word)
            updatedQuery = query
            suggested_words = self.diceCoff(words_from_query, potential_list, inverted_index).items()
            k = 0
            # Append at most ~9 new terms, skipping ones already present.
            for value in suggested_words:
                if k > 8:
                    break
                else:
                    words = value[0].split()
                    if words[1] not in updatedQuery:
                        updatedQuery = updatedQuery + ' ' + words[1]
                        k = k + 1
            updated_query_list.append(updatedQuery)
        return updated_query_list
33057055793 | from flask import request
from app import app
from app.service import get_value, set_value, add_set, get_set, set_expiry, range_elements, rank
#End point for GET, SET and EXPIRE command
@app.route('/redis/key',methods=['POST','GET','PATCH'])
def keyDetails():
    # GET -> redis GET, POST -> redis SET, PATCH -> redis EXPIRE.
    # NOTE(review): every branch reads request.form, so even GET requests
    # must send form-encoded bodies; query parameters (request.args) are
    # ignored -- confirm this is the intended client contract.
    if request.method == "GET":
        key = request.form['key']
        return get_value(key)
    elif request.method == "POST":
        key = request.form['key']
        value = request.form['value']
        return set_value(key,value)
    elif request.method == "PATCH":
        key = request.form['key']
        # `time` here is the expiry duration from the form (shadows the
        # stdlib module name locally).
        time = request.form['time']
        return set_expiry(key,time)
    else:
        return "bad request"
#End point for ZADD command
@app.route('/redis/set',methods=['POST', 'GET'])
def sorted_set():
    # POST -> ZADD (add value with score), GET -> dump the sorted set.
    # NOTE(review): parameters come from request.form even on GET -- confirm
    # clients send form bodies rather than query strings.
    if request.method == 'POST':
        key = request.form['key']
        value = request.form['value']
        score = request.form['score']
        return add_set(key,score,value)
    elif request.method == 'GET':
        return get_set()
    else:
        return "bad request"
# End point for the ZRANGE command.
@app.route('/redis/set/range',methods=['POST', 'GET'])
def range():
    """GET returns the sorted-set members between two positions.

    NOTE(review): this view function shadows the builtin ``range``; renaming
    it would change the Flask endpoint name, so it is left as-is.
    """
    if request.method != 'GET':
        return "bad request"
    form = request.form
    return range_elements(form['key'], form['left'], form['right'])
# End point for the ZRANK command.
@app.route('/redis/set/rank',methods=['POST', 'GET'])
def find():
    """GET returns the rank of a value inside a sorted set."""
    if request.method != 'GET':
        return "bad request"
    form = request.form
    return rank(form['key'], form['value'])
43008845296 | import pytest
from sqlobject import DatabaseIndex, ForeignKey, IntCol, MultipleJoin, \
SQLObject, StringCol
from sqlobject.dberrors import DatabaseError, IntegrityError, \
OperationalError, ProgrammingError
from sqlobject.tests.dbtest import raises, setupClass, supports
########################################
# Indexes
########################################
class SOIndex1(SQLObject):
    """Table exercising the three ways of declaring a DatabaseIndex."""
    name = StringCol(length=100)
    number = IntCol()
    # Index declared by column name (string form), with a uniqueness constraint.
    nameIndex = DatabaseIndex('name', unique=True)
    # Composite index declared with the column objects themselves.
    nameIndex2 = DatabaseIndex(name, number)
    # Dict form passes per-column options ('length': 3) to the index.
    nameIndex3 = DatabaseIndex({'column': name,
                                'length': 3})
class SOIndex2(SQLObject):
    """Table with an expression index (indexes lower(name) rather than a column)."""
    name = StringCol(length=100)
    nameIndex = DatabaseIndex({'expression': 'lower(name)'})
def test_indexes_1():
    """A unique index must reject a duplicate value on insert."""
    setupClass(SOIndex1)
    # Populate six distinct rows; numbers start at 1 to mirror insertion order.
    for n, name in enumerate('blah blech boring yep yort snort'.split(), start=1):
        SOIndex1(name=name, number=n)
    mod = SOIndex1._connection.module
    # Different backends signal the unique-constraint violation with
    # different (driver- or sqlobject-level) exception classes.
    expected_errors = (
        mod.ProgrammingError, mod.IntegrityError,
        mod.OperationalError, mod.DatabaseError,
        ProgrammingError, IntegrityError, OperationalError, DatabaseError,
    )
    raises(expected_errors, SOIndex1, name='blah', number=0)
def test_indexes_2():
    """Creating a table with an expression index must succeed (when supported)."""
    if not supports('expressionIndex'):
        pytest.skip("expressionIndex isn't supported")
    setupClass(SOIndex2)
    # Insert succeeding is the whole assertion: the index did not break DDL/DML.
    SOIndex2(name='')
class PersonIndexGet(SQLObject):
    """Table used to test lookups through a unique composite index."""
    firstName = StringCol(length=100)
    lastName = StringCol(length=100)
    age = IntCol(alternateID=True)
    # Unique (firstName, lastName) pair, retrievable via nameIndex.get(...).
    nameIndex = DatabaseIndex(firstName, lastName, unique=True)
def test_index_get_1():
    """Index .get() accepts positional or keyword args matching the index columns."""
    setupClass(PersonIndexGet, force=True)
    for first, last, age in [('Eric', 'Idle', 62),
                             ('Terry', 'Gilliam', 65),
                             ('John', 'Cleese', 66)]:
        PersonIndexGet(firstName=first, lastName=last, age=age)
    # Plain primary-key lookup still works.
    PersonIndexGet.get(1)
    # Lookup through the unique index: positional and keyword forms.
    PersonIndexGet.nameIndex.get('Terry', 'Gilliam')
    PersonIndexGet.nameIndex.get(firstName='John', lastName='Cleese')
    # Missing row raises.
    raises(Exception, PersonIndexGet.nameIndex.get,
           firstName='Graham', lastName='Chapman')
    # Mixing positional and keyword args is rejected.
    raises(Exception, PersonIndexGet.nameIndex.get,
           'Terry', lastName='Gilliam')
    # Too many and too few values are rejected.
    raises(Exception, PersonIndexGet.nameIndex.get, 'Terry', 'Gilliam', 65)
    raises(Exception, PersonIndexGet.nameIndex.get, 'Terry')
class PersonIndexGet2(SQLObject):
    """Parent table: one person owns many addresses."""
    name = StringCol(alternateID=True, length=100)
    age = IntCol()
    addresses = MultipleJoin('AddressIndexGet2')
class AddressIndexGet2(SQLObject):
    """Child table whose unique index includes a foreign-key column."""
    person = ForeignKey('PersonIndexGet2', notNone=True)
    type = StringCol(notNone=True, length=100)
    street = StringCol(notNone=True)
    # One address of each type per person; used for .get(person, type) lookups.
    pk = DatabaseIndex(person, type, unique=True)
def test_index_get_2():
    """Index .get() works when one of the indexed columns is a ForeignKey."""
    setupClass([PersonIndexGet2, AddressIndexGet2])
    person = PersonIndexGet2(name='Terry Guilliam', age=64)
    AddressIndexGet2(person=person, type='home', street='Terry Street 234')
    AddressIndexGet2(person=person, type='work', street='Guilliam Street 234')
    # Both positional and keyword lookups accept the SQLObject instance itself.
    AddressIndexGet2.pk.get(person, 'work')
    AddressIndexGet2.pk.get(person=person, type='work')
| sqlobject/sqlobject | sqlobject/tests/test_indexes.py | test_indexes.py | py | 3,088 | python | en | code | 140 | github-code | 36 |
27283710636 | """
2020 (c) piteren
"""
class TE:
    """Tiny demo class with two public attributes used to inspect ``__dict__``."""

    def __init__(self):
        # Defaults chosen purely for demonstration.
        self.pa = 3
        self.pb = 'text'

    def __str__(self):
        # Render both attributes as "pa:<pa> pb:<pb>".
        return 'pa:{} pb:{}'.format(self.pa, self.pb)
t = TE()
# Instance attributes live in __dict__: {'pa': 3, 'pb': 'text'}.
print(t.__dict__)
# Writing through __dict__ is equivalent to ``t.pa = 4``.
t.__dict__['pa'] = 4
print(t.__dict__)
# Uses TE.__str__ for the printed representation.
print(t)
74551961384 | #!/usr/bin/env python3
# encoding:utf-8
'''
@author: lierl
@file: use_enum.py
@time: 2018/3/24 17:31
'''
__author__ = 'lierl'
from enum import Enum, unique
# Functional Enum API: members Jan..Dec get auto-assigned values 1..12.
Month = Enum('Month',('Jan','Feb','Mar','Apr','May','Jun','Jul','Aug','Sep','Oct','Nov','Dec'))
# __members__ preserves definition order and maps name -> member.
for name, member in Month.__members__.items():
    print(name, '==>', member, ",", member.value)
@unique  # @unique guarantees no two members share the same value.
class Weekday(Enum):
    """Days of the week with explicit values, Sunday-first (Sun == 0)."""
    Sun = 0
    Mon = 1
    Tue = 2
    Wed = 3
    Thu = 4
    Fri = 5
    Sat = 6
# Three equivalent ways to reach a member: attribute, name lookup, value.
day1 = Weekday.Mon
print(day1)
print(Weekday['Tue'])
print(Weekday.Tue.value)
# Iterate all members in definition order.
for name, member in Weekday.__members__.items():
    print(name, "==>", member)
# Modeling Student.gender as an enum (below) avoids magic strings:
class Student(object):
    """Simple record holding a name and a gender (expected to be a Gender member)."""
    def __init__(self, name, gender):
        self.name = name
        self.gender = gender
@unique  # No duplicate values allowed among members.
class Gender(Enum):
    """Closed set of gender constants used by Student."""
    Male = 0
    Female = 1
# Enum members compare directly with ==; no string comparison needed.
bart = Student('Bart', Gender.Male)
if bart.gender == Gender.Male:
    print('测试通过')
else:
    print("测试失败")
# Enum可以把一组相关常量定义在一个class中,且class不可变,而且成员可以直接比较。 | dream7319/djtest | demo/use_enum.py | use_enum.py | py | 1,187 | python | en | code | 0 | github-code | 36 |
39056277589 | from matplotlib import pyplot as plt
# Rupture speed samples (km/s) and the variance reduction (%) each achieved.
vr = [2.6, 2.8, 3.0, 3.1, 3.2, 3.3, 3.4, 3.6]
VR = [48.37, 57.55, 64.92, 69.12, 72.82, 74.12, 73.1, 67.66]
# Black line through the samples, with '+' markers drawn on top.
plt.plot(vr, VR, 'k', lw=1.5)
plt.scatter(vr, VR, marker='+', s=90, lw=1.5)
plt.grid()
plt.xlabel('Rupture speed (km/s)')
# Fixed typo in the axis label: "reductio" -> "reduction".
plt.ylabel('Variance reduction (%)')
plt.show()
13662077877 | import tensorflow as tf
from .network import Network
from ..fast_rcnn.config import cfg
class Resnet50_train(Network):
    """Siamese ResNet-50 Faster-R-CNN training graph with a GAN head.

    Branch 1 detects objects in image 1 (trainable RPN); branch 2 shares
    weights (scope reuse) and only extracts features for image 2.  RoIs from
    image 1 crop both feature maps; a Generator tries to synthesize the
    image-2 crop from the image-1 crop, and a Discriminator scores
    (real, real) vs (real, fake) pairs.
    """

    def __init__(self, trainable=True):
        """Create all input placeholders, register them as layers and build the graph.

        :param trainable: stored flag consumed by the Network base machinery.
        """
        self.inputs = []
        # Two input images (batch, H, W, 3) and their shared size info.
        self.data1 = tf.placeholder(tf.float32, shape=[None, None, None, 3], name='data1')
        self.data2 = tf.placeholder(tf.float32, shape=[None, None, None, 3], name='data2')
        self.im_info = tf.placeholder(tf.float32, shape=[None, 3], name='im_info')
        # Ground-truth boxes per image: (x1, y1, x2, y2, class).
        self.gt_boxes1 = tf.placeholder(tf.float32, shape=[None, 5], name='gt_boxes1')
        self.gt_boxes2 = tf.placeholder(tf.float32, shape=[None, 5], name='gt_boxes2')
        self.dif_boxes = tf.placeholder(tf.float32, shape=[None, 4], name='dif_boxes')
        self.gt_ishard = tf.placeholder(tf.int32, shape=[None], name='gt_ishard')
        self.dontcare_areas = tf.placeholder(tf.float32, shape=[None, 4], name='dontcare_areas')
        self.keep_prob = tf.placeholder(tf.float32)
        # Name -> tensor registry used by self.feed()/self.get_output().
        self.layers = dict({'data1': self.data1, 'data2': self.data2, 'im_info': self.im_info,
                            'gt_boxes1': self.gt_boxes1, 'gt_boxes2': self.gt_boxes2,
                            'dif_boxes': self.dif_boxes, 'gt_ishard': self.gt_ishard,
                            'dontcare_areas': self.dontcare_areas})
        self.trainable = trainable
        self.setup()

    def setup(self):
        """Wire the siamese detectors, the generator and the discriminator."""
        with tf.device('/gpu:0'):
            with tf.variable_scope("siamese") as scope:
                # Branch 1 builds the RPN and yields RoIs; branch 2 reuses the
                # same weights and only returns its feature map.
                feature1, roi1 = self.object_detection('data1', 'gt_boxes1', is_train=True)
                scope.reuse_variables()
                feature2 = self.object_detection('data2', 'gt_boxes2', is_train=False)
            # Crop BOTH feature maps with the RoIs proposed on image 1.
            inputs = self.crop(feature1, roi1)
            targets = self.crop(feature2, roi1)
        with tf.device('/gpu:1'):
            # tanh squashes features to [-1, 1] before the GAN head.
            inputs_norm = tf.tanh(inputs)
            targets_norm = tf.tanh(targets)
            with tf.variable_scope("generator"):
                fakes = self.Generator(inputs_norm)
            fakes_norm = tf.tanh(fakes)
            with tf.variable_scope("siamese") as scope:
                # Real pair: (image-1 crop, image-2 crop); fake pair swaps in
                # the generated crop.  Discriminator weights are shared.
                true_pair = tf.concat([inputs_norm, targets_norm], axis=3)
                self.score1 = tf.sigmoid(self.Discriminator(true_pair))
                scope.reuse_variables()
                false_pair = tf.concat([inputs_norm, fakes_norm], axis=3)
                self.score2 = tf.sigmoid(self.Discriminator(false_pair))
        self.fakes = fakes_norm
        self.targets = targets_norm

    def object_detection(self, x, box, is_train):
        """Build the ResNet-50 trunk (conv1..res4f) and, when training, the RPN.

        :param x: layer name of the input image tensor.
        :param box: layer name of the matching ground-truth boxes.
        :param is_train: if True also build the RPN / proposal-target layers.
        :return: (res4f feature map, sampled RoIs) when training, else the
                 feature map only.
        """
        # anchor_scales = [8, 16, 32]
        anchor_scales = cfg.ANCHOR_SCALES
        n_classes = cfg.NCLASSES
        _feat_stride = [16, ]
        # ---- conv1 + pool1, plus the res2a projection shortcut ----
        (self.feed(x)
         .conv(7, 7, 64, 2, 2, relu=False, name='conv1')
         .batch_normalization(relu=True, name='bn_conv1', is_training=False)
         .max_pool(3, 3, 2, 2, padding='VALID', name='pool1')
         .conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res2a_branch1')
         .batch_normalization(name='bn2a_branch1', is_training=False, relu=False))
        # ---- stage 2 (res2a..res2c), 64-64-256 bottlenecks ----
        (self.feed('pool1')
         .conv(1, 1, 64, 1, 1, biased=False, relu=False, name='res2a_branch2a')
         .batch_normalization(relu=True, name='bn2a_branch2a', is_training=False)
         .conv(3, 3, 64, 1, 1, biased=False, relu=False, name='res2a_branch2b')
         .batch_normalization(relu=True, name='bn2a_branch2b', is_training=False)
         .conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res2a_branch2c')
         .batch_normalization(name='bn2a_branch2c', is_training=False, relu=False))
        (self.feed('bn2a_branch1',
                   'bn2a_branch2c')
         .add(name='res2a')
         .relu(name='res2a_relu')
         .conv(1, 1, 64, 1, 1, biased=False, relu=False, name='res2b_branch2a')
         .batch_normalization(relu=True, name='bn2b_branch2a', is_training=False)
         .conv(3, 3, 64, 1, 1, biased=False, relu=False, name='res2b_branch2b')
         .batch_normalization(relu=True, name='bn2b_branch2b', is_training=False)
         .conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res2b_branch2c')
         .batch_normalization(name='bn2b_branch2c', is_training=False, relu=False))
        (self.feed('res2a_relu',
                   'bn2b_branch2c')
         .add(name='res2b')
         .relu(name='res2b_relu')
         .conv(1, 1, 64, 1, 1, biased=False, relu=False, name='res2c_branch2a')
         .batch_normalization(relu=True, name='bn2c_branch2a', is_training=False)
         .conv(3, 3, 64, 1, 1, biased=False, relu=False, name='res2c_branch2b')
         .batch_normalization(relu=True, name='bn2c_branch2b', is_training=False)
         .conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res2c_branch2c')
         .batch_normalization(name='bn2c_branch2c', is_training=False, relu=False))
        # ---- stage 3 (res3a..res3d), 128-128-512, stride 2 at res3a ----
        (self.feed('res2b_relu',
                   'bn2c_branch2c')
         .add(name='res2c')
         .relu(name='res2c_relu')
         .conv(1, 1, 512, 2, 2, biased=False, relu=False, name='res3a_branch1', padding='VALID')
         .batch_normalization(name='bn3a_branch1', is_training=False, relu=False))
        (self.feed('res2c_relu')
         .conv(1, 1, 128, 2, 2, biased=False, relu=False, name='res3a_branch2a', padding='VALID')
         .batch_normalization(relu=True, name='bn3a_branch2a', is_training=False)
         .conv(3, 3, 128, 1, 1, biased=False, relu=False, name='res3a_branch2b')
         .batch_normalization(relu=True, name='bn3a_branch2b', is_training=False)
         .conv(1, 1, 512, 1, 1, biased=False, relu=False, name='res3a_branch2c')
         .batch_normalization(name='bn3a_branch2c', is_training=False, relu=False))
        (self.feed('bn3a_branch1',
                   'bn3a_branch2c')
         .add(name='res3a')
         .relu(name='res3a_relu')
         .conv(1, 1, 128, 1, 1, biased=False, relu=False, name='res3b_branch2a')
         .batch_normalization(relu=True, name='bn3b_branch2a', is_training=False)
         .conv(3, 3, 128, 1, 1, biased=False, relu=False, name='res3b_branch2b')
         .batch_normalization(relu=True, name='bn3b_branch2b', is_training=False)
         .conv(1, 1, 512, 1, 1, biased=False, relu=False, name='res3b_branch2c')
         .batch_normalization(name='bn3b_branch2c', is_training=False, relu=False))
        (self.feed('res3a_relu',
                   'bn3b_branch2c')
         .add(name='res3b')
         .relu(name='res3b_relu')
         .conv(1, 1, 128, 1, 1, biased=False, relu=False, name='res3c_branch2a')
         .batch_normalization(relu=True, name='bn3c_branch2a', is_training=False)
         .conv(3, 3, 128, 1, 1, biased=False, relu=False, name='res3c_branch2b')
         .batch_normalization(relu=True, name='bn3c_branch2b', is_training=False)
         .conv(1, 1, 512, 1, 1, biased=False, relu=False, name='res3c_branch2c')
         .batch_normalization(name='bn3c_branch2c', is_training=False, relu=False))
        (self.feed('res3b_relu',
                   'bn3c_branch2c')
         .add(name='res3c')
         .relu(name='res3c_relu')
         .conv(1, 1, 128, 1, 1, biased=False, relu=False, name='res3d_branch2a')
         .batch_normalization(relu=True, name='bn3d_branch2a', is_training=False)
         .conv(3, 3, 128, 1, 1, biased=False, relu=False, name='res3d_branch2b')
         .batch_normalization(relu=True, name='bn3d_branch2b', is_training=False)
         .conv(1, 1, 512, 1, 1, biased=False, relu=False, name='res3d_branch2c')
         .batch_normalization(name='bn3d_branch2c', is_training=False, relu=False))
        # ---- stage 4 (res4a..res4f), 256-256-1024, stride 2 at res4a ----
        (self.feed('res3c_relu',
                   'bn3d_branch2c')
         .add(name='res3d')
         .relu(name='res3d_relu')
         .conv(1, 1, 1024, 2, 2, biased=False, relu=False, name='res4a_branch1', padding='VALID')
         .batch_normalization(name='bn4a_branch1', is_training=False, relu=False))
        (self.feed('res3d_relu')
         .conv(1, 1, 256, 2, 2, biased=False, relu=False, name='res4a_branch2a', padding='VALID')
         .batch_normalization(relu=True, name='bn4a_branch2a', is_training=False)
         .conv(3, 3, 256, 1, 1, biased=False, relu=False, name='res4a_branch2b')
         .batch_normalization(relu=True, name='bn4a_branch2b', is_training=False)
         .conv(1, 1, 1024, 1, 1, biased=False, relu=False, name='res4a_branch2c')
         .batch_normalization(name='bn4a_branch2c', is_training=False, relu=False))
        (self.feed('bn4a_branch1',
                   'bn4a_branch2c')
         .add(name='res4a')
         .relu(name='res4a_relu')
         .conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res4b_branch2a')
         .batch_normalization(relu=True, name='bn4b_branch2a', is_training=False)
         .conv(3, 3, 256, 1, 1, biased=False, relu=False, name='res4b_branch2b')
         .batch_normalization(relu=True, name='bn4b_branch2b', is_training=False)
         .conv(1, 1, 1024, 1, 1, biased=False, relu=False, name='res4b_branch2c')
         .batch_normalization(name='bn4b_branch2c', is_training=False, relu=False))
        (self.feed('res4a_relu',
                   'bn4b_branch2c')
         .add(name='res4b')
         .relu(name='res4b_relu')
         .conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res4c_branch2a')
         .batch_normalization(relu=True, name='bn4c_branch2a', is_training=False)
         .conv(3, 3, 256, 1, 1, biased=False, relu=False, name='res4c_branch2b')
         .batch_normalization(relu=True, name='bn4c_branch2b', is_training=False)
         .conv(1, 1, 1024, 1, 1, biased=False, relu=False, name='res4c_branch2c')
         .batch_normalization(name='bn4c_branch2c', is_training=False, relu=False))
        (self.feed('res4b_relu',
                   'bn4c_branch2c')
         .add(name='res4c')
         .relu(name='res4c_relu')
         .conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res4d_branch2a')
         .batch_normalization(relu=True, name='bn4d_branch2a', is_training=False)
         .conv(3, 3, 256, 1, 1, biased=False, relu=False, name='res4d_branch2b')
         .batch_normalization(relu=True, name='bn4d_branch2b', is_training=False)
         .conv(1, 1, 1024, 1, 1, biased=False, relu=False, name='res4d_branch2c')
         .batch_normalization(name='bn4d_branch2c', is_training=False, relu=False))
        (self.feed('res4c_relu',
                   'bn4d_branch2c')
         .add(name='res4d')
         .relu(name='res4d_relu')
         .conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res4e_branch2a')
         .batch_normalization(relu=True, name='bn4e_branch2a', is_training=False)
         .conv(3, 3, 256, 1, 1, biased=False, relu=False, name='res4e_branch2b')
         .batch_normalization(relu=True, name='bn4e_branch2b', is_training=False)
         .conv(1, 1, 1024, 1, 1, biased=False, relu=False, name='res4e_branch2c')
         .batch_normalization(name='bn4e_branch2c', is_training=False, relu=False))
        (self.feed('res4d_relu',
                   'bn4e_branch2c')
         .add(name='res4e')
         .relu(name='res4e_relu')
         .conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res4f_branch2a')
         .batch_normalization(relu=True, name='bn4f_branch2a', is_training=False)
         .conv(3, 3, 256, 1, 1, biased=False, relu=False, name='res4f_branch2b')
         .batch_normalization(relu=True, name='bn4f_branch2b', is_training=False)
         .conv(1, 1, 1024, 1, 1, biased=False, relu=False, name='res4f_branch2c')
         .batch_normalization(name='bn4f_branch2c', is_training=False, relu=False))
        (self.feed('res4e_relu',
                   'bn4f_branch2c')
         .add(name='res4f')
         .relu(name='res4f_relu'))
        if is_train:
            # ========= RPN ============
            (self.feed('res4f_relu')
             .conv(3, 3, 512, 1, 1, name='rpn_conv/3x3')
             .conv(1, 1, len(anchor_scales) * 3 * 2, 1, 1, padding='VALID', relu=False, name='rpn_cls_score'))
            # Assign anchor labels/targets from the ground-truth boxes.
            (self.feed('rpn_cls_score', box, 'gt_ishard', 'dontcare_areas', 'im_info')
             .anchor_target_layer(_feat_stride, anchor_scales, name='rpn-data'))
            # Loss of rpn_cls & rpn_boxes
            (self.feed('rpn_conv/3x3')
             .conv(1, 1, len(anchor_scales) * 3 * 4, 1, 1, padding='VALID', relu=False, name='rpn_bbox_pred'))
            # ========= RoI Proposal ============
            (self.feed('rpn_cls_score')
             .spatial_reshape_layer(2, name='rpn_cls_score_reshape')
             .spatial_softmax(name='rpn_cls_prob'))
            (self.feed('rpn_cls_prob')
             .spatial_reshape_layer(len(anchor_scales) * 3 * 2, name='rpn_cls_prob_reshape'))
            (self.feed('rpn_cls_prob_reshape', 'rpn_bbox_pred', 'im_info')
             .proposal_layer(_feat_stride, anchor_scales, 'TRAIN', name='rpn_rois'))
            rpn_rois = self.get_output('rpn_rois')
            # Sample proposals against the ground truth for the second stage.
            (self.feed(rpn_rois[0], box, 'gt_ishard', 'dontcare_areas')
             .proposal_target_layer(n_classes, name='roi-data'))
            return self.get_output('res4f_relu'), self.get_output('roi-data')[0]
        else:
            return self.get_output('res4f_relu')

    def crop(self, feature, roi):
        """RoI-pool `feature` to fixed 8x8 crops at 1/16 spatial scale."""
        (self.feed(feature, roi)
         .roi_pool(8, 8, 1.0 / 16, name='res5a_branch2a_roipooling'))
        return self.get_output('res5a_branch2a_roipooling')

    def Generator(self, x):
        """Encoder/decoder that maps an image-1 crop toward the image-2 crop.

        NOTE(review): all three deconv layers share the name
        'decorder_deconv1', so each overwrites the previous entry in the layer
        registry — likely intended to be deconv1/2/3.  Left unchanged because
        renaming would break existing checkpoints.
        """
        (self.feed(x)
         .relu(name='encorder_relu1')
         .conv(4, 4, 1024, 2, 2, name='encorder_conv1')
         .batch_normalization(name='encorder_branch1', is_training=False, relu=False)
         .relu(name='encorder_relu2')
         .conv(4, 4, 512, 2, 2, name='encorder_conv2')
         .batch_normalization(name='encorder_branch2', is_training=False, relu=False)
         .relu(name='encorder_relu3')
         .conv(4, 4, 512, 2, 2, name='encorder_conv3')
         .batch_normalization(name='encorder_branch3', is_training=False, relu=False)
         .relu(name='decoder_relu1')
         .deconv(512, 4, 2, 2, name='decorder_deconv1')
         .batch_normalization(name='decoder_branch1', is_training=False, relu=False)
         .relu(name='decoder_relu2')
         .deconv(512, 4, 2, 2, name='decorder_deconv1')
         .batch_normalization(name='decoder_branch2', is_training=False, relu=False)
         .relu(name='decoder_relu3')
         .deconv(1024, 4, 2, 2, name='decorder_deconv1')
         .batch_normalization(name='decoder_branch3', is_training=False, relu=False)
         )
        return self.get_output('decoder_branch3')

    def Discriminator(self, pair):
        """ResNet stage-5 head scoring a concatenated feature pair (2 logits)."""
        (self.feed(pair)
         .conv(1, 1, 512, 2, 2, biased=False, relu=False, name='res5a_branch2a', padding='VALID')
         .batch_normalization(relu=True, name='bn5a_branch2a', is_training=False)
         .conv(3, 3, 512, 1, 1, biased=False, relu=False, name='res5a_branch2b')
         .batch_normalization(relu=True, name='bn5a_branch2b', is_training=False)
         .conv(1, 1, 2048, 1, 1, biased=False, relu=False, name='res5a_branch2c')
         .batch_normalization(name='bn5a_branch2c', is_training=False, relu=False))
        (self.feed(pair)
         .conv(1, 1, 2048, 2, 2, biased=False, relu=False, name='res5a_branch1', padding='VALID')
         .batch_normalization(name='bn5a_branch1', is_training=False, relu=False))
        (self.feed('bn5a_branch2c', 'bn5a_branch1')
         .add(name='res5a')
         .relu(name='res5a_relu')
         .conv(1, 1, 512, 1, 1, biased=False, relu=False, name='res5b_branch2a')
         .batch_normalization(relu=True, name='bn5b_branch2a', is_training=False)
         .conv(3, 3, 512, 1, 1, biased=False, relu=False, name='res5b_branch2b')
         .batch_normalization(relu=True, name='bn5b_branch2b', is_training=False)
         .conv(1, 1, 2048, 1, 1, biased=False, relu=False, name='res5b_branch2c')
         .batch_normalization(name='bn5b_branch2c', is_training=False, relu=False))
        # pdb.set_trace()
        (self.feed('res5a_relu',
                   'bn5b_branch2c')
         .add(name='res5b')
         .relu(name='res5b_relu')
         .conv(1, 1, 512, 1, 1, biased=False, relu=False, name='res5c_branch2a')
         .batch_normalization(relu=True, name='bn5c_branch2a', is_training=False)
         .conv(3, 3, 512, 1, 1, biased=False, relu=False, name='res5c_branch2b')
         .batch_normalization(relu=True, name='bn5c_branch2b', is_training=False)
         .conv(1, 1, 2048, 1, 1, biased=False, relu=False, name='res5c_branch2c')
         .batch_normalization(name='bn5c_branch2c', is_training=False, relu=False))
        # pdb.set_trace()
        (self.feed('res5b_relu',
                   'bn5c_branch2c')
         .add(name='res5c')
         .relu(name='res5c_relu')
         .fc(2, relu=False, name='cls_score'))
        return self.get_output('cls_score')
4881635428 | from chain import ChainObject
import os
##
# given vatic identifier string create vatic text file path
class Ims2Txt(ChainObject):
    """Chain stage: vatic identifier string -> path of the dumped vatic text file."""
    _consumer_ = [str]   # accepts a vatic identifier
    _producer_ = [str]   # yields the output file path

    def __init__(self, prms='~/vatic/vatic'):
        """
        :param prms: location of vatic directory
        """
        self.vaticPath = prms
        ChainObject.__init__(self, prms)

    def produce(self, ip):
        """
        Given vatic identifier string, create vatic output text and return file path

        Shells out to ``turkic dump`` inside the vatic directory, then moves the
        resulting file into the current working directory.

        :param ip: vatic identifier string
        :return: vatic output text file path
        """
        # turkic dump identifier -o output.txt --merge --merge-threshold 0.5
        basePath = str(os.getcwd())
        vaticFile = 'vaticOutput{0}.txt'.format(ip)
        # Subshell so the `cd` does not affect this process's cwd.
        sysCall = '(' + \
                  'cd {0}; '.format(self.vaticPath) + \
                  'turkic dump {0} -o {1} --merge --merge-threshold 0.5; '.format(ip, vaticFile) + \
                  'mv {0} {1}'.format(vaticFile, basePath) + \
                  ')'
        # NOTE(review): os.system return code is ignored; a failed dump is silent.
        os.system(sysCall)
        yield os.path.join(basePath, vaticFile)
##
# Consumes a vatic text file path and returns list [frame, [box coordinates], label, (attributes)]
class Txt2Labels(ChainObject):
    """Chain stage: vatic dump file path -> per-frame lists of annotation fields."""
    _consumer_ = [str]
    _producer_ = [list, set]

    def __init__(self, prms=None):
        """
        :param prms: a set containing which contents to include
                     must be from {'box', 'label', 'attributes', 'occluded', 'lost', 'generated'}
        :return: list of lists containing vatic information ordered as
                 [frameNumber, labelString, [xmin, ymin, xmax, ymax], attributesStringList,
                 lostBool, occludedBool, generatedBool] with the appropriate arguments ommitted
                 set containing the desired contents (e.g., 'box', 'label', etc.)
        """
        if prms is None:
            # Default selection when the caller does not restrict fields.
            self.returnSet = {'box', 'label', 'attributes'}
        else:
            self.returnSet = prms
        ChainObject.__init__(self, prms)

    def produce(self, ip):
        """Parse the vatic dump at path ``ip`` and yield the grouped annotation list."""
        # read in from file
        vaticList = []
        with open(ip, 'r') as f:
            for line in f:
                inList = []
                row = line.split()
                # Column 5 is the frame number; it always leads each record.
                inList.append(int(row[5]))
                if 'label' in self.returnSet:
                    inList.append(row[9].strip('"'))
                if 'box' in self.returnSet:
                    inList.append([int(x) for x in row[1:5]])
                if 'attributes' in self.returnSet:
                    inList.append([x.strip('"') for x in row[10:]])
                # NOTE(review): bool(row[i]) is True for ANY non-empty string,
                # including "0"; these flags are likely always True as written.
                if 'lost' in self.returnSet:
                    inList.append(bool(row[6]))
                if 'occluded' in self.returnSet:
                    inList.append(bool(row[7]))
                if 'generated' in self.returnSet:
                    inList.append(bool(row[8]))
                vaticList.append(inList)
        # vatic only includes one detection per line -- combine lines from the same frame into one line
        vaticList.sort(key = lambda x: x[0])
        outList = []
        oRow = []
        for i, vRow in enumerate(vaticList):
            # Flush the accumulated group when the frame number changes (or at the end).
            # NOTE(review): the row that closes a group is flushed without being
            # appended to oRow first — the last detection of each frame appears
            # to be dropped; confirm against the intended output format.
            if i==len(vaticList)-1 or vaticList[i][0] != vaticList[i+1][0]:
                outList.append(list(oRow[:]))
                oRow = []
            else:
                oRow.append(tuple(vRow))
        yield outList
| gnetscher/chainer | vatic_chains.py | vatic_chains.py | py | 2,775 | python | en | code | 0 | github-code | 36 |
3389898458 | import random
class Card:
    """Represents all the cards in the game: a standard 52-card deck."""

    def __init__(self):
        self.suits = ['Heart', 'Diamond', 'Spade', 'Club']
        # Ranks are ordered low to high; position in this list decides a card's strength.
        self.ranks = [2, 3, 4, 5, 6, 7, 8, 9, 10, 'J', 'Q', 'K', 'A']
        self.deckCards = []

    def deck(self):
        """Build a fresh, ordered 52-card deck as (suit, rank) tuples.

        Returns:
            deckCards: list of all 52 cards, suit-major order.
        """
        cards = []
        for suit in self.suits:
            for rank in self.ranks:
                cards.append((suit, rank))
        self.deckCards = cards
        return self.deckCards

    def shuffle(self):
        """Shuffle the deck in place."""
        random.shuffle(self.deckCards)

    def split_cards(self):
        """Split the deck into two halves of 26 cards.

        Returns:
            tuple: two lists of 26 cards each.
        """
        first_half = self.deckCards[:26]
        second_half = self.deckCards[26:]
        return (first_half, second_half)
class Player:
    """Represents a player with a name and the ordered pile of cards they hold."""

    def __init__(self, player):
        """Create a player.

        :param player: the player's name; must be a string.
        :raises TypeError: if ``player`` is not a string.
        """
        # Validate BEFORE assigning state so a bad call never leaves a
        # half-initialized object (the original assigned first, then raised).
        if not isinstance(player, str):
            raise TypeError("please enter a string")
        self.player = player
        self.hand = None  # assigned later by Game.give_cards()

    def show_card(self):
        """Remove and return the top card of the player's pile."""
        return self.hand.pop(0)

    def take_card(self, taken):
        """Append won cards to the bottom of the player's pile."""
        self.hand.extend(taken)
class Table:
    """Represents the cards currently on the table (round pile and war pile)."""

    def __init__(self):
        self.tableCards = []
        self.warCards = []

    def war_cards(self, warhand):
        """Remove and return the top three cards of ``warhand``.

        If the hand holds fewer than three cards it is returned untouched
        (the player stakes everything they have left).

        Args:
            warhand (list): the player's hand; mutated in place when >= 3 cards.

        Returns:
            list: the three removed cards, or the original hand when short.
        """
        self.warCards = []
        if len(warhand) < 3:
            return warhand
        for _ in range(3):
            self.warCards.append(warhand.pop(0))
        return self.warCards
class Game:
    """Represents all the actions in the game: dealing, rounds, wars, the winner."""

    def __init__(self, player1, player2):
        """Create the two players, the table and the deck.

        :param player1: name of player 1 (string).
        :param player2: name of player 2 (string).
        """
        self.p1cards = None
        self.p2cards = None
        self.p1 = Player(player1)
        self.p2 = Player(player2)
        self.table = Table()
        self.card = Card()

    def give_cards(self):
        """ Gives the cards equally to two players"""
        # Fresh deck, shuffled, then split 26/26.
        self.card.deck()
        self.card.shuffle()
        self.p1.hand, self.p2.hand = self.card.split_cards()
        print(self.p1.player, "'s cards =============\n")
        print(self.p1.hand)
        print("\n")
        print(self.p2.player, "'s cards =============\n")
        print(self.p2.hand)
        print("\n")

    def war_mode(self):
        """Move up to three cards from each player's hand onto the table pile."""
        self.table.tableCards.extend(self.table.war_cards(self.p1.hand))
        self.table.tableCards.extend(self.table.war_cards(self.p2.hand))

    def winner(self):
        """Announce and return the player who still holds cards.

        NOTE(review): the returned string lacks a space ("<name>wins the game "),
        though the printed message is correct; callers currently ignore the
        return value.
        """
        if len(self.p1.hand) != 0:
            print(self.p1.player, 'Wins the Game')
            return self.p1.player + 'wins the game '
        else:
            print(self.p2.player, 'Wins the Game')
            return self.p2.player + 'wins the game '

    def play_game(self):
        """ Plays the game until one of the players has no cards."""
        self.round_count = 0
        while len(self.p1.hand) != 0 and len(self.p2.hand) != 0:
            self.round_count += 1
            print("\n")
            print("New round number", self.round_count)
            print("Here are the current standings")
            print(self.p1.player + " has the count:", len(self.p1.hand))
            print(self.p2.player + " has the count:", len(self.p2.hand))
            print("Play a card!")
            print("\n")
            # Each round starts with an empty table pile and one card from each player.
            self.table.tableCards = []
            self.p1card = None
            self.p2card = None
            self.p1card = self.p1.show_card()
            self.p2card = self.p2.show_card()
            print(self.p1.player, "has ", self.p1card)
            print(self.p2.player, "has ", self.p2card)
            self.table.tableCards.append(self.p1card)
            self.table.tableCards.append(self.p2card)
            if self.p1card[1] == self.p2card[1]:
                # Equal ranks trigger a war: 3 more cards from each player.
                print("===============War has started====================")
                self.war_mode()
                # NOTE(review): after a war this still compares the ORIGINAL
                # tied cards, whose ranks are equal, so player 2 always wins
                # the war pile; the war cards themselves are never compared.
                if self.card.ranks.index(self.p1card[1]) < self.card.ranks.index(self.p2card[1]):
                    self.p1.take_card(self.table.tableCards)
                else:
                    self.p2.take_card(self.table.tableCards)
                self.table.warCards = []
            else:
                # NOTE(review): a LOWER rank index means a weaker card, so this
                # hands the pile to the player with the lower card; together
                # with winner() this makes "last player with cards" the winner
                # of a lowest-card-takes variant — confirm the intended rules.
                if self.card.ranks.index(self.p1card[1]) < self.card.ranks.index(self.p2card[1]):
                    self.p1.take_card(self.table.tableCards)
                else:
                    self.p2.take_card(self.table.tableCards)
        self.winner()
def main():
    """Runs the main loop of the game: prompt for names, deal, then play."""
    print("Welcome to the War Card Game")
    player1 = input("Enter Player 1 Name :")
    player2 = input("Enter Player 2 Name :")
    # Game.__init__ raises TypeError if a name is not a string (input() always is).
    game = Game(player1, player2)
    game.give_cards()
    game.play_game()

if __name__ == '__main__':
    main()
| manideepa03/WarCardGame | WarGame.py | WarGame.py | py | 5,325 | python | en | code | 0 | github-code | 36 |
22683081485 | from tensorforce.execution.runner import Runner
import numpy as np
import time
class DeepCrawlRunner(Runner):
    """Tensorforce Runner specialized for DeepCrawl Unity environments.

    Adds per-parallel action-entropy tracking, an external ``history`` dict
    that accumulates episode statistics across runs, and curriculum-learning
    support that reconfigures the Unity environments as training progresses.
    """

    def __init__(self, agent, environment, max_episode_timesteps=None, history=None, curriculum = None):
        """
        :param agent: the tensorforce agent to run.
        :param environment: a single Unity environment or a list of them.
        :param max_episode_timesteps: accepted for interface compatibility;
            the base Runner is always initialized with 100 (see call below).
        :param history: dict with 'episode_rewards', 'episode_timesteps',
            'mean_entropies', 'std_entropies' lists, extended in place.
        :param curriculum: curriculum definition with 'thresholds', 'lessons'
            and per-parameter value lists; None disables curriculum learning.
        """
        self.mean_entropies = []
        self.std_entropies = []
        self.history = history
        self.curriculum = curriculum
        self.i = 0  # guards the one-time print of the initial curriculum config
        self.unity_env = environment
        # DeepCrawl
        # Normalize to the base Runner's single-vs-multi environment API while
        # keeping self.unity_env as a list either way.
        if not isinstance(environment,list):
            environment = self.unity_env
            environments = None
            self.unity_env = [self.unity_env]
        else:
            environment = None
            environments = self.unity_env
        # One (initially empty) entropy list per parallel environment.
        self.local_entropies = np.empty((len(self.unity_env), 0)).tolist()
        super(DeepCrawlRunner, self).__init__(agent, environment=environment, environments=environments, max_episode_timesteps=100)
        # DeepCrawl
        # Apply the curriculum configuration matching the timesteps already in history.
        for env in self.unity_env:
            config = self.set_curriculum(self.curriculum, np.sum(self.history['episode_timesteps']))
            if self.i == 0:
                print(config)
                self.i = 1
            env.set_config(config)
        # DeepCrawl
        #self.reset(history)

    # DeepCrawl: Update curriculum
    def set_curriculum(self, curriculum, total_timesteps, self_curriculum_learning=False, mode='steps'):
        """Select the current curriculum step and build the matching env config.

        :param curriculum: curriculum definition dict, or None for no-op.
        :param total_timesteps: total environment timesteps completed so far.
        :param self_curriculum_learning: if True, save/freeze the model when a
            new curriculum step is reached.
        :param mode: 'steps' advances on timestep thresholds; 'mean' advances
            on recent mean reward (marked TODO below, incomplete).
        :return: dict mapping parameter name -> value for the current step,
            or None when no curriculum is configured.
        """
        if curriculum == None:
            return None
        if mode == 'steps':
            # Lesson i unlocks once total_timesteps exceeds the cumulative threshold.
            lessons = np.cumsum(curriculum['thresholds'])
            curriculum_step = 0
            for (index, l) in enumerate(lessons):
                if total_timesteps > l:
                    curriculum_step = index + 1
            self.current_curriculum_step = curriculum_step
        # TODO: DA FARE ASSOLUTAMENTE CURRICULUM CON MEDIA
        elif mode == 'mean':
            if len(self.episode_rewards) <= 100 * 6:
                self.current_curriculum_step = 0
                pass
            means = []
            for i in range(6):
                mean = np.mean(self.episode_rewards[:-100 * (i + 1)])
                means.append(mean)
            mean = np.mean(np.asarray(means))
            if mean - curriculum['lessons'][self.current_curriculum_step] < 0.05:
                self.current_curriculum_step += 1
            config = {}
            parameters = curriculum['parameters']
            for (par, value) in parameters.items():
                config[par] = value[self.current_curriculum_step]
        # Build the per-parameter config for the selected step (rebuilt here
        # unconditionally, overwriting the 'mean' branch's local config).
        parameters = curriculum['parameters']
        config = {}
        for (par, value) in parameters.items():
            config[par] = value[self.current_curriculum_step]
        # Self curriculum setting:
        # Save the model
        # TODO: FARE SELF CURRICULUM PLAY
        if self_curriculum_learning:
            if curriculum_step > self.current_curriculum_step:
                self.agent.save_model('saved/' + self.model_name, append_timestep=False)
                # Freeze the TensorFlow graph and save .bytes file. All the output layers to fetch must be specified
                if self.environment.with_previous:
                    export_pb('saved/' + self.model_name,
                              'ppo/actions-and-internals/categorical/sample/Select,ppo/actions-and-internals/layered-network/apply/internal_lstm0/apply/stack',
                              export_in_unity=True)
                else:
                    export_pb('saved/' + self.model_name, 'ppo/actions-and-internals/categorical/sample/Select',
                              export_in_unity=True)
        return config

    def handle_act(self, parallel):
        """Query the agent, record action entropy, and start the env step.

        :param parallel: index of the parallel environment being acted in.
        """
        if self.batch_agent_calls:
            self.environments[parallel].start_execute(actions=self.actions[parallel])
        else:
            agent_start = time.time()
            # DeepCrawl
            # Also fetch the action distribution so its entropy can be logged.
            query = ['action-distribution-probabilities']
            actions, probs = self.agent.act(states=self.states[parallel], parallel=parallel, query=query)
            probs = probs[0]
            self.unity_env[parallel].add_probs(probs)
            self.local_entropies[parallel].append(self.unity_env[parallel].get_last_entropy())
            # DeepCrawl
            self.episode_agent_second[parallel] += time.time() - agent_start
            self.environments[parallel].start_execute(actions=actions)
        # Update episode statistics
        self.episode_timestep[parallel] += 1
        # Maximum number of timesteps or timestep callback (after counter increment!)
        self.timesteps += 1
        if (
            (self.episode_timestep[parallel] % self.callback_timestep_frequency == 0 and not self.callback(self)) or
            self.timesteps >= self.num_timesteps
        ):
            self.terminate = 2

    def handle_terminal(self, parallel):
        """Record end-of-episode statistics and reset/reconfigure the environment.

        :param parallel: index of the parallel environment whose episode ended.
        """
        # Update experiment statistics
        self.episode_rewards.append(self.episode_reward[parallel])
        self.episode_timesteps.append(self.episode_timestep[parallel])
        self.episode_seconds.append(time.time() - self.episode_start[parallel])
        self.episode_agent_seconds.append(self.episode_agent_second[parallel])
        # DeepCrawl
        # Summarize this episode's action entropies, then flush into history.
        self.mean_entropies.append(np.mean(self.local_entropies[parallel]))
        self.std_entropies.append(np.std(self.local_entropies[parallel]))
        self.update_history()
        # DeepCrawl
        # Maximum number of episodes or episode callback (after counter increment!)
        self.episodes += 1
        if self.terminate == 0 and ((
            self.episodes % self.callback_episode_frequency == 0 and
            not self.callback(self, parallel)
        ) or self.episodes >= self.num_episodes):
            self.terminate = 1
        # Reset episode statistics
        self.episode_reward[parallel] = 0.0
        self.episode_timestep[parallel] = 0
        self.episode_agent_second[parallel] = 0.0
        self.episode_start[parallel] = time.time()
        # Reset environment
        if self.terminate == 0 and not self.sync_episodes:
            self.terminals[parallel] = -1
            # DeepCrawl
            # Set curriculum configuration
            # Re-derive the curriculum step from the accumulated timesteps.
            for env in self.unity_env:
                config = self.set_curriculum(self.curriculum, np.sum(self.history['episode_timesteps']))
                if self.i == 0:
                    print(config)
                    self.i = 1
                env.set_config(config)
            # DeepCrawl
            self.environments[parallel].start_reset()

    def update_history(self):
        """Append the buffered episode statistics to history, then clear buffers."""
        self.history["episode_rewards"].extend(self.episode_rewards)
        self.history["episode_timesteps"].extend(self.episode_timesteps)
        self.history["mean_entropies"].extend(self.mean_entropies)
        self.history["std_entropies"].extend(self.std_entropies)
        self.reset()

    def reset(self):
        """Clear the per-run statistic buffers (history itself is untouched)."""
        self.episode_rewards = list()
        self.episode_timesteps = list()
        self.std_entropies = list()
        self.mean_entropies = list()
        #self.real_episode_rewards = history.get("real_episode_rewards", list())
        self.reward_model_loss = list()
| SestoAle/Adaptive-NPCs-with-procedural-entities | deepcrawl_runner.py | deepcrawl_runner.py | py | 7,100 | python | en | code | 2 | github-code | 36 |
43376024848 | import RPi.GPIO as GPIO
from basemode import BaseMode
from time import sleep
import os
from random import choice, randint
import threading
import multiprocessing
class PiPlayBoxMode(BaseMode):
    """Traffic-light play mode for the PiPlayBox toy.

    Maps the eight hardware buttons to traffic-light animations and updates
    the LCD text accordingly.  Relies on attributes provided by BaseMode
    (self.display, self.buttons, self.trafficlight, self.plugindir) —
    assumed set up before setup() runs; TODO confirm against BaseMode.
    """
    def setup(self):
        """Initialize mode state, LCD text/colour and button interrupts."""
        # Bit flags for the hardware capabilities this mode uses.
        self.SOUND = 1
        self.LCD = 2
        self.LCDCOLOUR = 4
        self.TRAFFICLIGHT = 8
        self.modecolour = self.display.YELLOW
        # Button index -> LCD text and the trafficlight method to start.
        # "args" appear to be light bitmasks (1=red, 2=yellow, 4=green) —
        # TODO confirm against the trafficlight implementation.
        self.actions = {0: {"text": "Wait for the\ngreen man",
                            "method": "RedToGreen"},
                        1: {"text": "Red light\nmeans stop",
                            "method": "binaryLight",
                            "args": 1},
                        2: {"text": "Yellow light\nmeans wait",
                            "method": "binaryLight",
                            "args": 2},
                        3: {"text": "Green light\nmeans go",
                            "method": "binaryLight",
                            "args": 4},
                        4: {"text": None,
                            "method": None},
                        5: {"text": None,
                            "method": None},
                        6: {"text": None,
                            "method": None},
                        7: {"text": None,
                            "method": None}}
        self.modename = "Traffic Lights"
        self.subtext = "Green means go!"
        self.musicdir = os.path.join(self.plugindir, "music")
        self.display.changeColour(self.modecolour)
        self.display.Update("%s\n%s" % (self.modename,
                                        self.subtext))
        self.pattern = None
        self.addInterrupts()
    def addInterrupts(self):
        """Attach a rising-edge GPIO callback to each of the 8 buttons."""
        e = GPIO.add_event_detect
        for i in range(8):
            # x=i binds the loop variable at definition time so each lambda
            # dispatches its own button index (avoids late-binding bug).
            e(self.buttons[i],
              GPIO.RISING,
              lambda channel, x=i: self.buttonAction(x),
              bouncetime=600)
    def buttonAction(self, button):
        """Stop the current animation and start the one mapped to *button*.

        Buttons mapped to None (4-7) only stop the running animation.
        """
        self.trafficlight.stop()
        action = self.actions[button]
        if action["text"] is not None:
            args = action.get("args", None)
            self.trafficlight.start(target=action["method"], args=args)
            self.display.Update(action["text"])
    def quit(self):
        """Stop any running light animation when the mode exits."""
        self.trafficlight.stop()
| elParaguayo/PiPlayBox | modes/lights/mode.py | mode.py | py | 2,266 | python | en | code | 3 | github-code | 36 |
# Interactive generator: reads C++ member-variable declarations from stdin and
# prints getter/setter code, either inline or as separate declaration +
# implementation (ClassName::method) blocks.
print("C++ Getters/Setters Generator")
print("By hiraki\n")
seperate = input("Do you want to seperate the declaration and the implementation? [Y/N]: ").lower()
if (seperate == "y"):
    seperate = True
    # Class name is only needed for the "ClassName::" implementation prefix.
    classname = input("Enter class name: ")
else:
    seperate = False
print("\nPlease enter the member variables as if you are declaring them in a C++ class (with or without semicolons).")
print("You can also paste your existing declaration code here.")
print("Lines can begin or end with spaces, tab characters or semicolons. Failing to comply to these rules might lead to unexpected result.")
print("Enter 'DONE' (in capital, without quotes) to finish.")
print("""\nExample:
string foo
int bar;
float foobar;
AnotherClass barfoo\n""")
# Accumulated output: in-class declarations and (when separated) the
# out-of-class implementations.
declaration = ""
implementation = ""
while True:
    line = input("> ")
    # Trim newline/whitespace/tabs and a trailing semicolon.
    line = line.strip("\n").strip().strip("\t").strip(";")
    if (line == "DONE"):
        break
    try:
        # Expects exactly two tokens: "<datatype> <name>".  Multi-word types
        # (e.g. "unsigned int") fall into the except branch below.
        datatype, name = [x.strip() for x in line.split()]
    except:
        # NOTE(review): bare except also swallows KeyboardInterrupt here.
        print("Not enough parameters to parse.")
        print("Please check that you entered both the datatype and the variable name correctly.")
        continue
    # Getter: "<T> get<Name>()" — inline body or separate implementation.
    getPrototype = f"{datatype} get{name}()"
    getImplementation = ""
    if (not seperate):
        getPrototype += f" {{\n\treturn this->{name}; \n}}\n\n"
    else:
        getImplementation += f"{datatype} {classname}::get{name}()"
        getImplementation += f" {{\n\treturn this->{name}; \n}}\n\n"
        getPrototype += ";\n"
    implementation += getImplementation
    declaration += getPrototype
    # Setter: parameter is the member name lower-cased to avoid shadowing.
    setPrototype = f"void set{name}({datatype} {name.lower()})"
    setImplementation = ""
    if (not seperate):
        setPrototype += f" {{\n\tthis->{name} = {name.lower()}; \n}}\n\n"
    else:
        setImplementation += f"void {classname}::set{name}({datatype} {name.lower()})"
        setImplementation += f" {{\n\tthis->{name} = {name.lower()}; \n}}\n\n"
        setPrototype += ";\n"
    implementation += setImplementation
    declaration += setPrototype
# Print declarations, a separator, then the (possibly empty) implementations.
print(declaration)
print("-----------------------")
print(implementation)
19788170743 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
import re
import math
import codecs
import random
import numpy as np
import os
import jieba
import pickle
import shutil
jieba.initialize()
def zero_digits(s):
    """Replace every decimal digit in *s* with the character "0"."""
    digit_pattern = re.compile(r'\d')
    return digit_pattern.sub('0', s)
def load_sentences(path, lower, zeros):
    """Load a whitespace-separated "token tag" corpus file.

    Sentences are separated by blank lines.  Returns data shaped as::

        [[[token, tag], [token, tag], ...],  # sentence 1
         [...],                              # sentence 2
         ...]

    :param path: corpus file path, one "token tag" pair per line.
    :param lower: unused here — lower-casing happens later in create_dic /
        prepare_dataset.
    :param zeros: when truthy, map every digit character to "0".
    """
    sentences = []
    sentence = []
    with open(path) as f:
        for line in f:
            line = line.rstrip()
            line = zero_digits(line) if zeros else line  # normalize digits to "0"
            if not line:  # blank line ends the current sentence
                if len(sentence) > 0:
                    sentences.append(sentence)
                    sentence = []
            else:  # non-blank: expect exactly "token tag"
                word = line.split(" ")
                assert len(word) == 2, print([word[0]])  # must split into [token, tag]
                sentence.append(word)
    # Flush a trailing sentence when the file does not end with a blank line.
    if len(sentence) > 0:
        sentences.append(sentence)
    return sentences
def iob2(tags):
    """Validate a tag sequence and normalize IOB1 to IOB2 in place.

    Returns False when any tag is malformed (not "O" and not "<I|B>-<type>"),
    True otherwise.  "I-" tags that start a span are rewritten to "B-".
    """
    for idx, label in enumerate(tags):
        if label == 'O':
            continue
        parts = label.split('-')
        if len(parts) != 2 or parts[0] not in ('I', 'B'):
            return False
        if parts[0] == 'B':
            continue
        # label is "I-<type>": promote to "B-" when it opens a new span.
        previous = tags[idx - 1] if idx > 0 else None
        if previous is None or previous == 'O':
            tags[idx] = 'B' + label[1:]
        elif previous[1:] != label[1:]:
            tags[idx] = 'B' + label[1:]
    return True
def iob_iobes(tags):
    """Convert an IOB2 tag sequence to the IOBES scheme (returns a new list).

    B without a following I becomes S (single); I without a following I
    becomes E (end).  Raises Exception on any other prefix.
    """
    converted = []
    last = len(tags) - 1
    for i, tag in enumerate(tags):
        if tag == 'O':
            converted.append(tag)
            continue
        prefix = tag.split('-')[0]
        followed_by_inside = i < last and tags[i + 1].split('-')[0] == 'I'
        if prefix == 'B':
            converted.append(tag if followed_by_inside else tag.replace('B-', 'S-'))
        elif prefix == 'I':
            converted.append(tag if followed_by_inside else tag.replace('I-', 'E-'))
        else:
            raise Exception('Invalid IOB format!')
    return converted
def update_tag_scheme(sentences):
    """Validate each sentence's tags and rewrite them in place to IOBES.

    Each sentence is a list of [token, tag] pairs.  Raises Exception with the
    sentence index and content when a tag sequence fails the IOB check.
    """
    for i, s in enumerate(sentences):
        tags = [w[-1] for w in s]  # extract the tag column
        # iob2 both validates and normalizes IOB1 -> IOB2 in place.
        if not iob2(tags):
            s_str = '\n'.join(' '.join(w) for w in s)
            raise Exception('IOB标签有误,请检查 {}:\n{}'.format(i, s_str))
        new_tags = iob_iobes(tags)
        # Write the converted tags back into the [token, tag] pairs.
        for word, new_tag in zip(s, new_tags):
            word[-1] = new_tag
def load_data(config):
    """Load train/dev/test corpora and convert their tags to IOBES.

    :param config: dict with keys "train_file", "dev_file", "test_file",
        "lower" and "zeros" (passed through to load_sentences).
    :return: (train_sentences, dev_sentences, test_sentences)
    """
    # Load the three corpus splits.
    train_sentences = load_sentences(config["train_file"], config["lower"], config["zeros"])
    dev_sentences = load_sentences(config["dev_file"], config["lower"], config["zeros"])
    test_sentences = load_sentences(config["test_file"], config["lower"], config["zeros"])
    # Normalize annotation format in place (IOB -> IOBES).
    update_tag_scheme(train_sentences)
    update_tag_scheme(dev_sentences)
    update_tag_scheme(test_sentences)
    return train_sentences, dev_sentences, test_sentences
def iobes_iob(tags):
    """Map an IOBES tag sequence back to IOB2 (returns a new list).

    S- becomes B-, E- becomes I-; B-, I- and O pass through unchanged.
    Raises Exception on any other prefix.
    """
    result = []
    for tag in tags:
        head = tag.split('-')[0]
        if head in ('B', 'I', 'O'):
            result.append(tag)
        elif head == 'S':
            result.append(tag.replace('S-', 'B-'))
        elif head == 'E':
            result.append(tag.replace('E-', 'I-'))
        else:
            raise Exception('Invalid format!')
    return result
def create_mapping(words_fre_dic):
    """Build id<->token lookup tables from a {token: frequency} dict.

    Tokens are numbered by descending frequency, ties broken by token
    (ascending).  Returns ({token: id}, {id: token}).
    """
    ranked = sorted(words_fre_dic.items(), key=lambda item: (-item[1], item[0]))
    id_to_word = {index: token for index, (token, _) in enumerate(ranked)}
    word_to_id = {token: index for index, token in id_to_word.items()}
    return word_to_id, id_to_word
def augment_with_pretrained(dictionary, ext_emb_path):
    """Add the vocabulary of a pretrained embedding file to a frequency dict.

    Words found in the embedding file but not in *dictionary* are inserted
    with frequency 0 (i.e. unseen in the training set).  Mutates and returns
    *dictionary*.

    :param dictionary: {word: frequency} mapping built from the corpus.
    :param ext_emb_path: text embedding file, first token per line is the word.
    """
    print('加载预训练好的词向量...')
    assert os.path.isfile(ext_emb_path)
    words_pretrained = set()  # vocabulary of the pretrained embeddings
    for line in open(ext_emb_path):
        words_pretrained.add(line.rstrip().split()[0].strip())
    count = 0
    for word in words_pretrained:
        if word not in dictionary:
            count += 1
            dictionary[word] = 0  # frequency 0: present only in the embeddings
    print("词表新增加 {} 种词,现有 {} 种词.".format(count, len(dictionary)))
    return dictionary
def create_dic(sentences, lower=False):
    """Build {token: frequency} and {tag: frequency} dicts from a corpus.

    *sentences* is a list of sentences, each a list of [token, tag] pairs.
    The token dict additionally gets the special entries "<PAD>" and "<UNK>"
    with very large frequencies so they rank first in create_mapping.
    Returns (token_freq_dict, tag_freq_dict).
    """
    words = [[pair[0].lower() if lower else pair[0] for pair in sent] for sent in sentences]
    tags = [[pair[-1] for pair in sent] for sent in sentences]
    num_words = sum(len(seq) for seq in words)
    num_tags = sum(len(seq) for seq in tags)
    print("词总数: {}".format(num_words))
    print("标签总数: {}".format(num_tags))
    assert num_words == num_tags, print("词与标签数量不等!")
    words_fre_dic = {}
    for seq in words:
        for token in seq:
            words_fre_dic[token] = words_fre_dic.get(token, 0) + 1
    # Special tokens outrank everything else when sorted by frequency.
    words_fre_dic["<PAD>"] = 10000001
    words_fre_dic['<UNK>'] = 10000000
    print("词种类数:{}".format(len(words_fre_dic)))
    tags_fre_dic = {}
    for seq in tags:
        for tag in seq:
            tags_fre_dic[tag] = tags_fre_dic.get(tag, 0) + 1
    print("标签种类数:{}".format(len(tags_fre_dic)))
    return words_fre_dic, tags_fre_dic
def create_maps(train_sentences, config):
    """Create or load the word/tag <-> id mapping dictionaries.

    When config["map_file"] does not exist the maps are built from the
    training sentences (optionally augmented with the pretrained embedding
    vocabulary when config["pre_emb"] is truthy) and pickled to that file;
    otherwise they are unpickled from it.

    :return: (word_to_id, id_to_word, tag_to_id, id_to_tag)
    """
    if not os.path.isfile(config["map_file"]):  # build fresh maps
        words_dic_train, tags_dic_train = create_dic(train_sentences, config["lower"])  # {token: freq} from training data
        tag_to_id, id_to_tag = create_mapping(tags_dic_train)  # tag <-> id tables
        # Token <-> id tables, optionally including pretrained-embedding vocabulary.
        if config["pre_emb"]:
            dic_add_pre = augment_with_pretrained(words_dic_train.copy(), config["emb_file"])  # add embedding words
            word_to_id, id_to_word = create_mapping(dic_add_pre)
        else:
            word_to_id, id_to_word = create_mapping(words_dic_train)
        with open(config["map_file"], "wb") as f:
            pickle.dump([word_to_id, id_to_word, tag_to_id, id_to_tag], f)  # cache the maps
    else:  # reuse previously built maps
        with open(config["map_file"], "rb") as f:
            word_to_id, id_to_word, tag_to_id, id_to_tag = pickle.load(f)
    return word_to_id, id_to_word, tag_to_id, id_to_tag
def get_seg_features(string):
    """Encode jieba word-segmentation as a per-character feature sequence.

    For each character of *string*: 0 = single-character word, 1 = first
    character of a multi-character word, 2 = middle character, 3 = last
    character.  The returned list has the same length as *string*.
    """
    seg_feature = []
    for word in jieba.cut(string):
        if len(word) == 1:
            seg_feature.append(0)  # single-character word
        else:
            tmp = [2] * len(word)  # multi-char word: 1 = start, 2 = middle, 3 = end
            tmp[0] = 1
            tmp[-1] = 3
            seg_feature.extend(tmp)
    return seg_feature  # same length as the input string
def create_input(data):
    """Assemble the [chars, segs, tags] input list from one sample dict.

    Used to feed the training or evaluation function.
    """
    return [data['chars'], data["segs"], data['tags']]
def load_word2vec(emb_path, id_to_word, word_dim, old_weights):
    """Initialize an embedding matrix from a pretrained word2vec text file.

    Rows of *old_weights* are overwritten (in place) for every vocabulary
    word found in the file, trying exact match, then lower-case, then
    lower-case with digits mapped to "0".  Lines whose vector length does
    not equal *word_dim* are counted and skipped.

    :param emb_path: text file, "<word> <v1> ... <vD>" per line.
    :param id_to_word: {row index: word} vocabulary mapping.
    :param word_dim: expected embedding dimension D.
    :param old_weights: pre-allocated (len(vocab), D) weight matrix.
    :return: the (mutated) weight matrix.
    """
    new_weights = old_weights
    print('Loading pretrained embeddings from {}...'.format(emb_path))
    pre_trained = {}
    emb_invalid = 0
    for i, line in enumerate(codecs.open(emb_path, 'r', 'utf-8')):
        line = line.rstrip().split()
        if len(line) == word_dim + 1:
            pre_trained[line[0]] = np.array(
                [float(x) for x in line[1:]]
            ).astype(np.float32)
        else:
            emb_invalid += 1
    if emb_invalid > 0:
        print('WARNING: %i invalid lines' % emb_invalid)
    # Counters for the three lookup strategies.
    c_found = 0
    c_lower = 0
    c_zeros = 0
    n_words = len(id_to_word)
    # Lookup table initialization
    for i in range(n_words):
        word = id_to_word[i]
        if word in pre_trained:
            new_weights[i] = pre_trained[word]
            c_found += 1
        elif word.lower() in pre_trained:
            new_weights[i] = pre_trained[word.lower()]
            c_lower += 1
        elif re.sub('\d', '0', word.lower()) in pre_trained:
            new_weights[i] = pre_trained[
                re.sub('\d', '0', word.lower())
            ]
            c_zeros += 1
    print('Loaded %i pretrained embeddings.' % len(pre_trained))
    print('%i / %i (%.4f%%) words have been initialized with '
          'pretrained embeddings.' % (
              c_found + c_lower + c_zeros, n_words,
              100. * (c_found + c_lower + c_zeros) / n_words)
          )
    print('%i found directly, %i after lowercasing, '
          '%i after lowercasing + zero.' % (
              c_found, c_lower, c_zeros
          ))
    return new_weights
def full_to_half(s):
    """Convert full-width characters in *s* to their half-width equivalents.

    The ideographic space (U+3000) becomes an ASCII space; code points in
    the full-width ASCII block (U+FF01..U+FF5E) are shifted down by 0xFEE0.
    """
    out = []
    for ch in s:
        code = ord(ch)
        if code == 0x3000:
            out.append(chr(32))  # ideographic space -> ASCII space
        elif 0xFF01 <= code <= 0xFF5E:
            out.append(chr(code - 0xfee0))
        else:
            out.append(ch)
    return ''.join(out)
def replace_html(s):
    """Decode common HTML entities in *s* and normalize layout characters.

    The em-dash entity is removed entirely; non-breaking spaces become
    regular spaces.  Returns the cleaned string.
    """
    # Order matters: '&' is decoded after entities that contain '&'.
    substitutions = (
        ('"', '"'),
        ('&', '&'),
        ('<', '<'),
        ('>', '>'),
        (' ', ' '),
        ('“', '“'),
        ('”', '”'),
        ('—', ''),
        ('\xa0', ' '),
    )
    for entity, replacement in substitutions:
        s = s.replace(entity, replacement)
    return s
def input_from_line(line, char_to_id):
    """Build a batch-of-one model input from a raw text line.

    Normalizes full-width characters and HTML entities first, then encodes
    characters as ids (unknowns map to "<UNK>") and computes jieba
    segmentation features.

    :return: [[raw normalized line], [char id sequence],
              [segmentation features], [empty tag list]]
    """
    line = full_to_half(line)
    line = replace_html(line)
    inputs = list()
    inputs.append([line])
    # Bug fix: str.replace returns a new string — the original call discarded
    # the result, so spaces were never mapped to the "$" placeholder before
    # char-id lookup and segmentation.
    line = line.replace(" ", "$")
    inputs.append([[char_to_id[char] if char in char_to_id else char_to_id["<UNK>"]
                    for char in line]])
    inputs.append([get_seg_features(line)])
    inputs.append([[]])
    return inputs
def prepare_dataset(sentences, char_to_id, tag_to_id, lower=False, train=True):
    """Convert sentences into model-ready samples.

    For each sentence (a list of [token, tag] pairs) produce
    [token_list, char_id_list, seg_feature_list, tag_id_list].

    :param lower: lower-case tokens before the char-id lookup.
    :param train: when False, all tags are replaced by the id of "O"
        (used at inference time when gold tags are unavailable).
    """
    none_index = tag_to_id["O"]
    def f(x):
        return x.lower() if lower else x
    data = []
    for s in sentences:
        string = [w[0] for w in s]
        # Unknown characters fall back to the "<UNK>" id.
        chars = [char_to_id[f(w) if f(w) in char_to_id else '<UNK>'] for w in string]
        segs = get_seg_features("".join(string))
        if train:
            tags = [tag_to_id[w[-1]] for w in s]
        else:
            tags = [none_index for _ in chars]
        data.append([string, chars, segs, tags])
    return data
def make_path(config):
    """Create the result/checkpoint/log directories named in *config*.

    Uses ``os.makedirs(..., exist_ok=True)`` so the call is idempotent and
    free of the check-then-create race the original isdir tests had.

    :param config: dict with keys "result_path", "ckpt_path", "log_path".
    """
    print("make path...")
    os.makedirs(config["result_path"], exist_ok=True)
    os.makedirs(config["ckpt_path"], exist_ok=True)
    os.makedirs(config["log_path"], exist_ok=True)
def clean(config):
    """Remove generated artifacts so a fresh training run starts clean.

    Deletes the map file, checkpoint/result directories, the saved config
    file and the local ``__pycache__``.  Destructive: paths in *config* are
    removed recursively without confirmation.
    """
    print("clean files...")
    if os.path.isfile(config["map_file"]):
        os.remove(config["map_file"])
    if os.path.isdir(config["ckpt_path"]):
        shutil.rmtree(config["ckpt_path"])
    if os.path.isdir(config["result_path"]):
        shutil.rmtree(config["result_path"])
    if os.path.isfile(config["config_file"]):
        os.remove(config["config_file"])
    if os.path.isdir("__pycache__"):
        shutil.rmtree("__pycache__")
class BatchManager(object):
    """Groups [tokens, char_ids, seg_features, tag_ids] samples into
    padded mini-batches.

    Samples are sorted by sentence length first, so each batch only pads up
    to the length of its own longest sentence.  Each stored batch has the
    shape [strings, chars, segs, targets], every inner list padded with 0.
    """

    def __init__(self, data, batch_size):
        self.batch_data = self.sort_and_pad(data, batch_size)
        self.len_data = len(self.batch_data)

    def sort_and_pad(self, data, batch_size):
        """Sort samples by sentence length and slice into padded batches."""
        by_length = sorted(data, key=lambda sample: len(sample[0]))
        batches = []
        for start in range(0, len(by_length), batch_size):
            batches.append(self.pad_data(by_length[start:start + batch_size]))
        return batches

    @staticmethod
    def pad_data(data):
        """Pad every field of every sample to the batch's longest sentence."""
        longest = max(len(sample[0]) for sample in data)
        strings, chars, segs, targets = [], [], [], []
        for string, char, seg, target in data:
            filler = [0] * (longest - len(string))
            strings.append(string + filler)
            chars.append(char + filler)
            segs.append(seg + filler)
            targets.append(target + filler)
        return [strings, chars, segs, targets]

    def iter_batch(self, shuffle=False):
        """Yield batches, optionally shuffling the batch order in place."""
        if shuffle:
            random.shuffle(self.batch_data)
        for batch in self.batch_data:
            yield batch
if __name__ == "__main__":
    # Library module: no CLI behavior; the guard is an intentional no-op.
    pass
| churximi/Car-NER | data_utils.py | data_utils.py | py | 14,187 | python | en | code | 0 | github-code | 36 |
41249722421 | #!/usr/bin/env python3
#-*- encoding: UTF-8 -*-
"""Escreva um programa que leia a quantidade de dias, horas, minutos e segundos do usuário. Calcule
o total em segundos."""
def main():
    """Read days, hours and minutes from stdin and print the total seconds."""
    days = int(input("Informe a quantidade de dias: "))
    hours = int(input("Informe a quantidade de horas: "))
    minutes = int(input("Informe a quantidade de minutos: "))
    # 1 minute = 60 s, 1 hour = 3600 s, 1 day = 24 * 3600 s.
    seconds_total = minutes * 60 + hours * 3600 + days * 24 * 3600
    print(" %d dias + %d horas + %d minutos = %d segundos" % (days, hours, minutes, seconds_total))


if __name__ == "__main__":
    main()
71636332263 | from miniMax import *
from tree_parser import *
from alphaBeta import *
#filename = sys.argv[1]
#print ("hello world! " + filename)
#data_list = parse_data_as_list(filename)
# Driver script: build a game tree and run both MiniMax and Alpha-Beta on it.
data_tree = GameTree()
#data_tree.build_tree(data_list)
#data_tree.armarArbol(1,3)
# armarArbol ("build tree" in Spanish) presumably takes two board-state
# lists and a depth of 3 — TODO confirm against GameTree's definition.
data_tree.armarArbol([99,0,0,0,0,0],[99,99,0,0, 99,0],3)
mini=MiniMax(data_tree)
print("MiniMax")
bestMove=mini.minimax(mini.root)
print()
print("alpha-beta")
# Same tree searched with alpha-beta pruning for comparison.
alpha=AlphaBeta(data_tree)
alpha.alpha_beta_search(alpha.root)
| jpcifuentes16/Dots-And-Boxes-Ai | con MiniMax/main.py | main.py | py | 484 | python | en | code | 0 | github-code | 36 |
37350976457 | """Zero-field splitting.
See::
Spin decontamination for magnetic dipolar coupling calculations:
Application to high-spin molecules and solid-state spin qubits
Timur Biktagirov, Wolf Gero Schmidt, and Uwe Gerstmann
Phys. Rev. Research 2, 022024(R) – Published 30 April 2020
"""
from math import pi
from typing import List, Tuple, Dict
import numpy as np
from ase.units import Bohr, Ha, _c, _e, _hplanck
from my_gpaw.calculator import GPAW
from my_gpaw.grid_descriptor import GridDescriptor
from my_gpaw.typing import Array1D, Array2D, Array4D
from my_gpaw.hyperfine import alpha # fine-structure constant: ~ 1 / 137
from my_gpaw.setup import Setup
from my_gpaw.pw.lfc import PWLFC
from my_gpaw.pw.descriptor import PWDescriptor
from my_gpaw.mpi import serial_comm
def zfs(calc: GPAW,
        method: int = 1) -> Array2D:
    """Zero-field splitting.

    Calculate magnetic dipole coupling tensor in eV.

    :param calc: spin-polarized, gamma-point-only GPAW calculation whose
        spin-up channel has exactly two more occupied bands than spin-down
        (enforced by the assert below).
    :param method: 1 uses only the two highest occupied spin-up bands;
        any other value uses all occupied bands of both spins.
    :return: 3x3 coupling tensor, broadcast to all ranks.
    """
    (kpt1, kpt2), = calc.wfs.kpt_qs  # spin-polarized and gamma only
    # Count occupied bands per spin channel (f_n > 0.5 means occupied).
    nocc1 = (kpt1.f_n > 0.5).sum()
    nocc2 = (kpt2.f_n > 0.5).sum()
    assert nocc1 == nocc2 + 2, (nocc1, nocc2)
    if method == 1:
        wf1 = WaveFunctions.from_calc(calc, 0, nocc1 - 2, nocc1)
        wf12 = [wf1]
    else:
        wf1 = WaveFunctions.from_calc(calc, 0, 0, nocc1)
        wf2 = WaveFunctions.from_calc(calc, 1, 0, nocc2)
        wf12 = [wf1, wf2]
    D_vv = np.zeros((3, 3))
    # The pairwise sums are evaluated on rank 0 only, then broadcast.
    if calc.world.rank == 0:
        compensation_charge = create_compensation_charge(wf1.setups,
                                                         wf1.pd,
                                                         calc.spos_ac)
        for wfa in wf12:
            for wfb in wf12:
                d_vv = zfs1(wfa, wfb, compensation_charge)
                D_vv += d_vv
    calc.world.broadcast(D_vv, 0)
    return D_vv
class WaveFunctions:
    def __init__(self,
                 psit_nR: Array4D,
                 P_ani: Dict[int, Array2D],
                 spin: int,
                 setups: List[Setup],
                 gd: GridDescriptor = None,
                 pd: PWDescriptor = None):
        """Container for wave functions in real-space plus PAW projections.

        :param psit_nR: pseudo wave functions on the real-space grid.
        :param P_ani: projector overlaps per atom index.
        :param spin: spin channel (0 or 1).
        :param setups: PAW setups, one per atom.
        :param gd: real-space grid descriptor (used when *pd* is not given).
        :param pd: plane-wave descriptor; created from *gd* when omitted.
        """
        self.pd = pd or PWDescriptor(ecut=None, gd=gd)
        self.psit_nR = psit_nR
        self.P_ani = P_ani
        self.spin = spin
        self.setups = setups

    @staticmethod
    def from_calc(calc: GPAW, spin: int, n1: int, n2: int) -> 'WaveFunctions':
        """Create a WaveFunctions object from a GPAW calculation.

        Extracts bands n1..n2-1 of the given spin channel; wave functions
        are converted to atomic units (Bohr**1.5 factor).
        """
        kpt = calc.wfs.kpt_qs[0][spin]
        # Serial grid descriptor with periodic boundary conditions.
        gd = calc.wfs.gd.new_descriptor(pbc_c=np.ones(3, bool),
                                        comm=serial_comm)
        psit_nR = gd.empty(n2 - n1)
        for band, psit_R in enumerate(psit_nR):
            psit_R[:] = calc.get_pseudo_wave_function(
                band + n1,
                spin=spin) * Bohr**1.5
        return WaveFunctions(psit_nR,
                             kpt.projections.as_dict_on_master(n1, n2),
                             spin,
                             calc.setups,
                             gd=gd)

    def __len__(self) -> int:
        # Number of bands held in this container.
        return len(self.psit_nR)
def create_compensation_charge(setups: List[Setup],
                               pd: PWDescriptor,
                               spos_ac: Array2D) -> PWLFC:
    """Build the PAW compensation-charge expansion (ghat) at atom positions.

    :param spos_ac: scaled atomic positions.
    """
    compensation_charge = PWLFC([data.ghat_l for data in setups], pd)
    compensation_charge.set_positions(spos_ac)
    return compensation_charge
def zfs1(wf1: WaveFunctions,
         wf2: WaveFunctions,
         compensation_charge: PWLFC) -> Array2D:
    """Compute the dipole-coupling contribution of one wave-function pair.

    Combines a direct term from the spin densities with an exchange-like
    term from pair densities, makes the result traceless, and scales by
    alpha**2 * pi (result in eV).  Sign flips for opposite-spin pairs.
    """
    pd = wf1.pd
    setups = wf1.setups
    N2 = len(wf2)
    # |G| with the G=0 entry patched to avoid division by zero downstream.
    G_G = pd.G2_qG[0]**0.5
    G_G[0] = 1.0
    # Unit reciprocal-lattice vectors.
    G_Gv = pd.get_reciprocal_vectors(add_q=False) / G_G[:, np.newaxis]

    # Spin densities (plane-wave coefficients) incl. compensation charges.
    n_sG = pd.zeros(2)
    for n_G, wf in zip(n_sG, [wf1, wf2]):
        D_aii = {}
        Q_aL = {}
        for a, P_ni in wf.P_ani.items():
            D_ii = np.einsum('ni, nj -> ij', P_ni, P_ni)
            D_aii[a] = D_ii
            Q_aL[a] = np.einsum('ij, ijL -> L', D_ii, setups[a].Delta_iiL)

        for psit_R in wf.psit_nR:
            n_G += pd.fft(psit_R**2)

        compensation_charge.add(n_G, Q_aL)

    # Direct (density-density) term.
    nn_G = (n_sG[0] * n_sG[1].conj()).real
    D_vv = zfs2(pd, G_Gv, nn_G)

    # Exchange-like term from band-pair densities, subtracted below.
    n_nG = pd.empty(N2)
    for n1, psit1_R in enumerate(wf1.psit_nR):
        D_anii = {}
        Q_anL = {}
        for a, P1_ni in wf1.P_ani.items():
            D_nii = np.einsum('i, nj -> nij', P1_ni[n1], wf2.P_ani[a])
            D_anii[a] = D_nii
            Q_anL[a] = np.einsum('nij, ijL -> nL',
                                 D_nii, setups[a].Delta_iiL)

        for n_G, psit2_R in zip(n_nG, wf2.psit_nR):
            n_G[:] = pd.fft(psit1_R * psit2_R)

        compensation_charge.add(n_nG, Q_anL)

        nn_G = (n_nG * n_nG.conj()).sum(axis=0).real
        D_vv -= zfs2(pd, G_Gv, nn_G)

    D_vv -= np.trace(D_vv) / 3 * np.eye(3)  # should be traceless

    # Same-spin pairs add, opposite-spin pairs subtract.
    sign = 1.0 if wf1.spin == wf2.spin else -1.0

    return sign * alpha**2 * pi * D_vv * Ha
def zfs2(pd: PWDescriptor,
         G_Gv: Array2D,
         nn_G: Array1D) -> Array2D:
    """Reciprocal-space integral sum_G Ghat_v Ghat_w n(G), volume-normalized.

    :param G_Gv: unit reciprocal-lattice vectors, shape (nG, 3).
    :param nn_G: real density product per G vector.
    :return: 3x3 tensor.
    """
    D_vv = np.einsum('gv, gw, g -> vw', G_Gv, G_Gv, nn_G)
    D_vv *= 2 * pd.gd.dv / pd.gd.N_c.prod()
    return D_vv
def convert_tensor(D_vv: Array2D,
                   unit: str = 'eV') -> Tuple[float, float, Array1D, Array2D]:
    """Convert a 3x3 ZFS tensor to D, E and the easy axis.

    The input tensor must be in eV; *unit* selects the output unit and must
    be one of "eV", "ueV", "MHz" or "1/cm".  Returns
    (D, E, easy_axis, scaled_tensor).

    >>> D_vv = np.diag([1, 2, 3])
    >>> D, E, axis, _ = convert_tensor(D_vv)
    >>> D
    4.5
    >>> E
    0.5
    >>> axis
    array([0., 0., 1.])
    """
    # eV -> target-unit conversion factors.
    scale_by_unit = {'ueV': 1e6,
                     'MHz': _e / _hplanck * 1e-6,
                     '1/cm': _e / _hplanck / _c / 100,
                     'eV': 1.0}
    if unit not in scale_by_unit:
        raise ValueError(f'Unknown unit: {unit}')
    scale = scale_by_unit[unit]

    eigenvalues, U = np.linalg.eigh(D_vv * scale)
    e1, e2, e3 = eigenvalues
    # The easy axis is the eigenvector of the largest-magnitude eigenvalue.
    if abs(e1) > abs(e3):
        D = 1.5 * e1
        E = 0.5 * (e2 - e3)
        axis = U[:, 0]
    else:
        D = 1.5 * e3
        E = 0.5 * (e2 - e1)
        axis = U[:, 2]

    return D, E, axis, D_vv * scale
def main(argv: List[str] = None) -> Array2D:
    """CLI interface.

    Loads a GPW file, computes the ZFS tensor, prints D, E, the easy axis
    and the full tensor in the requested unit, and returns the tensor.
    """
    import argparse

    parser = argparse.ArgumentParser(
        prog='python3 -m gpaw.zero_field_splitting',
        description='...')
    add = parser.add_argument
    add('file', metavar='input-file',
        help='GPW-file with wave functions.')
    add('-u', '--unit', default='ueV', choices=['ueV', 'MHz', '1/cm'],
        help='Unit.  Must be "ueV" (micro-eV, default), "MHz" or "1/cm".')
    add('-m', '--method', type=int, default=1)

    args = parser.parse_intermixed_args(argv)

    calc = GPAW(args.file)

    D_vv = zfs(calc, args.method)
    D, E, axis, D_vv = convert_tensor(D_vv, args.unit)

    unit = args.unit
    if unit == 'ueV':
        unit = 'μeV'  # display-friendly name

    print('D_ij = (' +
          ',\n        '.join('(' + ', '.join(f'{d:10.3f}' for d in D_v) + ')'
                             for D_v in D_vv) +
          ') ', unit)
    print('i, j = x, y, z')
    print()
    print(f'D = {D:.3f} {unit}')
    print(f'E = {E:.3f} {unit}')
    x, y, z = axis
    print(f'axis = ({x:.3f}, {y:.3f}, {z:.3f})')

    return D_vv
| f-fathurrahman/ffr-learns-gpaw | my_gpaw/zero_field_splitting.py | zero_field_splitting.py | py | 7,481 | python | en | code | 0 | github-code | 36 |
39804372033 | from core_operations.models import US_COUNTRY_CODE, NUMBER_OF_DAYS_IN_A_YEAR, LIST_OF_STATES_IN_US
from core_operations.models import FormattedPhoneNumberField, YearsOfWorkField
from django.db import models
import re
from datetime import date
from datetime import datetime
# from internal_users.models import InternalUser
from django.utils import timezone
from faker import Faker
fake = Faker()
# added on 2023-06-03. common operational models, functions shall be defined in core_operations app.
# Department codes and display labels for TalentsModel.talent_department.
# NOTE(review): 'deparment' typo below is preserved — it is a stored/displayed
# string; fixing it would change user-visible output and any tests against it.
UNASSIGNED = 'Unassigned'
VISITOR = 'visitor only'
SERVICE_FRONT = 'service advisor'
SERVICE_GARAGE = 'service technican'
TALENT_MANAGEMENT = 'talent management'
ACCOUNTING = 'Accounting'
CODING_MASTERS = 'code masters'
CYBER_SECURITY = 'cyber security'
TRAINEE = 'Trainee'
LEGAL = 'legal'

DEPARTMENTS = ((UNASSIGNED, 'your deparment has not been assigned yet.'),
               (VISITOR, 'visitor group'),
               (SERVICE_FRONT, 'service advisor group'),
               (SERVICE_GARAGE, 'service technican group'),
               (TALENT_MANAGEMENT, 'talent management group'),
               (LEGAL, 'legal group'),
               (TRAINEE, 'trainee group'),
               (CODING_MASTERS, 'code master group'),
               (CYBER_SECURITY, 'cyber security group'),
               )

# Pay-type codes for TalentsModel.talent_pay_type.
PAY_TYPE_UNASSIGNED = 0
PAY_TYPE_HOURLY = 1
PAY_TYPE_SALARY = 2
PAY_TYPE_BONUS = 3
PAY_TYPE_INTERNSHIP = 4
PAY_TYPE_OTHER1 = 5
PAY_TYPE_OTHER2 = 6
PAY_TYPES = ((PAY_TYPE_UNASSIGNED, 'unassigned pay type. Must be assigned before the first work day.'),
             (PAY_TYPE_HOURLY, 'hourly'),
             (PAY_TYPE_SALARY, 'salaried'),
             (PAY_TYPE_INTERNSHIP, 'internship'),
             (PAY_TYPE_BONUS, 'bonus pay'),
             (PAY_TYPE_OTHER1, 'pay type other-1'),
             (PAY_TYPE_OTHER2, 'pay type other-2'),
             )

# 2023-05-23 add pay frequency choices
# Weekly – 52 paychecks per year.
# Biweekly – 26 paychecks per year.
# Semi-monthly – 24 paychecks per year.
# Monthly – 12 paychecks per year.
PAY_FREQUENCY_UNDEFINED = 0
PAY_FREQUENCY_DAILY = 1
PAY_FREQUENCY_WEEKLY = 2
PAY_FREQUENCY_BIWEEKLY = 3
PAY_FREQUENCY_SEMIMONTHLY = 4
PAY_FREQUENCY_MONTHLY = 5
PAY_FREQUENCY_SEMIANNUALLY = 6
PAY_FREQUENCY_ANNUALLY = 7
PAY_FREQUENCY_RESERVE1 = 8
PAY_FREQUENCY_RESERVE2 = 9
PAY_FREQUENCY_LIST = ((PAY_FREQUENCY_UNDEFINED, 'pay frequency not defined'),
                      (PAY_FREQUENCY_DAILY, 'daily'),
                      (PAY_FREQUENCY_WEEKLY, 'weekly'),
                      (PAY_FREQUENCY_BIWEEKLY, 'bi-weekly'),
                      (PAY_FREQUENCY_SEMIMONTHLY, 'semi-monthly'),
                      (PAY_FREQUENCY_MONTHLY, 'monthly'),
                      # Bug fix: the two labels below were copy-pasted as
                      # 'monthly'; they now describe their actual frequencies.
                      (PAY_FREQUENCY_SEMIANNUALLY, 'semi-annually'),
                      (PAY_FREQUENCY_ANNUALLY, 'annually'),
                      (PAY_FREQUENCY_RESERVE1,
                       'reserved pay frequency 1; not used yet'),
                      (PAY_FREQUENCY_RESERVE2,
                       'reserved pay frequency 2; not used yet'),
                      )
class TalentsModel(models.Model):
    """HR record for one employee ("talent").

    Tracks identity, contact and address details, pay configuration and
    HR bookkeeping.  Changes to a small set of sensitive fields are
    diffable via get_changed_fields() for audit logging.
    """
    talent_id = models.BigAutoField(primary_key=True)
    # Business-facing employee number, distinct from the DB primary key;
    # auto-assigned in save() starting at 1024, stepping by 2.
    talent_employee_id = models.IntegerField(unique=True)
    talent_first_name = models.CharField(
        max_length=50, null=False, verbose_name="Legal First Name (as in driver license (DL) or passport)")
    talent_last_name = models.CharField(
        max_length=50, null=False, verbose_name="Legal Last Name (as in driver license (DL) or passport)")
    talent_middle_name = models.CharField(
        max_length=50, null=True, blank=True, verbose_name="Middle Name")
    talent_preferred_name = models.CharField(
        max_length=50, null=True, blank=True, verbose_name="Preferred Name")
    talent_email = models.EmailField(max_length=50, blank=True, null=True)
    # this custom field works with a glitch as of 2023-06-03.
    talent_phone_number_primary = FormattedPhoneNumberField()
    talent_phone_number_primary_digits_only = models.CharField(
        max_length=20, null=True, blank=True)
    talent_phone_number_alternates_01 = FormattedPhoneNumberField(null=True)
    talent_phone_number_alternates_02 = FormattedPhoneNumberField(null=True)
    talent_emergency_contact = models.CharField(
        max_length=200, null=True, blank=True)
    talent_date_of_birth = models.DateField(
        verbose_name='Date of Birth (DOB)', null=True)
    talent_ssn = models.CharField(max_length=15,
                                  verbose_name='SSN or Tax ID (TIN)', null=True)
    # Physical (residential) address.
    talent_physical_address_01 = models.CharField(
        verbose_name='street address 01', max_length=100)
    talent_physical_address_02 = models.CharField(
        verbose_name='street address 02 (apt numbers, unit #, etc.)', max_length=100, blank=True, null=True)
    talent_physical_address_city = models.CharField(max_length=50, null=True)
    talent_physical_address_state = models.CharField(max_length=2, null=True)
    talent_physical_address_zip_code = models.CharField(
        max_length=10, null=True)
    talent_physical_address_country = models.CharField(verbose_name='Country',
                                                       max_length=50, default='US')
    # Mailing address (may mirror the physical address).
    talent_mailing_address_is_the_same_physical_address = models.BooleanField(
        default=True)
    talent_mailing_address_01 = models.CharField(
        verbose_name='mailing address 01', max_length=100)
    talent_mailing_address_02 = models.CharField(
        verbose_name=' mailing address 02 (apt numbers, unit #, etc)', max_length=100, blank=True, null=True)
    talent_mailing_address_city = models.CharField(max_length=50, null=True)
    talent_mailing_address_state = models.CharField(max_length=2, null=True)
    talent_mailing_address_zip_code = models.CharField(max_length=10)
    talent_mailing_address_country = models.CharField(
        max_length=50, default='US')
    talent_education_level = models.CharField(max_length=100, default='None')
    talent_certifications = models.CharField(
        max_length=500, null=True, blank=True)
    talent_hire_date = models.DateTimeField(blank=True, null=True)
    talent_department = models.CharField(max_length=50,
                                         choices=DEPARTMENTS,
                                         default=UNASSIGNED)
    talent_supervisor = models.ForeignKey(
        'self', on_delete=models.SET_NULL, null=True, blank=True)
    talent_work_start_date = models.DateTimeField(blank=True, null=True)
    talent_pay_type = models.PositiveSmallIntegerField(default=PAY_TYPE_UNASSIGNED,
                                                       choices=PAY_TYPES)
    talent_pay_rate = models.DecimalField(
        max_digits=10, decimal_places=2, default=0.00)
    talent_pay_frequency = models.PositiveSmallIntegerField(choices=PAY_FREQUENCY_LIST,
                                                            default=PAY_FREQUENCY_UNDEFINED)
    talent_previous_department = models.CharField(max_length=50,
                                                  choices=DEPARTMENTS,
                                                  default=UNASSIGNED,
                                                  )
    talent_discharge_date = models.DateTimeField(null=True, blank=True)
    talent_years_of_work = YearsOfWorkField(null=True, blank=True)
    talent_HR_remarks_json = models.TextField(null=True, blank=True)
    talent_incident_record_json = models.TextField(null=True, blank=True)
    # added on 2023-06-02 to store the future talent_digital_files
    talent_digital_file_storage_path_01 = models.CharField(
        max_length=2000, null=True, blank=True)
    talent_digital_file_storage_path_02 = models.CharField(
        max_length=2000, null=True, blank=True)
    talent_is_active = models.BooleanField(default=True)
    talent_created_at = models.DateTimeField(auto_now_add=True)
    # NOTE: "udpated" typo is the live DB column name — do not rename
    # without a migration.
    talent_last_udpated_at = models.DateTimeField(auto_now=True)
    # talent_created_by_user = models.ForeignKey(
    #     InternalUser, null=True, on_delete=models.SET_NULL)

    @property
    def talent_full_name(self):
        """Full legal name; missing middle name is omitted (no "None")."""
        # Bug fix: the original f-string rendered a literal "None" when the
        # (nullable) middle name was absent.
        parts = [self.talent_first_name, self.talent_last_name,
                 self.talent_middle_name]
        return " ".join(part for part in parts if part)

    @property
    def talent_full_physical_address(self):
        """Physical address as one display string; '' when all parts missing."""
        # Bug fix: the original called .upper() on the nullable state field
        # before the None filter ran, raising AttributeError for a missing
        # state.
        state = self.talent_physical_address_state
        addr_fields = [self.talent_physical_address_01,
                       self.talent_physical_address_02,
                       self.talent_physical_address_city,
                       state.upper() if state is not None else None,
                       self.talent_physical_address_zip_code]
        full_address = " ".join(
            [field for field in addr_fields if field is not None]).strip()
        # Append the country only when at least one other component exists.
        if len(full_address) != 0:
            full_address = full_address + " " + self.talent_physical_address_country
        return full_address

    @property
    def talent_full_mailing_address(self):
        """Mailing address as one display string; '' when all parts missing.

        NOTE(review): unlike the physical address, the state is not
        upper-cased here — confirm whether that asymmetry is intentional.
        """
        addr_fields = [self.talent_mailing_address_01,
                       self.talent_mailing_address_02,
                       self.talent_mailing_address_city,
                       self.talent_mailing_address_state,
                       self.talent_mailing_address_zip_code]
        full_address = " ".join(
            [field for field in addr_fields if field is not None]).strip()
        if len(full_address) != 0:
            full_address = full_address + " " + self.talent_mailing_address_country
        return full_address

    def __init__(self, *args, **kwargs):
        super(TalentsModel, self).__init__(*args, **kwargs)
        # Snapshot tracked fields so get_changed_fields() can diff later.
        self._initial_state = {field: getattr(
            self, field) for field in self.fields_to_track()}

    @classmethod
    def fields_to_track(cls):
        """Fields whose changes are recorded for auditing."""
        return ['talent_pay_rate', 'talent_pay_frequency', 'talent_date_of_birth', 'talent_email', 'talent_phone_number_primary']

    def get_changed_fields(self):
        """Return {field: original value} for tracked fields changed since load."""
        changed_fields = {}
        for field in self.fields_to_track():
            if self._initial_state[field] != getattr(self, field):
                changed_fields[field] = getattr(self, field) and self._initial_state[field] or self._initial_state[field]
        return changed_fields

    def save(self, *args, **kwargs):
        """Assign the next employee id on first save, then persist.

        Employee ids start at 1024 and step by 2.  NOTE(review): the
        max-then-insert pattern is race-prone under concurrent saves —
        consider a DB sequence or unique-retry loop.
        """
        if not self.talent_employee_id:
            last_talent_employee = TalentsModel.objects.order_by(
                '-talent_employee_id').first()
            if last_talent_employee:
                self.talent_employee_id = last_talent_employee.talent_employee_id + 2
            else:
                self.talent_employee_id = 1024
        # (The original also had a no-op branch reassigning the existing id
        # on updates; it was removed as dead code.)
        super(TalentsModel, self).save(*args, **kwargs)

    def __str__(self):
        return self.talent_full_name

    class Meta:
        db_table = 'talent_managment'
        ordering = ['-talent_id']
class TalentDocuments(models.Model):
    """Uploaded files (employment docs, photos) attached to a talent."""
    document_id = models.BigAutoField(primary_key=True)
    # Nullable so documents survive deletion of the owning talent record.
    talent = models.ForeignKey(
        TalentsModel, on_delete=models.SET_NULL, null=True)
    talent_employment_docs = models.FileField(
        upload_to='2023_talent_employment_docs')
    talent_uploaded_photos = models.ImageField(upload_to='photos')
    uploaded_date = models.DateTimeField(default=timezone.now)
    document_is_active = models.BooleanField(default=True)

    class Meta:
        db_table = 'talent_documents'
        # Ordered by the owning talent's id (FK column), newest talent first.
        ordering = ['-talent_id']
class TalentAudit(models.Model):
    """One audit record per tracked-field change on a talent."""

    talent_audit_id = models.BigAutoField(primary_key=True)
    # Changed from OneToOneField to ForeignKey to allow multiple audit records per talent
    talent = models.ForeignKey(TalentsModel, on_delete=models.CASCADE)
    # Internal user who made the change; kept (as NULL) if that user is deleted.
    created_by = models.ForeignKey(
        'internal_users.InternalUser', related_name="created_audits", swappable=True, on_delete=models.SET_NULL, null=True)
    created_at = models.DateTimeField(auto_now_add=True)
    field_changed = models.CharField(max_length=50, null=True, blank=True)
    old_value = models.CharField(max_length=255, null=True, blank=True)
    new_value = models.CharField(max_length=255, null=True, blank=True)

    class Meta:
        db_table = 'talent_audit'
        ordering = ['-talent_audit_id']
| zjgcainiao/new_place_at_76 | talent_management/models.py | models.py | py | 12,604 | python | en | code | 0 | github-code | 36 |
from pydantic import BaseSettings, BaseModel
from pathlib import Path
from .languages import WIKIPEDIA_LANGS
import toml
class Settings(BaseSettings):
    """Bot configuration, merged from settings.toml and .secrets.toml."""

    status: str = "unknown"
    # Chat id used for log messages; None disables chat logging.
    logging_chat: int = None
    db_path: Path = Path("jdanbot.db")
    music_path: Path = Path("media/music")
    # Required: must be present in settings.toml.
    admin_notes: list[str]
    bot_owners: list[int] = [795449748, 0]

    class Tokens(BaseModel):
        # Secret values (populated from .secrets.toml).
        bot_token: str

    class Schedule(BaseModel):
        delay_seconds: int = 20
        katz_bots: bool = False

    class Egg(BaseModel):
        # An "easter egg": trigger commands and the audio file they play.
        commands: list[str]
        audio: Path

    tokens: Tokens
    schedule: Schedule = Schedule()
    eggs: list[Egg]
with open("settings.toml") as file:
    settings_file = toml.loads(file.read())

with open(".secrets.toml") as file:
    secrets_file = toml.loads(file.read())

# Secrets take precedence over checked-in settings (dict union, Py3.9+).
settings = Settings.parse_obj(settings_file | secrets_file)

BASE_DIR = Path(__file__).parent.parent.parent
LOCALES_DIR = BASE_DIR / "locales"

# Extra short aliases for the wiki command of a given language.
WIKIPEDIA_SHORTCUTS = {
    "ru": ["w"],
    "en": ["v"],
    "uk": ["wua", "wikiua"]
}

# All recognized wiki commands: /wiki<lang> and /w<lang> for every
# supported language, plus the per-language shortcut aliases above.
WIKI_COMMANDS = []
for lang in WIKIPEDIA_LANGS:
    WIKI_COMMANDS.extend([f"wiki{lang}", f"w{lang}"])
for lang in WIKIPEDIA_SHORTCUTS:
    WIKI_COMMANDS.extend(WIKIPEDIA_SHORTCUTS[lang])
| jDan735/jdan734-bot | bot/config/config.py | config.py | py | 1,254 | python | en | code | 5 | github-code | 36 |
import time
from unittest import skip
from qiskit.providers.jobstatus import JobStatus
from qiskit_ibm_runtime.exceptions import RuntimeJobTimeoutError
from ..unit.mock.proxy_server import MockProxyServer, use_proxies
from ..ibm_test_case import IBMIntegrationJobTestCase
from ..decorators import run_integration_test
from ..utils import cancel_job_safe, wait_for_status
class TestIntegrationResults(IBMIntegrationJobTestCase):
    """Integration tests for result callbacks.

    These tests start real runtime jobs that publish interim results and
    verify that the websocket-based callbacks observe them, that the
    websocket is closed afterwards (``_server_close_code``), and that
    timeouts, proxies and cancellation behave as expected.
    """

    @skip("skip until qiskit-ibm-runtime #933 is fixed")
    @run_integration_test
    def test_result_callback(self, service):
        """Test result callback."""

        # The callback closes over `job`, `final_it` and `callback_err`;
        # errors are collected rather than raised so the main thread can
        # assert on them after the job finishes.
        def result_callback(job_id, result):
            nonlocal final_it
            if "iteration" in result:
                final_it = result["iteration"]
            nonlocal callback_err
            if job_id != job.job_id():
                callback_err.append(f"Unexpected job ID: {job_id}")
            if "interim_results" in result and result["interim_results"] != int_res:
                callback_err.append(f"Unexpected interim result: {result}")

        int_res = "foo"
        final_it = 0
        callback_err = []
        iterations = 3
        job = self._run_program(
            service,
            backend="ibmq_qasm_simulator",
            interim_results=int_res,
            callback=result_callback,
        )
        job.wait_for_final_state()
        self.assertEqual(iterations - 1, final_it)
        self.assertFalse(callback_err)
        self.assertIsNotNone(job._ws_client._server_close_code)

    @skip("skip until qiskit-ibm-runtime #933 is fixed")
    @run_integration_test
    def test_result_callback_with_job_result(self, service):
        """Test result callback along with job result."""

        def result_callback(job_id, result):
            nonlocal count
            count = count + 1
            nonlocal final_it
            if "iteration" in result:
                final_it = result["iteration"]
            nonlocal callback_err
            if job_id != job.job_id():
                callback_err.append(f"Unexpected job ID: {job_id}")
            if "interim_results" in result and result["interim_results"] != int_res:
                callback_err.append(f"Unexpected interim result: {result}")

        int_res = "foo"
        count = 0
        final_it = 0
        callback_err = []
        iterations = 3
        job = self._run_program(
            service,
            backend="ibmq_qasm_simulator",
            interim_results=int_res,
            callback=result_callback,
        )
        job.result()
        self.assertEqual(iterations - 1, final_it)
        # One callback per iteration plus one for the final result.
        self.assertEqual(iterations + 1, count)
        self.assertFalse(callback_err)
        self.assertIsNotNone(job._ws_client._server_close_code)

    @skip("skip until qiskit-ibm-runtime #933 is fixed")
    @run_integration_test
    def test_stream_results(self, service):
        """Test stream_results method."""

        def result_callback(job_id, result):
            nonlocal final_it
            if "iteration" in result:
                final_it = result["iteration"]
            nonlocal callback_err
            if job_id != job.job_id():
                callback_err.append(f"Unexpected job ID: {job_id}")
            if "interim_results" in result and result["interim_results"] != int_res:
                callback_err.append(f"Unexpected interim result: {result}")

        int_res = "bar"
        final_it = 0
        callback_err = []
        iterations = 3
        job = self._run_program(
            service,
            backend="ibmq_qasm_simulator",
            interim_results=int_res,
        )
        job.stream_results(result_callback)
        job.wait_for_final_state()
        self.assertEqual(iterations - 1, final_it)
        self.assertFalse(callback_err)
        self.assertIsNotNone(job._ws_client._server_close_code)

    @skip("skip until qiskit-ibm-runtime #933 is fixed")
    @run_integration_test
    def test_stream_results_done(self, service):
        """Test streaming results after job is done."""

        def result_callback(job_id, result):
            # pylint: disable=unused-argument
            nonlocal called_back_count
            called_back_count += 1

        called_back_count = 0
        job = self._run_program(
            service,
            backend="ibmq_qasm_simulator",
            interim_results="foobar",
        )
        job.wait_for_final_state()
        job._status = JobStatus.RUNNING  # Allow stream_results()
        job.stream_results(result_callback)
        time.sleep(2)
        # Callback is expected twice because both interim and final results are returned
        self.assertEqual(2, called_back_count)
        self.assertIsNotNone(job._ws_client._server_close_code)

    @skip("skip until qiskit-ibm-runtime #933 is fixed")
    @run_integration_test
    def test_retrieve_interim_results(self, service):
        """Test retrieving interim results with API endpoint"""
        job = self._run_program(service)
        job.wait_for_final_state()
        interim_results = job.interim_results()
        self.assertIn("iteration", interim_results[0])
        self.assertIn("counts", interim_results[0])

    @run_integration_test
    def test_result_timeout(self, service):
        """Test job result timeout"""
        job = self._run_program(service)
        with self.assertRaises(RuntimeJobTimeoutError):
            job.result(0.1)

    @run_integration_test
    def test_wait_for_final_state_timeout(self, service):
        """Test job wait_for_final_state timeout"""
        job = self._run_program(service)
        with self.assertRaises(RuntimeJobTimeoutError):
            job.wait_for_final_state(0.1)

    @skip("skip until qiskit-ibm-runtime #933 is fixed")
    @run_integration_test
    def test_callback_error(self, service):
        """Test error in callback method."""

        # Raises on the first iteration; the runtime client is expected to
        # log the exception and keep streaming the remaining iterations.
        def result_callback(job_id, result):
            # pylint: disable=unused-argument
            if "iteration" in result and result["iteration"] == 0:
                raise ValueError("Kaboom!")
            nonlocal final_it
            if "iteration" in result:
                final_it = result["iteration"]

        final_it = 0
        iterations = 10
        inputs = {"iterations": iterations, "sleep_per_iteration": 3}
        with self.assertLogs("qiskit_ibm_runtime", level="WARNING") as err_cm:
            job = self._run_program(
                service,
                backend="ibmq_qasm_simulator",
                inputs=inputs,
                interim_results="foo",
                callback=result_callback,
            )
            job.wait_for_final_state()
        self.assertIn("Kaboom", ", ".join(err_cm.output))
        self.assertEqual(iterations - 1, final_it)
        self.assertIsNotNone(job._ws_client._server_close_code)

    @run_integration_test
    def test_callback_cancel_job(self, service):
        """Test canceling a running job while streaming results."""

        def result_callback(job_id, result):
            # pylint: disable=unused-argument
            nonlocal final_it
            if "iteration" in result:
                final_it = result["iteration"]

        final_it = 0
        iterations = 5
        # Cancel once while still queued and once while running.
        sub_tests = [JobStatus.QUEUED, JobStatus.RUNNING]
        for status in sub_tests:
            with self.subTest(status=status):
                if status == JobStatus.QUEUED:
                    _ = self._run_program(service)
                job = self._run_program(
                    service=service,
                    interim_results="foo",
                    callback=result_callback,
                )
                wait_for_status(job, status)
                if not cancel_job_safe(job, self.log):
                    return
                time.sleep(3)  # Wait for cleanup
                self.assertIsNotNone(job._ws_client._server_close_code)
                self.assertLess(final_it, iterations)

    @skip("skip until qiskit-ibm-runtime #933 is fixed")
    @run_integration_test
    def test_websocket_proxy(self, service):
        """Test connecting to websocket via proxy."""

        def result_callback(job_id, result):  # pylint: disable=unused-argument
            nonlocal callback_called
            callback_called = True

        MockProxyServer(self, self.log).start()
        callback_called = False
        with use_proxies(service, MockProxyServer.VALID_PROXIES):
            job = self._run_program(
                service,
                backend="ibmq_qasm_simulator",
                callback=result_callback,
            )
            job.wait_for_final_state()
        self.assertTrue(callback_called)

    @run_integration_test
    def test_websocket_proxy_invalid_port(self, service):
        """Test connecting to websocket via invalid proxy port."""

        def result_callback(job_id, result):  # pylint: disable=unused-argument
            nonlocal callback_called
            callback_called = True

        callback_called = False
        invalid_proxy = {
            "https": "http://{}:{}".format(
                MockProxyServer.PROXY_IP_ADDRESS, MockProxyServer.INVALID_PROXY_PORT
            )
        }
        # TODO - verify WebsocketError in output log. For some reason self.assertLogs
        # doesn't always work even when the error is clearly logged.
        with use_proxies(service, invalid_proxy):
            job = self._run_program(service, callback=result_callback)
            job.wait_for_final_state()
        self.assertFalse(callback_called)
| Qiskit/qiskit-ibm-runtime | test/integration/test_results.py | test_results.py | py | 9,622 | python | en | code | 106 | github-code | 36 |
def get_employees():
    """Interactively collect worker names and salaries from stdin.

    Repeatedly prompts (in Russian) for a worker name and an integer
    salary, stopping when the user answers anything other than "да"
    ("yes") to the continue prompt.  The collected mapping is printed.

    Returns:
        dict: mapping of worker name to salary.  (The original version
        ended with ``return print(...)`` and therefore always returned
        ``None``; returning the dict makes the result reusable while
        remaining backward compatible for callers that ignored it.)
    """
    employees = {}
    while True:
        name = input("Введите имя рабочего:\n")
        salary = int(input(f"Введите зарплату, которую получает {name}:\n"))
        employees[name] = salary
        if input("Продолжить?:\n") != "да":
            break
    print(employees)
    return employees
| VadimZ92/Import_Module_Package | application/db/people.py | people.py | py | 444 | python | ru | code | 0 | github-code | 36 |
from sklearn import datasets
from sklearn.impute import SimpleImputer
from sklearn.model_selection import train_test_split
import pandas as pd
import numpy as np
# Multiclass Classification Datasets
def load_mnist():
    """Digits (8x8 MNIST-style) data as an (X_train, X_test, y_train, y_test) split."""
    digits = datasets.load_digits(as_frame=True)
    features, labels = digits.data, digits.target
    return train_test_split(features, labels, test_size=0.2, random_state=42)
def load_forest_covertypes(n_samples=15000):
    """Forest covertypes data as an (X_train, X_test, y_train, y_test) split.

    The full dataset is large, so only a leading subset is used.

    :param int n_samples: number of rows to keep before splitting
        (default 15000, matching the previous hard-coded subset).
    """
    forest = datasets.fetch_covtype(as_frame=True)
    forest_X = forest.data[:n_samples]
    forest_y = forest.target[:n_samples]
    return train_test_split(forest_X, forest_y, test_size=0.2, random_state=42)
def load_kepler_exoplanets():
    """Kepler exoplanet data as an (X_train, X_test, y_train, y_test) split.

    Drops identifier/leakage columns, imputes missing values with each
    column's mode, and predicts the 'koi_pdisposition' label.
    """
    kepler = pd.read_csv('data/kepler_exoplanet.csv')
    drop_cols = ['rowid', 'kepid', 'kepoi_name', 'kepler_name', 'koi_score',
                 'koi_pdisposition', 'koi_disposition', 'koi_teq_err1',
                 'koi_teq_err2', 'koi_tce_delivname']
    features = kepler.drop(drop_cols, axis=1)
    target = kepler['koi_pdisposition']
    imputer = SimpleImputer(missing_values=np.nan, strategy='most_frequent')
    features = pd.DataFrame(imputer.fit_transform(features),
                            columns=features.columns)
    return train_test_split(features, target, test_size=0.2, random_state=42)
# Regression Datasets
def load_cali_housing():
    """California housing regression data as an (X_train, X_test, y_train, y_test) split."""
    california = datasets.fetch_california_housing(as_frame=True)
    return train_test_split(california.data, california.target,
                            test_size=0.2, random_state=42)
def load_melbourne_housing():
    """Melbourne housing prices as an (X_train, X_test, y_train, y_test) split.

    Integer-encodes the categorical columns, imputes missing values with
    each column's mode, and predicts the 'Price' column.
    """
    melbourne = pd.read_csv('data/melbourne_housing_data.csv')
    features = melbourne.drop(['Address', 'Price', 'SellerG', 'Date'], axis=1)
    target = melbourne['Price']
    # Integer-encode the categorical columns.
    for col in ('Suburb', 'Type', 'Method', 'CouncilArea', 'Regionname'):
        features[col] = pd.Categorical(features[col]).codes
    # Fill missing entries with each column's most frequent value.
    imputer = SimpleImputer(missing_values=np.nan, strategy='most_frequent')
    features = pd.DataFrame(imputer.fit_transform(features),
                            columns=features.columns)
    return train_test_split(features, target, test_size=0.2, random_state=42)
def load_world_happiness():
    """World-happiness 2021 data as an (X_train, X_test, y_train, y_test) split.

    Integer-encodes the 'Regional indicator' column and predicts the
    'Ladder score'.

    Fix: operate on an explicit ``.copy()`` of the selected columns so
    the categorical encoding below does not assign into a slice of the
    original frame (pandas SettingWithCopyWarning / potentially lost
    assignment).
    """
    happiness = pd.read_csv('data/world-happiness-report-2021.csv')
    X_columns = ['Regional indicator', 'Logged GDP per capita', 'Social support', 'Healthy life expectancy', 'Freedom to make life choices', 'Generosity', 'Perceptions of corruption']
    happiness_X = happiness[X_columns].copy()
    happiness_y = happiness['Ladder score']
    happiness_X['Regional indicator'] = pd.Categorical(happiness_X['Regional indicator']).codes
    return train_test_split(happiness_X, happiness_y, test_size=0.2, random_state=42)
# Binary Classification Datasets
def load_heart_attack():
    """Heart-attack binary classification data as an (X_train, X_test, y_train, y_test) split."""
    heart = pd.read_csv('data/heart-attack.csv')
    return train_test_split(heart.drop('output', axis=1), heart['output'],
                            test_size=0.2, random_state=42)
def load_stroke():
    """Stroke-prediction binary data as an (X_train, X_test, y_train, y_test) split.

    Integer-encodes categorical columns, imputes missing values with
    each column's mode, and predicts the 'stroke' column.
    """
    stroke = pd.read_csv('data/healthcare-dataset-stroke-data.csv')
    features = stroke.drop(['id', 'stroke'], axis=1)
    target = stroke['stroke']
    for col in ('gender', 'ever_married', 'work_type', 'Residence_type',
                'smoking_status'):
        features[col] = pd.Categorical(features[col]).codes
    imputer = SimpleImputer(missing_values=np.nan, strategy='most_frequent')
    features = pd.DataFrame(imputer.fit_transform(features),
                            columns=features.columns)
    return train_test_split(features, target, test_size=0.2, random_state=42)
def load_telecom():
    """Telecom churn binary data as an (X_train, X_test, y_train, y_test) split.

    Integer-encodes every non-numeric column, imputes missing values
    with each column's mode, and predicts the integer-coded 'Churn'.
    """
    telecom = pd.read_csv('data/telecom_users.csv')
    features = telecom.drop(['Unnamed: 0', 'customerID', 'TotalCharges',
                             'Churn'], axis=1)
    target = pd.Categorical(telecom['Churn']).codes
    # Every remaining column except these numeric ones is categorical.
    numeric_cols = {'SeniorCitizen', 'tenure', 'MonthlyCharges'}
    for col in features.columns:
        if col not in numeric_cols:
            features[col] = pd.Categorical(features[col]).codes
    imputer = SimpleImputer(missing_values=np.nan, strategy='most_frequent')
    features = pd.DataFrame(imputer.fit_transform(features),
                            columns=features.columns)
    return train_test_split(features, target, test_size=0.2, random_state=42)
def load_multiclass():
    """Return a dict mapping dataset name -> (X_train, X_test, y_train, y_test)."""
    return {
        'mnist': load_mnist(),
        'forest_covertypes': load_forest_covertypes(),
        'kepler_exoplanets': load_kepler_exoplanets(),
    }
def load_regression():
    """Return a dict mapping dataset name -> (X_train, X_test, y_train, y_test)."""
    return {
        'california_housing': load_cali_housing(),
        'melbourne_housing': load_melbourne_housing(),
        'world_happiness': load_world_happiness(),
    }
def load_binary():
    """Return a dict mapping dataset name -> (X_train, X_test, y_train, y_test)."""
    return {
        'heart_attack': load_heart_attack(),
        'stroke': load_stroke(),
        'telecom': load_telecom(),
    }
| eccabay/CMA-ES_hyperparameters | load_data.py | load_data.py | py | 6,168 | python | en | code | 0 | github-code | 36 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 18 11:28:15 2017
@author: vitorhadad
"""
import torch
from torch import nn, cuda
from torch.autograd import Variable
import torch.nn.functional as F
from torch import optim
import numpy as np
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
class RNN(nn.Module):
    """(Bi)LSTM sequence labeller with an auxiliary count regressor.

    ``forward`` returns per-timestep 2-class logits plus one scalar
    count prediction per sequence; ``run`` performs a single
    optimization step on a zero-padded batch.

    Fixes relative to the original implementation:

    * The un-sorting permutation is now the true inverse of the
      length-sort permutation (``np.argsort(order)``).  The previous
      ``order[order]`` only coincides with the inverse for special
      permutations (e.g. 3-cycles) — for a simple two-element swap it
      left the batch in sorted order, misaligning logits with targets.
    * ``run`` no longer reads the undefined global ``batch_size``
      (a NameError when the module is imported); the batch size is
      derived from ``lens``.
    * ``class_weights`` defaults to an immutable tuple instead of a
      mutable list.
    """

    def __init__(self,
                 input_size,
                 hidden_size,
                 num_layers=1,
                 bidirectional=False,
                 class_weights=(1, 10)):
        super(RNN, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.num_directions = 2 if bidirectional else 1
        self.seq_len = 1
        self.num_classes = 2
        self.class_weights = class_weights

        # Per-timestep classification loss; kept un-reduced so padding
        # can be masked out in `run`.
        self.loss_fn = nn.CrossEntropyLoss(
            reduce=False, weight=torch.FloatTensor(self.class_weights))
        self.count_loss_fn = nn.MSELoss()

        self.rnn = nn.LSTM(input_size,
                           hidden_size,
                           num_layers,
                           bidirectional=bidirectional)
        self.logit_layer = nn.Linear(hidden_size, self.num_classes)
        self.count_layer = nn.Linear(hidden_size, 1)

        # Learnable initial LSTM state, repeated across the batch.
        self.c0 = nn.Parameter(torch.randn(num_layers * self.num_directions,
                                           1,
                                           hidden_size), requires_grad=True)
        self.h0 = nn.Parameter(torch.randn(num_layers * self.num_directions,
                                           1,
                                           hidden_size), requires_grad=True)

        self.optim = optim.Adam(self.parameters(), lr=0.001)

    def forward(self, inputs, lens=None):
        """Run the LSTM over a padded batch.

        :param inputs: float array, shape (seq_len, batch, input_size);
            padding rows must be all-zero so lengths can be inferred.
        :param lens: per-sequence true lengths; inferred from the
            nonzero rows of `inputs` when omitted.
        :return: ``(logits, counts)`` — logits of shape
            (batch, seq_len, 2) in the ORIGINAL batch order, and counts
            of shape (batch, 1).
        """
        if lens is None:
            lens = inputs.any(2).sum(0)
        inputs = Variable(torch.FloatTensor(inputs), requires_grad=False)

        # pack_padded_sequence requires decreasing lengths; `order`
        # sorts the batch and `order_inv` restores the original order.
        order = np.flip(np.argsort(lens), 0).astype(int)
        order_inv = torch.LongTensor(np.argsort(order))
        seq = pack_padded_sequence(inputs[:, order, :], lens[order])

        this_batch_size = int(seq.batch_sizes[0])
        initial_state = (self.c0.repeat(1, this_batch_size, 1),
                         self.h0.repeat(1, this_batch_size, 1))
        outputs, _ = self.rnn(seq, initial_state)
        outputs, lens = pad_packed_sequence(outputs)

        # Sum forward/backward direction features.  NOTE(review): when
        # bidirectional=False the second slice is empty and this addition
        # fails, as in the original — the model is used bidirectionally.
        outputs = outputs[:, :, :self.hidden_size] +\
            outputs[:, :, self.hidden_size:]

        prelogits = outputs[:, order_inv, :].transpose(1, 0)
        logits = self.logit_layer(prelogits)

        # Pool hidden states over time, then regress the label count.
        precounts = prelogits.sum(1).squeeze()
        counts = self.count_layer(precounts)
        return logits, counts

    def run(self, inputs, true_outputs, lens=None):
        """One optimization step on a padded batch.

        :param inputs: float array, shape (seq_len, batch, input_size).
        :param true_outputs: integer labels, shape (batch, seq_len, 1).
        :param lens: per-sequence lengths; inferred when omitted.
        :return: (logit_loss, count_loss, logits, counts) — losses as
            scalars, logits as a torch tensor, counts as a numpy array.
        """
        if lens is None:
            lens = inputs.any(2).sum(0)

        ylogits, ycount = self.forward(inputs, lens)
        ytruth = Variable(torch.LongTensor(true_outputs), requires_grad=False)

        # Average the masked per-sequence losses over the batch
        # (was: division by an undefined global `batch_size`).
        n_seqs = len(lens)
        logit_loss = 0
        for i, l in enumerate(lens):
            yh = ylogits[i, :l]
            yt = ytruth[i, :l].view(-1)
            try:
                logit_loss += self.loss_fn(yh, yt).mean()
            except RuntimeError as e:
                # Best effort: report and skip a malformed slice rather
                # than aborting the whole training step.
                print(e)
        logit_loss /= n_seqs

        count_loss = self.count_loss_fn(ycount, ytruth.sum(1).float())
        loss = logit_loss + count_loss

        self.optim.zero_grad()
        loss.backward()
        self.optim.step()

        return (logit_loss.data.numpy()[0],
                count_loss.data.numpy()[0],
                ylogits, ycount.data.numpy())

    def __str__(self):
        return "RNN_{}-{}"\
            .format(self.hidden_size,
                    self.num_layers)
#%%
if __name__ == "__main__":
from matching.utils.data_utils import open_file, confusion
from sys import argv, platform
if platform == "darwin":
argv.extend(["abo", 1, 100, .5, np.random.randint(1e8)])
#if len(argv) > 1:
print("Creating new RNN")
env_type = argv[1]
num_layers = int(argv[2])
hidden_size = int(argv[3])
c = float(argv[4])
s = str(argv[5])
input_size = {"abo":24, "optn":294}
net = RNN(input_size=input_size[env_type],
hidden_size=hidden_size,
num_layers=num_layers,
bidirectional=True,
class_weights = [1,100*c])
batch_size = 32
open_every = 10
save_every = 500
log_every = 10
name = "{}-{}_{}".format(
str(net),
env_type,
s)
#%%
for i in range(10000000):
if i % open_every == 0:
X, Y, GN = open_file(env_type = env_type, open_GN = True, open_A = False)
SS = np.concatenate([X, GN], 2).transpose((1,0,2))
n = SS.shape[1]
idx = np.random.choice(n, size=batch_size)
inputs = SS[:,idx,:]
ytrue = Y[idx]
lens = inputs.any(2).sum(0)
avg_ones = np.hstack([Y[k,:l,0] for k,l in zip(idx, lens)]).mean()
if avg_ones > 0:
w = c*1/avg_ones
net.loss_fn = nn.CrossEntropyLoss(reduce = False,
weight = torch.FloatTensor([1, w]))
lloss,closs, ylogits, ycount = net.run(inputs, ytrue, lens)
cacc = np.mean(ycount.round() == ytrue.sum(1))
tp, tn, fp, fn = confusion(ylogits, ytrue, lens)
tpr = tp/(tp+fn)
tnr = tn/(tn+fp)
lacc = (tp + tn)/(tp+fp+tn+fn)
if tpr < .1:
c *= 1.05
if tnr < .1:
c *= .95
msg = "{:1.4f},{:1.4f},{:1.4f},"\
"{:1.4f},{:1.4f},{:1.4f},{:1.4f}"\
.format(lloss,
closs,
tpr, # True positive rate
tnr, # True negative rate
lacc, # Logits accuracy
cacc, # Count accuracy
w)
if i % log_every == 0:
print(msg)
if platform == "linux":
with open("results/" + name + ".txt", "a") as f:
print(msg, file = f)
if platform == "linux" and i % save_every == 0:
torch.save(net, "results/" + name)
| halflearned/organ-matching-rl | matching/deep_ml/count_lstm.py | count_lstm.py | py | 6,722 | python | en | code | 2 | github-code | 36 |
import pygame
import Snake, Apple
import os
os.environ['SDL_VIDEO_CENTERED'] = '1'  # center the game window on screen
pygame.mixer.pre_init(44100, -16, 1, 512)  # small buffer -> low sound latency
pygame.init()
pygame.display.set_caption("Sssssnake")
# playfield is 10x10 segments (the 200x240 window leaves a 20px band
# top and bottom; the top band shows the score)
winwidth = 200
winheight = 240
win = pygame.display.set_mode((winwidth, winheight))
# segment - 20x20px
segsize = 20
snake = Snake.Snake(0, 20, segsize, segsize-2, segsize-2)
apple = Apple.Apple(segsize//2, winwidth, winheight, segsize, snake.segments)
# font
font = pygame.font.SysFont("monospace", 15)
# sounds
eatsound = pygame.mixer.Sound('sounds/eat sound.wav')
losesound = pygame.mixer.Sound('sounds/lose sound.wav')
music = pygame.mixer.music.load('sounds/bg music.mp3')
pygame.mixer.music.play(-1)  # loop background music forever
def lost():
    """Game-over handling: play the lose jingle, draw the game-over
    screen, and block until the player chooses replay (y) or quit (n)."""
    pygame.mixer.music.stop()
    losesound.play()
    global win, running, snake, score
    gameover = font.render("GAME OVER :(", 1, (255, 255, 255))
    playagain = font.render("Play again?", 1, (255, 255, 255))
    yorn = font.render("(y) (n)", 1, (255, 255, 255))
    win.blit(gameover, (winwidth//2 - 55, winheight//2 - 35))
    win.blit(playagain, (winwidth//2 - 52, winheight//2 - 15))
    win.blit(yorn, (winwidth//2 - 40, winheight//2))
    pygame.display.update()
    # Drop buffered events so an earlier keypress cannot auto-answer.
    pygame.event.clear()
    while True:
        event = pygame.event.wait()
        if event.type == pygame.QUIT:
            running = False
            break
        if event.type == pygame.KEYDOWN:
            if event.key == pygame.K_y:
                # Restart: fresh snake, reset score, restart the music.
                snake = Snake.Snake(0, 20, segsize, segsize-2, segsize-2)
                apple.forbidden = snake.segments
                score = 0
                pygame.mixer.music.play(-1)
                break
            if event.key == pygame.K_n:
                running = False
                break
# mainloop
# mainloop
running = True
score = 0
while running:
    pygame.time.delay(200)  # fixed step: the snake moves 5 times per second
    keypressed = False
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            running = False
        if event.type == pygame.KEYDOWN and not keypressed:
            # One direction change per tick; reversing onto yourself is
            # forbidden (direction is (row_delta, col_delta)).
            if event.key == pygame.K_UP and snake.direction != (1, 0):
                snake.direction = (-1, 0)
                keypressed = True
            elif event.key == pygame.K_DOWN and snake.direction != (-1, 0):
                snake.direction = (1, 0)
                keypressed = True
            elif event.key == pygame.K_LEFT and snake.direction != (0, 1):
                snake.direction = (0, -1)
                keypressed = True
            elif event.key == pygame.K_RIGHT and snake.direction != (0, -1):
                snake.direction = (0, 1)
                keypressed = True
    # calculating new position
    tempx = snake.x + snake.vel * snake.direction[1]
    tempy = snake.y + snake.vel * snake.direction[0]
    if 0 <= tempx <= winwidth - snake.segwidth and segsize <= tempy <= winheight - segsize - snake.segheigth:
        snake.x = tempx
        snake.y = tempy
    else:
        #collision with borders
        lost()
        continue
    snake.move()
    # collision with snake
    if snake.segments.count((snake.x, snake.y)) > 1:
        lost()
        continue
    win.fill((0, 0, 0))
    # collision with apple
    if snake.x <= apple.x <= snake.x + segsize and snake.y <= apple.y <= snake.y + segsize:
        eatsound.play()
        snake.addsegment()
        snake.draw(win)
        apple.setposition()
        score += 1
    else:
        snake.draw(win)
    label = font.render("Score {}".format(score), 1, (255, 255, 255))
    win.blit(label, (winwidth - 70, 0))
    apple.draw(win)
    # playfield border (the top band is reserved for the score display)
    pygame.draw.rect(win, (255, 255, 0), (1, 20, winwidth - 2, winheight - 2*segsize), 1)
    pygame.display.update()
pygame.quit()
# pip install uszipcode
import pandas as pd
from uszipcode import SearchEngine
search = SearchEngine(simple_zipcode=False)
def add_coordinates(df):
    '''Add 'lat', 'lng' and 'county' columns to a dataframe, in place.

    Input: a pandas dataframe which includes a 'Zip' field representing a
    US zip code.
    Output: the same dataframe, with 'lat' and 'lng' fields added which
    represent latitude and longitude, plus the zip code's 'county'.
    '''
    # Look each zip code up only once — the original called
    # search.by_zipcode three times per row and wrapped each scalar in a
    # needless pd.Series.
    info = df['Zip'].apply(search.by_zipcode)
    df['lat'] = info.apply(lambda z: z.lat)
    df['lng'] = info.apply(lambda z: z.lng)
    df['county'] = info.apply(lambda z: z.county)
    return df
def add_fips(df):
    '''Return the Colorado-only rows of a dataframe with FIPS codes added.

    Input: a pandas dataframe which includes a 'Zip' field representing a
    US zip code.
    Output: a new dataframe restricted to rows whose zip code falls in a
    Colorado county, with 'county' and 'fip' (county FIPS code) columns
    added.  (Docstring fixed: it previously described lat/lng.)
    '''
    colorado_fips = ['08001', '08003', '08005', '08007', '08009', '08011', '08013', '08014', '08015', '08017', '08019',
                     '08021', '08023', '08025', '08027', '08029', '08031', '08033', '08035', '08037', '08039', '08041',
                     '08043', '08045', '08047', '08049', '08051', '08053', '08055', '08057', '08059', '08061', '08063',
                     '08065', '08067', '08069', '08071', '08073', '08075', '08077', '08079', '08081', '08083', '08085',
                     '08087', '08089', '08091', '08093', '08095', '08097', '08099', '08101', '08103', '08105', '08107',
                     '08109', '08111', '08113', '08115', '08117', '08119', '08121', '08123', '08125']
    colorado_counties = ['Adams County', 'Alamosa County', 'Arapahoe County', 'Archuleta County', 'Baca County', 'Bent County', 'Boulder County', 'Broomfield County',
                         'Chaffee County', 'Cheyenne County', 'Clear Creek County', 'Conejos County', 'Costilla County', 'Crowley County', 'Custer County',
                         'Delta County', 'Denver County', 'Dolores County', 'Douglas County', 'Eagle County', 'Elbert County', 'El Paso County', 'Fremont County',
                         'Garfield County', 'Gilpin County', 'Grand County', 'Gunnison County', 'Hinsdale County', 'Huerfano County', 'Jackson County', 'Jefferson County',
                         'Kiowa County', 'Kit Carson County', 'Lake County', 'La Plata County', 'Larimer County', 'Las Animas County', 'Lincoln County', 'Logan County',
                         'Mesa County', 'Mineral County', 'Moffat County', 'Montezuma County', 'Montrose County', 'Morgan County', 'Otero County', 'Ouray County',
                         'Park County', 'Phillips County', 'Pitkin County', 'Prowers County', 'Pueblo County', 'Rio Blanco County', 'Rio Grande County', 'Routt County',
                         'Saguache County', 'San Juan County', 'San Miguel County', 'Sedgwick County', 'Summit County', 'Teller County', 'Washington County',
                         'Weld County', 'Yuma County']
    counties_fips_dict = dict(zip(colorado_counties, colorado_fips))
    df['county'] = df['Zip'].apply(lambda x: search.by_zipcode(x).county)
    # Take an explicit copy so the 'fip' assignment below does not hit
    # pandas' SettingWithCopyWarning on a boolean-mask slice of `df`.
    col_only_df = df[df['county'].isin(colorado_counties)].copy()
    # Every remaining county is guaranteed to be a dict key by the isin
    # filter above, so map() cannot introduce NaNs here.
    col_only_df['fip'] = col_only_df['county'].map(counties_fips_dict)
    return col_only_df
| dslachar/analysis_of_crp_data | add_coordinates.py | add_coordinates.py | py | 3,241 | python | en | code | 0 | github-code | 36 |
import numpy as np
import tensorflow as tf
from mpl_toolkits import mplot3d
from d2l import tensorflow as d2l
def f(x):
    """Objective function f(x) = x**2."""
    return x * x
def f_grad(x):
    """Gradient of f: f'(x) = 2x."""
    return x + x
def gd(eta, f_grad):
    """Run 10 gradient-descent steps from x=10; return the trajectory."""
    x = 10.0
    trajectory = [x]
    for _ in range(10):
        x = x - eta * f_grad(x)
        trajectory.append(float(x))
    print(f'Epoch 10, x: {x:f}')
    return trajectory
def show_trace(results, f):
    """Plot the objective curve together with the optimization trace.

    :param results: sequence of x iterates produced by `gd`.
    :param f: scalar objective used to evaluate the curve and iterates.
    """
    # Symmetric plotting range covering every iterate.
    n = max(abs(min(results)), abs(max(results)))
    f_line = tf.range(-n, n, 0.01)
    d2l.set_figsize()
    d2l.plot([f_line, results], [[f(x) for x in f_line], [
        f(x) for x in results]], 'x', 'f(x)', fmts=['-', '-o'])
def train_2d(trainer, steps=20):
    """Optimize a 2-d objective with a customized trainer, recording (x1, x2).

    Starts from (-5, -2) with zero state variables and returns the list
    of visited points, including the starting point.
    """
    x1, x2, s1, s2 = -5, -2, 0, 0
    trace = [(x1, x2)]
    for _ in range(steps):
        x1, x2, s1, s2 = trainer(x1, x2, s1, s2)
        trace.append((x1, x2))
    return trace
def show_trace_2d(f, results):
    """Show the trace of 2D variables during optimization."""
    d2l.set_figsize()
    # Optimization path.
    d2l.plt.plot(*zip(*results), '-o', color='#ff7f0e')
    # Contour lines of the objective over a fixed viewing window.
    x1, x2 = np.meshgrid(np.arange(-5.5, 1.0, 0.1),
                         np.arange(-3.0, 1.0, 0.1))
    d2l.plt.contour(x1, x2, f(x1, x2), colors='#1f77b4')
    d2l.plt.xlabel('x1')
    d2l.plt.ylabel('x2')
def f(x1, x2):
    """2-d objective f(x1, x2) = x1**2 + 2*x2**2."""
    return x1 ** 2 + 2 * x2 ** 2


def gradf(x1, x2):
    """Analytic gradient of f."""
    return (2 * x1, 4 * x2)


def gd(x1, x2, s1, s2, eta=0.1):
    """One gradient-descent step on `f` (state vars s1, s2 unused).

    Fix: `eta` was read from a global defined only under ``__main__``
    (NameError when imported); it is now a keyword argument whose
    default matches the value the script used.
    """
    g1, g2 = gradf(x1, x2)
    return (x1 - eta * g1, x2 - eta * g2, 0, 0)
if __name__ == "__main__":
    # results = gd(0.3, f_grad)
    # show_trace(results, f)
    eta = 0.1  # learning rate used by gd
    results = train_2d(gd)
    print(results)
| AnhVietPham/Deep-Learning | optimization-algrithms/gradient-descent/main.py | main.py | py | 1,568 | python | en | code | 0 | github-code | 36 |
import os
import mujoco_py
import numpy as np
from mujoco_py import functions
from learn_seq.utils.general import get_mujoco_model_path
# object-type indicators in mujoco (used with mj_name2id);
# NOTE(review): values appear to mirror MuJoCo's mjtObj enum — confirm.
MJ_SITE_OBJ = 6  # `site` object
MJ_BODY_OBJ = 1  # `body` object
MJ_GEOM_OBJ = 5  # `geom` object
# geom types (compared against model.geom_type)
MJ_CYLINDER = 5
MJ_BOX = 6
MJ_MESH = 7
def load_model(xml_name="round_pih.xml"):
    """Load a MuJoCo model from the package model directory and build a sim.

    :param str xml_name: file name of the MJCF model to load.
    :return: a simulator bound to the loaded model.
    :rtype: mujoco_py.MjSim
    """
    model_path = get_mujoco_model_path()
    xml_path = os.path.join(model_path, xml_name)
    model = mujoco_py.load_model_from_path(xml_path)
    sim = mujoco_py.MjSim(model)
    return sim
def attach_viewer(sim):
    """Create an on-screen viewer bound to the given simulator."""
    return mujoco_py.MjViewer(sim)
def set_state(sim, qpos, qvel):
    """Overwrite the simulator state with the given positions/velocities.

    :param mujoco_py.MjSim sim: simulator whose state is replaced.
    :param np.ndarray qpos: joint positions, shape (model.nq,).
    :param np.ndarray qvel: joint velocities, shape (model.nv,).
    """
    assert qpos.shape == (sim.model.nq, ) and qvel.shape == (sim.model.nv, )
    old_state = sim.get_state()
    new_state = mujoco_py.MjSimState(old_state.time, qpos, qvel, old_state.act,
                                     old_state.udd_state)
    sim.set_state(new_state)
    # Recompute derived quantities (forward kinematics) for the new state.
    sim.forward()
def get_contact_force(mj_model, mj_data, body_name, frame_pos, frame_quat):
    """Get the force acting on a body, with respect to a frame.

    Note that mj_rnePostConstraint should be called before this function
    to update the simulator state.

    :param str body_name: Body name in mujoco xml model.
    :param np.array frame_pos: target frame origin in the world frame.
    :param np.array frame_quat: target frame orientation (quaternion).
    :return: force:torque format.
    :rtype: np.array(6)
    """
    bodyId = mujoco_py.functions.mj_name2id(mj_model, MJ_BODY_OBJ, body_name)
    # cfrc_ext stores the external wrench in torque:force order at the
    # subtree COM, aligned with the world axes.
    force_com = mj_data.cfrc_ext[bodyId, :]
    # contact force frame
    # orientation is aligned with world frame
    qf = np.array([1, 0, 0, 0.])
    # position of origin in the world frame
    body_rootid = mj_model.body_rootid[bodyId]
    pf = mj_data.subtree_com[body_rootid, :]

    # inverse com frame
    pf_inv, qf_inv = np.zeros(3), np.zeros(4)
    functions.mju_negPose(pf_inv, qf_inv, pf, qf)
    # T^com_target
    p_ct, q_ct = np.zeros(3), np.zeros(4)
    functions.mju_mulPose(p_ct, q_ct, pf_inv, qf_inv, frame_pos, frame_quat)
    # q_ct -> mat
    mat_ct = np.zeros(9)
    functions.mju_quat2Mat(mat_ct, q_ct)

    # transform to desired frame
    trn_force = force_com.copy()
    functions.mju_transformSpatial(trn_force, force_com, 1, p_ct, np.zeros(3),
                                   mat_ct)
    # reverse order to get force:torque format
    return np.concatenate((trn_force[3:], trn_force[:3]))
def get_geom_pose(model, geom_name):
    """Return the geom pose (relative to parent body).

    NOTE(review): the returned arrays are slices into the model's
    internal arrays — mutating them edits the model; copy if needed.

    :param mujoco_py.MjModel model:
    :param str geom_name:
    :return: position, quaternion
    :rtype: tuple(np.array(3), np.array(4))
    """
    geom_id = functions.mj_name2id(model, MJ_GEOM_OBJ, geom_name)
    pos = model.geom_pos[geom_id, :]
    quat = model.geom_quat[geom_id, :]
    return pos, quat
def get_geom_size(model, geom_name):
    """Return the geom size.

    :param mujoco_py.MjModel model:
    :param str geom_name:
    :return: (radius, half-length, _) for cylinder geom, and
             (X half-size; Y half-size; Z half-size) for box geom;
             None for any other geom type (e.g. mesh).
    :rtype: np.array(3)
    """
    geom_id = functions.mj_name2id(model, MJ_GEOM_OBJ, geom_name)
    if model.geom_type[geom_id] == MJ_BOX or model.geom_type[
            geom_id] == MJ_CYLINDER:
        return model.geom_size[geom_id, :]
    else:
        return None
def get_geom_friction(model, geom_name):
    """Return the named geom's friction coefficients, shape (3,)."""
    geom_id = functions.mj_name2id(model, MJ_GEOM_OBJ, geom_name)
    return model.geom_friction[geom_id, :]
def get_body_mass(model, body_name):
    """Return the mass of the named body."""
    body_id = functions.mj_name2id(model, MJ_BODY_OBJ, body_name)
    return model.body_mass[body_id]
def get_body_pose(model, body_name):
    """Return (position, quaternion) of the named body relative to its parent."""
    body_id = functions.mj_name2id(model, MJ_BODY_OBJ, body_name)
    return model.body_pos[body_id], model.body_quat[body_id]
def get_mesh_vertex_pos(model, geom_name):
    """Return the vertex positions of a mesh geom.

    The named geom must be of mesh type; its vertices are a contiguous
    slice of the model-wide vertex table.
    """
    gid = functions.mj_name2id(model, MJ_GEOM_OBJ, geom_name)
    assert model.geom_type[gid] == MJ_MESH
    mesh_id = model.geom_dataid[gid]
    start = model.mesh_vertadr[mesh_id]
    count = model.mesh_vertnum[mesh_id]
    return model.mesh_vert[start:start + count]
def set_geom_size(model, geom_name, size):
    """Overwrite the size vector of the named geom."""
    gid = functions.mj_name2id(model, MJ_GEOM_OBJ, geom_name)
    model.geom_size[gid, :] = size
def set_body_mass(model, body_name, mass):
    """Overwrite the mass of the named body."""
    bid = functions.mj_name2id(model, MJ_BODY_OBJ, body_name)
    model.body_mass[bid] = mass
def set_geom_friction(model, geom_name, friction):
    """Overwrite the friction parameters of the named geom."""
    gid = functions.mj_name2id(model, MJ_GEOM_OBJ, geom_name)
    model.geom_friction[gid, :] = friction
def set_body_pose(model, body_name, pos, quat):
    """Overwrite the pose of the named body (relative to its parent)."""
    bid = functions.mj_name2id(model, MJ_BODY_OBJ, body_name)
    model.body_pos[bid, :] = pos
    model.body_quat[bid, :] = quat
# -------- GEOMETRY TOOLS
def quat_error(q1, q2):
    """Rotation vector (expressed in the base frame) that, followed for a
    unit time, rotates a body from orientation `q1` to orientation `q2`.

    :param list/np.ndarray q1: start quaternion.
    :param list/np.ndarray q2: goal quaternion.
    :return: a 3D rotation vector
    :rtype: np.ndarray
    """
    if isinstance(q1, list):
        q1 = np.array(q1)
    if isinstance(q2, list):
        q2 = np.array(q2)
    dtype = q1.dtype
    # Flip q1 when the quaternions lie in opposite hemispheres so the
    # resulting error follows the shorter rotation.
    if q1.dot(q2) < 0:
        q1 = -q1
    q1_conj = np.zeros(4, dtype=dtype)
    delta_quat = np.zeros(4, dtype=dtype)
    rot_vec = np.zeros(3, dtype=dtype)
    functions.mju_negQuat(q1_conj, q1)
    functions.mju_mulQuat(delta_quat, q2, q1_conj)
    functions.mju_quat2Vel(rot_vec, delta_quat, 1)
    return rot_vec
def quat2mat(q):
    """Convert a quaternion to a 3x3 rotation matrix.

    :param np.ndarray q: quaternion (4 entries).
    :return: 3x3 rotation matrix
    :rtype: np.ndarray
    """
    flat = np.zeros(9)
    functions.mju_quat2Mat(flat, q)
    return flat.reshape((3, 3))
def pose_transform(p1, q1, p21, q21):
    """Transform a pose expressed in frame 1 into frame 2.

    :param np.ndarray p1: position in frame 1
    :param np.ndarray q1: orientation (quaternion) in frame 1
    :param np.ndarray p21: relative position between frame 1 and 2
    :param np.ndarray q21: relative orientation between frame 1 and 2
    :return: position and orientation in frame 2
    """
    rot21 = quat2mat(q21)           # quaternion -> rotation matrix
    p2 = p21 + rot21.dot(p1)
    q2 = np.zeros_like(q1)
    functions.mju_mulQuat(q2, q21, q1)  # q2 = q21 * q1
    return p2, q2
def integrate_quat(q, r, dt):
    """Integrate quaternion `q` under a fixed angular velocity `r` for
    duration `dt`.

    :param np.array(4) q: quaternion.
    :param np.array(3) r: angular velocity.
    :param float dt: duration.
    :return: resulting quaternion.
    :rtype: np.array(4)
    """
    rotation = r * dt
    angle = np.linalg.norm(rotation)
    if angle < 1e-9:
        # Rotation too small to normalize safely: orientation unchanged.
        return q.copy()
    qe = np.zeros(4)
    qres = np.zeros(4)
    functions.mju_axisAngle2Quat(qe, rotation / angle, angle)
    functions.mju_mulQuat(qres, qe, q)
    return qres
def transform_spatial(v1, q21):
    """Rotate a 6-D spatial vector from frame 1 to frame 2.

    The spatial vector can be either a twist (linear + angular velocity)
    or a wrench (force + torque); both halves rotate by the same matrix.

    :param v1: spatial vector in frame 1
    :param q21: relative orientation (quaternion) between the frames
    """
    rot = quat2mat(q21)
    block_diag = np.block([[rot, np.zeros((3, 3))], [np.zeros((3, 3)), rot]])
    return block_diag.dot(v1)
def similarity_transform(A1, q21):
    """Express the 3x3 matrix `A1` (given in frame 1) in frame 2:
    A2 = R21 * A1 * R21^T.

    :param np.array((3, 3)) A1: 3x3 matrix.
    :param np.array(4) q21: relative orientation (quaternion).
    :rtype: np.array((3, 3))
    """
    rot = quat2mat(q21)
    return rot.dot(A1.dot(rot.T))
# NOTE: there are infinite rotation vector solutions for a particular
# orientation, the `ref` is to find the closest solution to a reference.
# Is there another minimal representation that could avoid this?
def quat2vec(q, ref=None):
    """Convert a quaternion to a rotation-vector representation.

    Rotation vectors are not unique; when `ref` is given, the solution on
    the same side as `ref` is chosen (subtracting a full turn if needed).
    """
    rot = np.zeros(3)
    mujoco_py.functions.mju_quat2Vel(rot, q, 1)
    if ref is not None and rot.dot(ref) < 0:
        angle = np.linalg.norm(rot)
        rot = (rot / angle) * (angle - 2 * np.pi)
    return rot
def inverse_frame(p, q):
    """Return the inverse of the frame transform (p, q)."""
    p_inv, q_inv = np.zeros(3), np.zeros(4)
    functions.mju_negPose(p_inv, q_inv, p, q)
    return p_inv, q_inv
def mat2quat(R):
    """Convert a 3x3 rotation matrix to a quaternion."""
    q = np.zeros(4)
    mujoco_py.functions.mju_mat2Quat(q, R.flatten())
    return q
def mul_quat(q1, q2):
    """Return the quaternion product q1 * q2."""
    result = np.zeros(4)
    mujoco_py.functions.mju_mulQuat(result, q1, q2)
    return result
| deanpham98/learn-seq | learn_seq/utils/mujoco.py | mujoco.py | py | 9,113 | python | en | code | 3 | github-code | 36 |
31458300119 | import pygame
import jumpingHorses.constants as Constants
from .constants import BLACK_PIECE, WHITE_PIECE, MOVE_COLOR, MOVE_RADIUS, SQUARE_SIZE, WIDTH, HEIGHT, LETTER_GAP_SIZE, OUTLINE_SIZE
from .board import Board
from menu.main_menu import MainMenu
import menu.gameState as GameState
class GameMaster:
    """Coordinates one game of "jumping horses": board state, piece
    selection, turn order and win detection (original comments in Latvian,
    translated to English)."""
    # Initialize with the pygame surface the board is drawn on.
    def __init__(self, surface):
        self._init()
        self.surface = surface
    # Redraw the board and, if a piece is selected, its legal moves.
    def update(self):
        self.board.draw(self.surface)
        if self.selectedPiece != None:
            self.draw_valid_moves(self.valid_moves)
    # Restart the game from scratch.
    def reset(self):
        self._init()
    # Clear the current state (white always moves first).
    def _init(self):
        self.selectedPiece = None
        self.turn = WHITE_PIECE
        self.board = Board()
        self.valid_moves = {}
    # Handle a click: select a piece or try to move the selected one.
    # Returns True when the click selected a piece or completed a move.
    def select(self, pos):
        if LETTER_GAP_SIZE < pos[0] < WIDTH+LETTER_GAP_SIZE and pos[1] < HEIGHT:
            row, col = self.get_row_col_from_mouse(pos)
            if self.selectedPiece:# something is already selected
                result = self._move(row, col)# move it if the move is legal
                self.selectedPiece = None
                if not result:# illegal target: retry the click as a fresh selection
                    self.selectedPiece = None
                    self.select(pos)
                return True
            # First selection, or the previous click was not a possible move.
            piece = self.board.get_piece(row, col)
            if piece != 0 and piece.color == self.turn:
                self.selectedPiece = piece
                self.valid_moves = self.board.get_valid_moves(piece)
                return True
        self.selectedPiece = None
        return False
    # Move the selected piece to (row, col); False when the move is illegal.
    def _move (self, row, col):
        piece = self.board.get_piece(row,col)
        if self.selectedPiece and piece == 0 and (row,col) in self.valid_moves:
            self.board.move(self.selectedPiece, row, col)
            self.change_turn()
        else:
            return False
        return True
    # Hand the turn to the other player and clear the cached moves.
    def change_turn(self):
        self.valid_moves = {}
        if self.turn == BLACK_PIECE:
            self.turn = WHITE_PIECE
        else:
            self.turn = BLACK_PIECE
    # Highlight the selected piece and draw a dot on every legal target square.
    def draw_valid_moves(self, moves):
        pygame.draw.circle(self.surface, (0,255,0), (self.selectedPiece.x, self.selectedPiece.y), self.selectedPiece.radius+OUTLINE_SIZE, 5)
        for move in moves:
            row, col = move
            pygame.draw.circle(self.surface, MOVE_COLOR, (col*SQUARE_SIZE + SQUARE_SIZE//2, row*SQUARE_SIZE + SQUARE_SIZE//2) , MOVE_RADIUS)
    # Convert a mouse position into board (row, col) coordinates.
    def get_row_col_from_mouse(self, pos):
        x, y = pos
        row = y // SQUARE_SIZE
        col = (x - LETTER_GAP_SIZE) // SQUARE_SIZE
        return row, col
    # Apply the board computed by the AI and pass the turn.
    def ai_move(self, board):
        self.board = board
        self.change_turn()
    # Check whether either side has won and update the global game state.
    def check_winner(self):
        if self.board.winner() == Constants.starting_player[1]:
            GameState.currentState = GameState.State.win
        elif self.board.winner() == Constants.starting_player[2]:
            GameState.currentState = GameState.State.lost
    def get_board(self):
        #print("get_board called")
        #self.board.print_board()
        return self.board
| perkonss/AI1darbs | jumpingHorses/game_master.py | game_master.py | py | 3,699 | python | en | code | 0 | github-code | 36 |
29116232633 | # -*- coding:utf-8 -*-
# This exercise seems somewhat pointless for Python:
# Python strings are immutable, i.e. they cannot be modified in place.
class Solution:
    """Replace every space in a string with '%20'."""

    # s: the source string; str.replace returns a new string because
    # Python strings are immutable.
    def replaceSpace(self, s):
        return s.replace(' ', '%20')
if __name__=='__main__':
s = Solution()
string = "we are happy"
print(s.replaceSpace(string)) | hushaoqi/The_sword_refers_to_offer | 5替换空格.py | 5替换空格.py | py | 373 | python | en | code | 0 | github-code | 36 |
10794984259 | from absl.testing import absltest
from absl.testing import parameterized
import more_itertools
import tensorflow as tf
from uncertainty_baselines.datasets import datasets
import data_preprocessor # local file import from experimental.language_structure.vrnn
import data_utils # local file import from experimental.language_structure.vrnn
import utils # local file import from experimental.language_structure.vrnn
INPUT_ID_NAME = data_preprocessor.INPUT_ID_NAME
INPUT_MASK_NAME = data_preprocessor.INPUT_MASK_NAME
DIAL_TURN_ID_NAME = data_preprocessor.DIAL_TURN_ID_NAME
class DataPreprocessorTest(parameterized.TestCase):
  """Shape and label-mask tests for DataPreprocessor across three synthetic
  dialog datasets."""

  def setUp(self):
    super().setUp()
    self.batch_size = 2

  # Factory hook; subclasses override to build a different preprocessor.
  def create_data_preprocessor(self, max_seq_length, **kwargs):
    del max_seq_length  # unused
    return data_preprocessor.DataPreprocessor(**kwargs)

  def load_dataset(self, dataset_name):
    """Load the test split of *dataset_name* with dialog-turn ids attached."""
    dataset_builder = datasets.get(
        dataset_name, split='test', add_dialog_turn_id=True)
    return dataset_builder.load(batch_size=self.batch_size).prefetch(1)

  @parameterized.named_parameters(('multiwoz_synth', 'multiwoz_synth'),
                                  ('simdial', 'simdial'),
                                  ('sgd_synth', 'sgd_synth'))
  def test_output_shape(self, dataset_name):
    # Every tensor produced by create_feature_and_label must match the
    # dataset's advertised dialog/sequence lengths and latent-state count.
    dataset = self.load_dataset(dataset_name)
    dialog_length = data_utils.get_dataset_max_dialog_length(dataset_name)
    seq_length = data_utils.get_dataset_max_seq_length(dataset_name)
    num_states = data_utils.get_dataset_num_latent_states(dataset_name)
    preprocessor = self.create_data_preprocessor(
        seq_length, num_states=num_states)
    dataset = dataset.map(preprocessor.create_feature_and_label)
    (input_1, input_2, label, label_mask, initial_state, initial_sample,
     domain_label) = more_itertools.first(dataset)
    for inputs in [input_1, input_2]:
      for key in [INPUT_ID_NAME, INPUT_MASK_NAME]:
        self.assertEqual([self.batch_size, dialog_length, seq_length],
                         inputs[key].shape.as_list())
    for inputs in [label, label_mask, domain_label]:
      self.assertEqual([self.batch_size, dialog_length], inputs.shape.as_list())
    for inputs in [initial_state, initial_sample]:
      self.assertEqual([self.batch_size, num_states], inputs.shape.as_list())

  @parameterized.named_parameters(('multiwoz_synth', 'multiwoz_synth'),
                                  ('simdial', 'simdial'),
                                  ('sgd_synth', 'sgd_synth'))
  def test_label_mask_by_dialog_turn_ids(self, dataset_name):
    # Only the turns whose ids were passed as labeled should get mask 1.
    dataset = self.load_dataset(dataset_name)
    inputs = more_itertools.first(dataset)
    dialog_turn_id_indices = [(0, 2), (1, 3), (1, 5)]
    dialog_turn_ids = tf.gather_nd(inputs[DIAL_TURN_ID_NAME],
                                   dialog_turn_id_indices)
    seq_length = data_utils.get_dataset_max_seq_length(dataset_name)
    num_states = data_utils.get_dataset_num_latent_states(dataset_name)
    preprocessor = self.create_data_preprocessor(
        seq_length,
        num_states=num_states,
        labeled_dialog_turn_ids=dialog_turn_ids)
    dataset = dataset.map(preprocessor.create_feature_and_label)
    (_, _, _, label_mask, _, _, _) = more_itertools.first(dataset)
    for i, row in enumerate(label_mask.numpy()):
      for j, val in enumerate(row):
        if (i, j) in dialog_turn_id_indices:
          self.assertEqual(val, 1)
        else:
          self.assertEqual(val, 0)
class BertDataPreprocessorTest(DataPreprocessorTest):
  """Re-runs every DataPreprocessorTest case with the BERT preprocessor."""

  def create_data_preprocessor(self, max_seq_length, **kwargs):
    # Wraps the TF-Hub BERT preprocessing model around the base pipeline.
    preprocess_tfhub_url = 'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3'
    bert_preprocess_model = utils.BertPreprocessor(preprocess_tfhub_url,
                                                   max_seq_length)
    return data_preprocessor.BertDataPreprocessor(bert_preprocess_model,
                                                  **kwargs)
# Run the absl test driver when executed as a script.
if __name__ == '__main__':
  absltest.main()
| HeyGF/uncertainty-baselines | experimental/language_structure/vrnn/data_preprocessor_test.py | data_preprocessor_test.py | py | 4,004 | python | en | code | null | github-code | 36 |
38568112529 | import collections
class Solution:
    def findLucky(self, arr):
        """Return the largest "lucky" number in *arr* (a value whose
        frequency equals the value itself), or -1 if none exists."""
        freq = collections.Counter(arr)
        lucky = [value for value, times in freq.items() if value == times]
        return max(lucky) if lucky else -1
# Demo: no value in [4,2,3] matches its own frequency, so this prints -1.
s = Solution()
print(s.findLucky([4,2,3]))
'''
using dictionary : takes longer
sorting
[2,3,4] ??
counter method
>>> array = [1,2,2,3,3,3]
>>> Counter(array)
Counter({3: 3, 2: 2, 1: 1})
>>> c = Counter(array)
>>> c.keys()
[1, 2, 3]
>>> c.values()
[1, 2, 3]
>>> c[3]
3
''' | archanakalburgi/Algorithms | daily_log/27_aug/lucky_num.py | lucky_num.py | py | 561 | python | en | code | 1 | github-code | 36 |
13974889908 | # 1. UPDATE VALUES IN DICTIONARIES AND LISTS
# Fixtures for exercise #1: nested list, list of dicts, dict of lists,
# and a list holding a single dict.
x = [ [5,2,3], [10,8,9] ]
students = [
    {'first_name': 'Michael', 'last_name' : 'Jordan'},
    {'first_name' : 'John', 'last_name' : 'Rosales'}
]
sports_directory = {
    'basketball' : ['Kobe', 'Jordan', 'James', 'Curry'],
    'soccer' : ['Messi', 'Ronaldo', 'Rooney']
}
z = [ {'x': 10, 'y': 20} ]
def update_values(x, students, sports_directory, z):
    """Mutate the given structures in place (exercise #1)."""
    # Bump y in the only dict of z.
    z[0]['y'] = 30
    # First soccer player becomes 'Andres'.
    sports_directory['soccer'][0] = 'Andres'
    # Rename the first student's last name.
    students[0]['last_name'] = 'Bryant'
    # Replace the 10 in the second inner list with 15.
    x[1][0] = 15
# Apply exercise #1's in-place mutations to the fixtures above.
update_values(x, students, sports_directory, z)
#2 ITERATE THROUGH A LIST OF DICTIONARIES
students = [
    {'first_name': 'Michael', 'last_name' : 'Jordan'},
    {'first_name' : 'John', 'last_name' : 'Rosales'},
    {'first_name' : 'Mark', 'last_name' : 'Guillen'},
    {'first_name' : 'KB', 'last_name' : 'Tonel'}
]
def iterate_dictionary(student):
    """Print one line per dictionary, concatenating "key - value" fragments
    (no separator between fragments, matching the exercise output)."""
    for record in student:
        line = ""
        for key in record:
            line += f"{key} - {record[key]}"
        print(line)
#3 GET VALUES FROM A LIST OF DICTIONARIES
def iterate_dictionary2(key_value, student):
    """For each dictionary, print the value stored under *key_value*,
    or a notice when the key is missing."""
    for record in student:
        if key_value in record:
            print(record[key_value])
        else:
            print("Key is not in dictionary")
# Exercise #3 demo calls: print every first name, then every last name.
iterate_dictionary2('first_name', students)
iterate_dictionary2('last_name', students)
#4
dojo = {
    'locations': ['San Jose', 'Seattle', 'Dallas', 'Chicago', 'Tulsa', 'DC', 'Burbank'],
    'instructors': ['Michael', 'Amy', 'Eduardo', 'Josh', 'Graham', 'Patrick', 'Minh', 'Devon']
}
def print_info(some_dict):
    """For each key print '<count> <KEY>' followed by the list's items."""
    for key, items in some_dict.items():
        print(f"{len(items)} {key.upper()}")
        for item in items:
            print(item)
print_info(dojo)  # Exercise #4 demo call.
| Brad-Stewart/Projects | functions_intermediate_2.py | functions_intermediate_2.py | py | 2,216 | python | en | code | 0 | github-code | 36 |
43046744636 | import typing
from datetime import datetime
from discord import Activity, ActivityType, Embed, Game, HTTPException, Status, Member, User, TextChannel, VoiceChannel, Role, Invite, Game, Emoji, PartialEmoji, Colour
from discord.ext import commands
from discordbot.botmodules import serverdata, audio
from discordbot.config import EXTENSIONFOLDER, EXTENSIONS, ALL_PREFIXES, MAIN_PREFIXES, DEBUG
from discordbot.utils import chunks
from discordbot.errors import ErrorMessage
from rich.traceback import install as install_traceback
install_traceback()
from rich.pretty import install as install_pretty
install_pretty()
#
# Maps annotation types to the discord.py converter used by
# MyContext.invoke_as to turn string arguments into rich objects.
CONVERTERS = {
    Member: commands.MemberConverter,
    User: commands.UserConverter,
    TextChannel: commands.TextChannelConverter,
    VoiceChannel: commands.VoiceChannelConverter,
    Role: commands.RoleConverter,
    Invite: commands.InviteConverter,
    Game: commands.GameConverter,
    Emoji: commands.EmojiConverter,
    PartialEmoji: commands.PartialEmojiConverter,
    Colour: commands.ColourConverter
}
# Own classes
class MyContext(commands.Context):
    """Command context enriched with database access, audio management and
    embed helpers that split oversized embeds across multiple messages."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.database = serverdata.DjangoConnection(self.author, self.guild)
        self.audio = audio.AudioManager(self)
        if self.guild is not None:
            self.data = serverdata.Server.getServer(self.guild.id)
    # Send an embed, splitting descriptions > 2048 chars or > 25 fields
    # across numbered part-messages (Discord embed limits).
    async def sendEmbed(self, title: str, *args, receiver=None, message: str = "", description: str = "", fields: list = [], **kwargs):
        if len(description) > 2048:
            desc = list(chunks(description, 2042))
            for i in range(len(desc)):
                if i == 0:
                    await (receiver or self).send(message, embed=self.getEmbed(f"{title} ({i+1}/{len(desc)})", *args, description=desc[i]+" [...]", fields=fields, **kwargs))
                elif i == len(desc)-1:
                    return await (receiver or self).send(embed=self.getEmbed(f"{title} ({i+1}/{len(desc)})", *args, description=desc[i], **kwargs))
                else:
                    await (receiver or self).send(embed=self.getEmbed(f"{title} ({i+1}/{len(desc)})", *args, description=desc[i]+" [...]", **kwargs))
        elif len(fields) > 25:
            flds = list(chunks(fields, 25))
            for i in range(len(flds)):
                if i == 0:
                    await (receiver or self).send(message, embed=self.getEmbed(f"{title} ({i+1}/{len(flds)})", *args, description=description, fields=flds[i], **kwargs))
                elif i == len(flds)-1:
                    return await (receiver or self).send(embed=self.getEmbed(f"{title} ({i+1}/{len(flds)})", *args, fields=flds[i], **kwargs))
                else:
                    await (receiver or self).send(embed=self.getEmbed(f"{title} ({i+1}/{len(flds)})", *args, fields=flds[i], **kwargs))
        else:
            return await (receiver or self).send(message, embed=self.getEmbed(title=title, *args, description=description, fields=fields, **kwargs))
    # Build an Embed truncated to Discord's limits, with a footer naming
    # the requesting user (USER/AVATARURL placeholders are substituted).
    def getEmbed(self, title:str, description:str="", color:int=0x000000, fields:list=[], inline=True, thumbnailurl:str=None, authorurl:str="", authorname:str=None, footertext:str="Angefordert von USER", footerurl:str="AVATARURL", timestamp=False):
        EMBED = Embed(title=title[:256], description=description[:2048], color=color or getattr(self.cog, "color", 0x000000))
        EMBED.set_footer(text=footertext.replace("USER", str(self.author.name+"#"+self.author.discriminator))[:2048], icon_url=footerurl.replace("AVATARURL", str(self.author.avatar_url)))
        if timestamp:
            EMBED.timestamp = datetime.utcnow() if timestamp is True else timestamp
        for field in fields[:25]:
            EMBED.add_field(name=field[0][:256], value=(field[1][:1018]+" [...]" if len(field[1]) > 1024 else field[1]), inline=bool(field[2] if len(field) > 2 else inline))
        if thumbnailurl:
            EMBED.set_thumbnail(url=thumbnailurl.strip())
        if authorname:
            if authorurl and ("https://" in authorurl or "http://" in authorurl):
                EMBED.set_author(name=authorname[:256], url=authorurl.strip())
            else:
                EMBED.set_author(name=authorname[:256])
        return EMBED
    # React to the invoking message with a check mark (True) or cross (False).
    async def tick(self, value=True):
        emoji = '\N{WHITE HEAVY CHECK MARK}' if value else '\N{CROSS MARK}'
        try:
            await self.message.add_reaction(emoji)
        except HTTPException:
            pass
    async def send_help(self):
        await self.invoke(self.bot.get_command("help"), self.invoked_with)
    # Re-run a command as another member; underscores in *command* denote
    # subcommands, and string args are converted per the callback's annotations.
    async def invoke_as(self, member, command, *args):
        _command = command.replace("_", " ")
        cmd = self.bot.get_command(_command)
        if cmd is None:
            raise ErrorMessage(f"Der Befehl `{ _command }` wurde nicht gefunden! \nPS: Benutze im Command bitte kein Prefix! Für Subcommands, benutze command_subcommand.")
        self.message.content = self.prefix+_command+self.message.content.split(command)[1]
        self.message.author = member
        self.author = member
        self.database = type(self.database)(self.author, self.guild)
        annotations = cmd.callback.__annotations__
        annotations.pop("return", None)
        arguments = list(args)
        for i, cls in enumerate(annotations.values()):
            if len(arguments) > i:
                if cls in CONVERTERS:
                    arguments[i] = await CONVERTERS[cls]().convert(self, arguments[i])
                else:
                    arguments[i] = cls(arguments[i])
        await self.invoke(cmd, *arguments)
class MyBot(commands.Bot):
    """Bot subclass with dynamic prefixes, MyContext contexts and a
    context-free embed builder."""
    def __init__(self, **kwargs):
        super().__init__(self.get_command_prefix, **kwargs)
    # In guilds only the main prefixes apply; in DMs all prefixes do.
    # A direct bot mention always works as a prefix.
    def get_command_prefix(self, client, message):
        if message.guild:
            prefixes = MAIN_PREFIXES
        else:
            prefixes = ALL_PREFIXES
        return commands.when_mentioned_or(*prefixes)(client, message)
    async def get_context(self, message, *, cls=MyContext):
        return await super().get_context(message, cls=cls)
    # Same as MyContext.getEmbed, but without any author context, so the
    # footer is only set when explicitly provided.
    def getEmbed(self, title: str, description: str = "", color: int = 0x000000, fields: list = [], inline=True, thumbnailurl: str = None, authorurl: str = "", authorname: str = None, footertext: str = None, footerurl: str = None, timestamp=False):
        EMBED = Embed(title=title[:256], description=description[:2048], color=color)
        if footertext:
            if footerurl:
                EMBED.set_footer(text=footertext[:2048], icon_url=footerurl)
            else:
                EMBED.set_footer(text=footertext[:2048])
        if timestamp:
            EMBED.timestamp = datetime.utcnow() if timestamp is True else timestamp
        for field in fields:
            EMBED.add_field(name=field[0][:256], value=(field[1][:1018]+" [...]" if len(field[1]) > 1024 else field[1]), inline=bool(
                field[2] if len(field) > 2 else inline))
        if thumbnailurl:
            EMBED.set_thumbnail(url=thumbnailurl.strip())
        if authorname:
            if authorurl and ("https://" in authorurl or "http://" in authorurl):
                EMBED.set_author(name=authorname[:256], url=authorurl.strip())
            else:
                EMBED.set_author(name=authorname[:256])
        return EMBED
# create Bot
# Create the bot: idle status, "listening to <prefix>help" activity,
# default help command disabled (a custom one is registered elsewhere).
bot = MyBot(
    description='Das ist eine Beschreibung!',
    case_insensitive=True,
    activity=Activity(type=ActivityType.listening, name=(MAIN_PREFIXES[0] if MAIN_PREFIXES else "/")+"help"),
    status=Status.idle,
    help_command=None,
    strip_after_prefix=True,
)
# Show the "typing…" indicator before every command runs.
@bot.before_invoke
async def before_invoke(ctx):
    await ctx.trigger_typing()
# Events
from discordbot.botevents import setup
setup(bot)
# Start
def run(TOKEN):
    """Start the bot with the given Discord token (blocks until shutdown)."""
    print("[Bot] - Starting with DEBUG="+str(DEBUG))
    bot.run(TOKEN, bot=True, reconnect=True)
if __name__ == "__main__":
print("[Bot] - You must run this bot via your manage.py file: python3.8 manage.py run-discorbot")
| AlexeiSur/bot12345 | discordbot/bot.py | bot.py | py | 8,041 | python | en | code | 0 | github-code | 36 |
73603910825 | from flask import Flask
from bitrix24 import *
import pycep_correios
app = Flask(__name__)
@app.route('/<id>', methods=['POST','GET'])
def cep(id):
    """Fill in address fields of a Bitrix24 deal and its contact.

    Fetches the deal *id*, normalizes its CEP (postal code), resolves the
    address via the Correios API, derives the São Paulo zone, and writes
    everything back to the deal and the linked contact.
    NOTE(review): the local variable `cep` shadows this function's name.
    """
    bx24 = Bitrix24('https://megaisencoes.bitrix24.com.br/rest/XXXX/XXXXXXX/')
    dealId = id
    chamada= bx24.callMethod("crm.deal.get", id=dealId)
    # UF_CRM_* are custom Bitrix24 field ids (CEP, complement, number, CPF, RG).
    cep1=chamada.get('UF_CRM_5DF0204B5D798')
    contactId = chamada.get('CONTACT_ID')
    complemento = chamada.get('UF_CRM_5DF0204B50C64')
    numero= chamada.get('UF_CRM_5DF0204B42F73')
    cpf = chamada.get('UF_CRM_5DF0204BA9076')
    rg = chamada.get('UF_CRM_5DF0204BB3CD4')
    # Strip separators before querying the Correios CEP service.
    cep = chr_remove(cep1, ' -.,')
    endereco = pycep_correios.get_address_from_cep(cep)
    bairro = endereco['bairro']
    cidade = endereco['cidade']
    rua = endereco['logradouro']
    uf = endereco['uf']
    cep = endereco['cep']
    # First five digits of the CEP determine the city zone.
    cep2 = cep[0:5]
    zona = getZona(int(cep2))
    bx24.callMethod("crm.deal.update", id=dealId, fields={'UF_CRM_1606240753':zona,'UF_CRM_1606228463':uf, 'UF_CRM_5DF0204B93074':bairro,'UF_CRM_5E18F32827B32':rua,'UF_CRM_5DF0204B68A91':cidade,'UF_CRM_5DF0204B5D798':cep})
    bx24.callMethod("crm.contact.update", id=contactId, fields={'UF_CRM_5DE6A1384D99D':complemento,'UF_CRM_5DE6A1384016A':numero,'UF_CRM_1575396704':rg,'UF_CRM_1575396694':cpf,'UF_CRM_1606395844':uf, 'UF_CRM_5DE6A139AD7B0':bairro,'UF_CRM_5E2F1DAA04C4B':rua,'UF_CRM_5DE6A13867AD5':cidade,'UF_CRM_5DE6A1385B8FC':cep})
    return '<h3>Endereço preenchido</h3>'
def chr_remove(old, to_remove):
    """Return *old* with every character in *to_remove* deleted.

    Uses a single str.translate pass instead of one str.replace scan per
    character to remove.
    """
    return old.translate({ord(ch): None for ch in to_remove})
def getZona(cep):
    """Map the first five digits of a CEP to a São Paulo city zone.

    :param int cep: first five digits of the postal code (e.g. 1310 for
        CEP 01310-xxx).
    :return: zone name, or a message when the CEP is outside the city.

    Fix: the original used strict comparisons (``cep > 1000 and cep < 1599``),
    which wrongly excluded the boundary codes of every range (e.g. 01000,
    02000, 04000); the ranges are now inclusive.
    """
    if 1000 <= cep <= 1599:
        return "Centro"
    elif 2000 <= cep <= 2999:
        return "Zona Norte"
    elif 3000 <= cep <= 3999 or 8000 <= cep <= 8499:
        return "Zona Leste"
    elif 4000 <= cep <= 4999:
        return "Zona Sul"
    elif 5000 <= cep <= 5899:
        return "Zona Oeste"
    else:
        return "Cep não pertence a cidade de São Paulo"
if __name__ == '__main__':
app.run(debug=True) | Gabriandl/Projetos-Mega | addCep/addCep.py | addCep.py | py | 2,148 | python | pt | code | 1 | github-code | 36 |
15538629853 | #!/usr/bin/env python
# coding: utf-8
# In[63]:
from PIL import Image, ImageDraw
import numpy as np
import math
import imageio
from copy import deepcopy
import cv2
def to_integral_image(img_arr):
    """
    Return the integral image (summed-area table) of *img_arr*.

    The result carries one extra leading row and column of zeros, so
    result[y+1, x+1] equals the sum of img_arr[:y+1, :x+1].

    Replaces the original O(rows*cols) Python double loop with two
    vectorized cumulative sums.

    :param np.ndarray img_arr: 2-D array of pixel values.
    :return: array of shape (rows+1, cols+1).
    :rtype: np.ndarray
    """
    integral_image_arr = np.zeros((img_arr.shape[0] + 1, img_arr.shape[1] + 1))
    integral_image_arr[1:, 1:] = np.cumsum(np.cumsum(img_arr, axis=0), axis=1)
    return integral_image_arr
def sum_region(integral_img_arr, top_left, bottom_right):
    """
    Sum of pixel values inside the rectangle spanned by the two corner
    tuples, computed from the padded integral image in O(1).
    """
    # Shift the top-left corner onto the zero-padding row/column.
    tl = (top_left[0] - 1, top_left[1] - 1)
    tr = (bottom_right[0], tl[1])
    bl = (tl[0], bottom_right[1])
    return (integral_img_arr[bottom_right] + integral_img_arr[tl]
            - integral_img_arr[tr] - integral_img_arr[bl])
class HaarFeature(object):
    """
    A two-rectangle haar-like feature together with the weak-learner state
    (threshold, polarity, error, boosting weight) fitted during training.
    """
    def __init__(self, feature_type, top_left, bottom_right, threshold, polarity, error, weight, flag):
        """
        Create a haar-like feature.

        :param tuple feature_type: (1,2) = two stacked halves,
            (2,1) = two side-by-side halves.
        :param tuple top_left: (x, y) of the feature window.
        :param tuple bottom_right: (x, y) of the feature window.
        :param threshold: decision threshold on the feature score.
        :param polarity: +1/-1 direction of the threshold test.
        :param error: weighted training error of this weak learner.
        :param weight: boosting (AdaBoost) weight of this weak learner.
        :param flag: 1 once the feature has been selected by a boosting round.
        """
        self.type = feature_type
        self.top_left = top_left
        self.bottom_right = bottom_right
        self.width = bottom_right[0]-top_left[0]
        self.height = bottom_right[1]-top_left[1]
        self.threshold = threshold
        self.polarity = polarity
        self.error=error
        self.weight=weight
        self.flag=flag
    def get_score(self, int_img):
        """
        Feature score on the given integral image: sum of the first half
        minus the sum of the second half of the window.
        """
        score = 0
        if self.type == (1,2):
            # Top half minus bottom half.
            first = sum_region(int_img, self.top_left, (self.top_left[0] + self.width, int(self.top_left[1] + self.height / 2)))
            second = sum_region(int_img, (self.top_left[0], int(self.top_left[1] + self.height / 2)), self.bottom_right)
            score = first - second
        elif self.type == (2,1):
            # Left half minus right half.
            first = sum_region(int_img, self.top_left, (int(self.top_left[0] + self.width / 2), self.top_left[1] + self.height))
            second = sum_region(int_img, (int(self.top_left[0] + self.width / 2), self.top_left[1]), self.bottom_right)
            score = first - second
        return score
    def get_vote(self, int_img):
        """
        Vote of this weak learner for the given integral image: +1 or -1
        depending on which side of the threshold the score falls (after
        applying the polarity).
        """
        score = self.get_score(int_img)
        return 1 if self.polarity * (score-self.threshold) >= 0 else -1
#helper function to sum positive numbers in an array
def sum_positive(array):
    """Return the sum of the strictly positive entries of *array*.

    Replaces the manual index loop with a generator expression.
    """
    return sum(v for v in array if v > 0)
#helper function to sum negative numbers in an array
def sum_negative(array):
    """Return the sum of the strictly negative entries of *array*.

    Replaces the manual index loop with a generator expression.
    """
    return sum(v for v in array if v < 0)
#given an array of lables and weights of each image (label), find the threshold for this weaker learner
def find_threshold(array, weights):
    """Find the best decision threshold for one feature.

    *array* holds the labels (+1/-1) of the images sorted by feature score
    and *weights* their boosting weights.  A single left-to-right sweep
    maintains the weighted label mass on each side of a candidate split and
    keeps the split with the lowest weighted error.

    :return: (index, polarity, error) — the image index whose score becomes
        the threshold, the polarity of the test, and the weighted error.
    """
    index=1
    p=1
    l=len(array)
    output_error=1
    # Signed weighted labels: positive entries are faces, negative background.
    temp=np.multiply(array,weights)
    # L*/R* track the positive/negative mass left and right of the split.
    Lp=0
    Ln=0
    Rp=sum_positive(temp)
    Rn=sum_negative(temp)
    #try every index
    for i in range(1,l):
        t=temp[i]
        if t>0:
            Lp=Lp+t
            Rp=Rp-t
        else:
            Ln=Ln+t
            Rn=Rn-t
        # Error of the better of the two polarity choices at this split.
        error=min(Lp+abs(Rn),abs(Ln)+Rp)
        if error < output_error:
            output_error=error
            index = i
            if Lp+abs(Rn) < Rp+abs(Ln):
                p=1
            else:
                p=-1
    #return the best polarity, the index of the image (whose score will be the threshold),
    #and the error of this weak learner
    return (index,p,output_error)
def learn(features, images, labels, weights):
    """
    Run one AdaBoost round: fit a threshold/polarity for every feature,
    pick the unused feature with the lowest weighted error, and update the
    image weights.

    :param features: list of HaarFeature candidates (mutated in place).
    :param images: list of integral images.
    :param labels: +1 (face) / -1 (background) per image.
    :param weights: current boosting weight per image.
    :return: (index of the chosen feature, normalized updated weights).

    Fix: the weight-update loop previously used ``features[i]`` (the i-th
    candidate feature, unrelated to the selection) instead of the chosen
    weak learner ``features[fi]``; AdaBoost requires the selected
    classifier's vote.
    """
    lenf = len(features)
    leni = len(labels)
    fi = 0
    min_error = 1
    for i in range(lenf):
        # Score every image with feature i, keeping (score, label, weight).
        temp = np.zeros((leni, 3))
        for j in range(leni):
            img = images[j]
            x = features[i].get_score(img)
            y = labels[j]
            temp[j][0] = x
            temp[j][1] = y
            temp[j][2] = weights[j]
        # Sort by score so find_threshold can sweep split points in order.
        temp = temp[temp[:, 0].argsort()]
        tup = find_threshold(temp[:, 1], temp[:, 2])
        index = tup[0]
        features[i].threshold = temp[index][0]
        features[i].polarity = tup[1]
        error = tup[2]
        # Track the best feature not selected in a previous round.
        if (error < min_error) and (features[i].flag == 0):
            min_error = error
            fi = i
    # Mark the chosen weak learner and record its error.
    features[fi].flag = 1
    features[fi].error = min_error
    # Classifier weight alpha and normalizer z; a zero-error learner gets a
    # large fixed weight and skips the re-weighting step.
    if min_error > 0:
        z = 2 * (min_error * (1 - min_error)) ** (1 / 2)
        a = 0.5 * np.log((1 - min_error) / min_error)
    else:
        a = 2
        z = 0
    features[fi].weight = a
    # AdaBoost re-weighting with the SELECTED learner's vote (bug fix).
    if z != 0:
        for i in range(leni):
            vote = features[fi].get_vote(images[i])
            weights[i] = weights[i] * math.exp(-a * labels[i] * vote) / z
    # Normalize so the weights stay a distribution.
    s = np.sum(weights)
    weights = weights / s
    return (fi, weights)
# Generate every candidate two-rectangle feature on a 64x64 window:
# anchor points stride by 3, extents up to 16 px stride by 4, clipped at
# the window border.  (1,2) features split vertically, (2,1) horizontally.
all_features=[]
for i in range(1,65,3):
    for j in range(1,65,3):
        m1=min(i+16,65)
        m2=min(j+16,65)
        for k1 in range(i,m1,4):
            for h1 in range(j+3,m2,4):
                f = HaarFeature((1,2),(i,j),(k1,h1),0,1,0,0,0)
                all_features.append(f)
        for k2 in range(i+3,m1,4):
            for h2 in range(j,m2,4):
                f = HaarFeature((2,1),(i,j),(k2,h2),0,1,0,0,0)
                all_features.append(f)
# given a classifier, this function outputs whether an image is a face or not according to this classifier
def test(img, classifiers, threshold=3):
l=len(classifiers)
s=0
for i in range(l):
s=s+classifiers[i].weight*classifiers[i].get_vote(img)
if s>=threshold:
return (1,s)
return (-1,s)
# Load the training data: 1000 face crops (label +1) and 1000 background
# crops (label -1).  Each image is averaged over its color channels and
# converted to its integral image up front.
images=[]
labels=[]
for i in range (1000):
    j=i
    j=str(j)
    im = imageio.imread('Downloads/faces/face'+j+'.jpg')
    im=np.array(im).mean(axis=2)
    im=to_integral_image(im)
    images.append(im)
    labels.append(1)
for i in range (1000):
    j=i
    j=str(j)
    im = imageio.imread('Downloads/background/'+j+'.jpg')
    im=np.array(im).mean(axis=2)
    im=to_integral_image(im)
    images.append(im)
    labels.append(-1)
# Train an attentional cascade of up to 6 stages, each a strong classifier
# of 5 boosted weak learners.  NOTE(review): `iter` shadows the builtin.
cascade=[]
thres=[]
iter=0
for j in range(6):
    classifiers=[]
    for i in range(5):
        # Re-initialize uniform weights and run one boosting round.
        l=len(images)
        weights=np.full(l,1/l)
        t = learn(all_features, images, labels, weights)
        p=t[0]
        weights=t[1]
        classifiers.append(all_features[p])
    l=len(images)
    mini=0
    # Lower the stage threshold (capital Theta) to the minimum score of any
    # face so that no face is rejected by this stage.
    for i in range(l):
        t=test(images[i],classifiers)
        if labels[i]==1:
            if t[1]<mini:
                mini=t[1]
    thres.append(deepcopy(mini))
    cascade.append(deepcopy(classifiers))
    # Drop the backgrounds this stage already classifies correctly; faces
    # and misclassified backgrounds move on to the next stage.
    images_temp=[]
    labels_temp=[]
    for i in range(l):
        t=test(images[i],classifiers,mini)
        if (labels[i]!=t[0]) or (labels[i]==1):
            images_temp.append(images[i])
            labels_temp.append(labels[i])
    images=deepcopy(images_temp)
    labels=deepcopy(labels_temp)
    # Stop early when (almost) no training data is left before all stages
    # are built (error already ~0).
    iter=iter+1
    if len(images)<2:
        break
def get_v(cascade, int_img):
    """
    Confidence score used to suppress overlapping detections: the sum of
    each weak classifier's weight times its absolute margin from the
    threshold.
    """
    total = 0
    for clf in cascade:
        margin = clf.polarity * (clf.get_score(int_img) - clf.threshold)
        total += clf.weight * abs(margin)
    return total
# Slide the trained cascade over the test image with a 64x64 window and a
# stride of 4, recording a confidence score for windows accepted by at
# least 6 stage passes.
coor=np.zeros((1280,1600))
im1=Image.open('Downloads/test_img.jpg')
# Add RGB channels to the grayscale test image so red squares can be drawn.
im1=cv2.cvtColor(np.array(im1),cv2.COLOR_GRAY2RGB)
img=Image.fromarray(im1, 'RGB')
im2=Image.open('Downloads/test_img.jpg')
imay=np.array(im2)
draw=ImageDraw.Draw(img)
# Score every candidate window position.
for i in range(0,1216,4):
    for j in range(0,1536,4):
        y=to_integral_image(imay[i:i+64,j:j+64])
        flag=0
        for k in range(iter):
            t=test(y,cascade[k])
            if t[0]==1:
                flag=flag+1
        if (flag>=6):
            v=0
            for k in range(iter):
                v=v+get_v(cascade[k],y)
            coor[i,j]=v
# Non-maximum suppression: keep a detection only if it is the local
# confidence maximum within a 15x15 neighborhood of stride-4 positions.
for i in range(0,1216,4):
    for j in range(0,1536,4):
        ff=1
        for z1 in range(-7,8):
            for z2 in range(-7,8):
                if coor[i,j]<coor[i-4*z1,j-4*z2]:
                    ff=0
        if (ff==1) and coor[i,j]>0:
            draw.line([(j,i),(j,i+64)],fill='red')
            draw.line([(j,i),(j+64,i)],fill='red')
            draw.line([(j+64,i),(j+64,i+64)],fill='red')
            draw.line([(j,i+64),(j+64,i+64)],fill='red')
img.save('Downloads/result4.jpg')
| lizihao1999/Viola-Jones | hw4.py | hw4.py | py | 9,985 | python | en | code | 2 | github-code | 36 |
35000637382 |
import numpy as np
def compute_errors(gt, pred):
    """Compute depth-estimation error metrics between prediction and ground truth.

    Adapted from the "Beyond Image to Depth" GitHub repository.

    Parameters
    ----------
    gt, pred : np.ndarray
        Ground-truth and predicted depth maps of the same shape.  Only
        pixels with ``gt > 0`` (valid ground-truth depth) are evaluated.

    Returns
    -------
    tuple
        ``(abs_rel, rmse, a1, a2, a3, log_10, mae)`` where a1/a2/a3 are
        the usual ``delta < 1.25**k`` accuracy ratios.  Any metric that
        evaluates to NaN (e.g. an empty valid mask) is reported as 0.0,
        matching the original behaviour.
    """
    def _zero_if_nan(x):
        # Normalise NaN to 0.0 so downstream averaging over many samples
        # does not propagate NaNs (replaces the old `x != x` idiom).
        return 0.0 if np.isnan(x) else x

    # Evaluate only on pixels with a valid (positive) ground-truth depth.
    mask = gt > 0
    pred = pred[mask]
    gt = gt[mask]

    # Threshold accuracy: fraction of pixels whose ratio to GT is < 1.25**k.
    thresh = np.maximum(gt / pred, pred / gt)
    a1 = _zero_if_nan((thresh < 1.25).mean())
    a2 = _zero_if_nan((thresh < 1.25 ** 2).mean())
    a3 = _zero_if_nan((thresh < 1.25 ** 3).mean())

    rmse = _zero_if_nan(np.sqrt(((gt - pred) ** 2).mean()))
    abs_rel = _zero_if_nan(np.mean(np.abs(gt - pred) / gt))
    log_10 = _zero_if_nan((np.abs(np.log10(gt) - np.log10(pred))).mean())
    mae = _zero_if_nan(np.abs(gt - pred).mean())

    return abs_rel, rmse, a1, a2, a3, log_10, mae
39006259890 |
import os
import anndata
import scanpy as sc
from matplotlib import rcParams
import sccross
rcParams["figure.figsize"] = (4, 4)
PATH = "s01_preprocessing"
os.makedirs(PATH, exist_ok=True)
# Load the three modalities: scRNA-seq, methylation, and scATAC-seq.
rna = anndata.read_h5ad("Saunders-2018.h5ad")
met = anndata.read_h5ad("Luo-2017.h5ad")
atac = anndata.read_h5ad("10x-ATAC-Brain5k.h5ad")
# RNA: normalise/log/scale for PCA, then restore the raw counts as .X
# (the in-place ops below mutate .X, so the raw matrix is stashed first).
rna.layers["raw_count"] = rna.X.copy()
sc.pp.normalize_total(rna)
sc.pp.log1p(rna)
sc.pp.scale(rna, max_value=10)
sc.tl.pca(rna, n_comps=100, use_highly_variable=True, svd_solver="auto")
rna.X = rna.layers["raw_count"]
del rna.layers["raw_count"]
sc.pp.neighbors(rna, n_pcs=100, metric="cosine")
sc.tl.umap(rna)
# Fix the display/order of RNA cell-type categories.
rna.obs["cell_type"].cat.set_categories([
    "Layer2/3", "Layer5a", "Layer5", "Layer5b", "Layer6",
    "Claustrum", "CGE", "MGE"
], inplace=True)
# Methylation: same recipe starting from the "norm" layer.
met.X = met.layers["norm"].copy()
sc.pp.log1p(met)
sc.pp.scale(met, max_value=10)
sc.tl.pca(met, n_comps=100, use_highly_variable=True, svd_solver="auto")
met.X = met.layers["norm"]
del met.layers["norm"]
sc.pp.neighbors(met, n_pcs=100, metric="cosine")
sc.tl.umap(met)
met.obs["cell_type"].cat.set_categories([
    "mL2/3", "mL4", "mL5-1", "mDL-1", "mDL-2", "mL5-2",
    "mL6-1", "mL6-2", "mDL-3", "mIn-1", "mVip",
    "mNdnf-1", "mNdnf-2", "mPv", "mSst-1", "mSst-2"
], inplace=True)
# ATAC: LSI embedding instead of PCA, then the same neighbours/UMAP steps.
sccross.data.lsi(atac, n_components=100, use_highly_variable=False, n_iter=15)
sc.pp.neighbors(atac, n_pcs=100, use_rep="X_lsi", metric="cosine")
sc.tl.umap(atac)
atac.obs["cell_type"].cat.set_categories([
    "L2/3 IT", "L4", "L5 IT", "L6 IT", "L5 PT",
    "NP", "L6 CT", "Vip", "Pvalb", "Sst"
], inplace=True)
fig = sc.pl.umap(atac, color="cell_type", title="scATAC-seq cell type", return_fig=True)
fig.savefig(f"{PATH}/atac_ct.pdf")
# Derive an RNA-like gene-activity view from the ATAC data, then persist all.
atac2rna = sccross.data.geneActivity(atac)
rna.write("rna_preprocessed.h5ad", compression="gzip")
met.write("met_preprocessed.h5ad", compression="gzip")
atac.write("atac_preprocessed.h5ad", compression="gzip")
atac2rna.write("atac2rna.h5ad", compression="gzip")
| mcgilldinglab/scCross | data/unmatched_mouse_cortex/preprocess.py | preprocess.py | py | 2,006 | python | en | code | 0 | github-code | 36 |
24922748658 | # Given files of user data scrapped from Yelp on different dates
# Merge the files so that each row represents one user
# and his/her elite status on each date
# For example, Jan 4: Not Elite, Jan 5: Not Elite, Jan 6: Elite
# Find on what date the user's status changed to Elite
import csv
import copy
def combineCityEliteFiles(in_filename1, in_filename2, out_filename, newDate):
    """Merge one day's scraped Elite flags into the running per-user table.

    ``in_filename1`` is the accumulated CSV (one row per user);
    ``in_filename2`` is the CSV scraped on ``newDate`` and takes priority.
    Each output row is the matching Elite flag from ``in_filename2``
    prepended to the accumulated row ("NoData" when the user is absent).
    Users that appear only in ``in_filename2`` are appended with 'Missing'
    placeholders for the historical columns we never observed.

    Column convention: in every data row the user URL is the 5th column
    from the end and the Elite flag is the 6th from the end.
    """
    # 'rU'/'wb' were Python 2 csv idioms; Python 3's csv module requires
    # text-mode files opened with newline='' ('rU' was removed in 3.11).
    with open(in_filename1, 'r', newline='') as in_f1,\
            open(in_filename2, 'r', newline='') as in_f2, \
            open(out_filename, 'w', newline='') as out_f:
        dataReader1 = csv.reader(in_f1)
        dataReader2 = csv.reader(in_f2)  # error file has higher priority
        originalList = list(dataReader1)
        errorList = list(dataReader2)
        dataWriter = csv.writer(out_f)
        indexUserURL = -5    # user URL column, counted from the row's end
        indexEliteFlag = -6  # Elite flag column, counted from the row's end
        # Index the priority file by user URL.
        errorDict = {}
        errorLineDict = {}
        for errorLine in errorList:
            userURL = errorLine[indexUserURL]
            errorDict[userURL] = errorLine[indexEliteFlag]
            errorLineDict[userURL] = errorLine
        # Track which priority-file users got matched.  Values are plain
        # strings, so a shallow copy suffices (the old deepcopy was overkill).
        remaining = dict(errorDict)
        for line1 in originalList:
            userURL = line1[indexUserURL]
            newEliteFlag = errorDict.get(userURL, None)
            if "property" in line1[indexUserURL]:
                # Header-like rows (URL column contains "property") just
                # get the date prepended and are never matched/popped.
                dataWriter.writerow([newDate] + line1)
                continue
            if newEliteFlag is None:
                dataWriter.writerow(["NoData"] + line1)
            else:
                dataWriter.writerow([newEliteFlag] + line1)
            remaining.pop(userURL, None)
        # Users present only in the priority file: pad the unseen historical
        # columns with 'Missing' and keep the trailing 5 identifying fields.
        if len(remaining) != 0:
            numberOfEntries = len(originalList[0])
            numberOfOthers = 5
            numberOfEliteDataMissing = numberOfEntries - numberOfOthers
            for errorUserURL in remaining.keys():
                errorLine = errorLineDict[errorUserURL]
                errorEliteFlag = errorDict[errorUserURL]
                if "property" in errorLine[indexUserURL]:
                    continue
                newLine = [errorEliteFlag] + ['Missing'] * numberOfEliteDataMissing + \
                    errorLine[-5:]
                dataWriter.writerow(newLine)
    return
def main():
    """Fold three daily scrape files into the accumulated table.

    Each step reads the previous step's output file, so the steps must
    stay in this order (Jan 8, then Jan 5, then Jan 4).
    """
    merge_steps = [
        ("mergeJan91011Jan12.csv", "Jan8/EliteFlag/cityLabelCombinedEliteFlagJan 8 - Tampa to Cleveland.csv", "mergeJan891011Jan12.csv", "Jan8"),
        ("mergeJan891011Jan12.csv", "Jan5/EliteFlag/cityLabelCombinedEliteFlagJan 5 - Tampa to Cleveland.csv", "mergeJan5891011Jan12.csv", "Jan5"),
        ("mergeJan5891011Jan12.csv", "Jan4/EliteFlag/cityLabelCombinedEliteFlagJan 4 - Tampa to Cleveland.csv", "mergeJan45891011Jan12.csv", "Jan4"),
    ]
    for accumulated, daily, merged, day in merge_steps:
        combineCityEliteFiles(accumulated, daily, merged, day)
if __name__ == "__main__":
main()
| heejokeum/MitSloanYelpResearch | mergeDifferentDates.py | mergeDifferentDates.py | py | 2,833 | python | en | code | 0 | github-code | 36 |
4957095145 | import pytest
@pytest.mark.asyncio
async def test_healthcheck(event_loop_fixture_unused=None):
    """Placeholder docstring removed below; see real signature."""
| riuzaver/market-temp-test | {{cookiecutter.project_name}}/tests/test_healthcheck.py | test_healthcheck.py | py | 255 | python | en | code | 0 | github-code | 36 |
32017330441 | import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
import numpy as np
import math
if __name__ == '__main__':
    SZ = 64  # image size (pixels per side)
    SP = int(SZ / 2)  # max/min coordinate
    im = np.zeros((SZ, SZ), np.uint8)  # image as an array
    all_i = range(-SP, SP)
    all_j = range(-SP, SP)
    # fp: number of periods across one image
    fp = 4.0  # 1.0;2.0;4.0;8.0;16.0
    fr = fp / SZ  # reduced (normalised) frequency
    # Negative indices wrap around, which fills the array symmetrically.
    # NOTE(review): 3.14 approximates pi, and 128 + 128*sin can reach 256,
    # which would wrap in uint8 for sin values of exactly 1 — presumably
    # acceptable for this demo; confirm if exact rendering matters.
    for i in all_i:
        for j in all_j:
            im[i, j] = 128 + 128 * math.sin(2 * 3.14 * fr * i)
    # 2-D grayscale view of the sinusoidal pattern.
    plt.figure(1)
    plt.clf()
    plt.imshow(im, cmap=plt.cm.gray)
    # 3-D surface view of the same data.
    IG, JG = np.meshgrid(all_i, all_j)
    fig = plt.figure(2)
    plt.clf()
    ax = Axes3D(fig)
    ax.plot_surface(IG, JG, im, rstride=1, cstride=1, cmap=cm.jet)
    plt.show()
27254983972 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
------------------------------------------------------------------------------
Script to obtain uncertainties of heavy mass spectrum and widhts via bootstrap
Authors: A. Ramirez-Morales (andres.ramirez.morales@cern.ch) and
H. Garcia-Tecocoatzi
-----------------------------------------------------------------------------
"""
import sys
import os
from iminuit import Minuit
import numpy as np
import datetime
import pandas as pd
import json
# framework modules
from bottomfw.baryons import data_preparation as dp
from bottomfw.baryons.bottom_three_quark import BottomThreeQuark
workpath = os.getcwd()
# for running batch jobs with htcondor:
# argv = [script, batch_number, workpath, baryon_group]
batch_number = None
run_baryons = None
if len(sys.argv) == 4:
    batch_number = sys.argv[1]
    workpath = sys.argv[2]
    run_baryons = sys.argv[3]
# Load the run configuration; the CLI baryon group (if given) overrides it.
config = None
with open(workpath+"/config/three_quark_config.json", "r") as jsonfile:
    config = json.load(jsonfile)
if config is not None:
    if run_baryons is None:
        run_baryons = config["baryons"]
    n_events = config["n_events"]
    asymmetric = config["asymmetric_errors"]
    decay_width = config["decay_width"]
    decay_width_em = config["decay_width_em"]
    bootstrap = config["bootstrap_mass"]
    bootstrap_width = config["bootstrap_st_dec"]
    bootstrap_width_em = config["bootstrap_em_dec"]
    prev_params = config["previous_param"]
else:
    sys.exit('Please provide a configuration file. Try again!')
print('Getting paper results for:', run_baryons)
# input parameters: quark counts, state coefficients and mode flags per baryon
param_v,param_w,param_x,param_y,param_z,param_q1,param_q2,param_q3,\
param_is_rho,param_is_lam,param_is_omega,param_is_cascade,param_is_sigma = dp.fetch_data_extended()
def model(q1, q2, q3, is_rho, is_lam, is_omega, is_cascade, is_sigma, v, w, x, y, z, m1, m2, m3, k, a, b, e, g):
    """Baryon mass model: m1 is the bottom quark, m2 strange, m3 light.

    The mass is the sum of the constituent quark masses, a harmonic term
    ``v*k*sqrt(1/mu)`` built from the reduced mass ``mu`` of the excited
    mode (rho or lambda, selected by the ``is_*`` flags), and the
    contributions ``a, b, e, g`` weighted by the state coefficients
    ``w, x, y, z``.
    """
    quark_masses = q1 * m1 + q2 * m2 + q3 * m3
    # Reduced mass of the rho mode involves only the light/strange quarks.
    mu_rho = is_omega * m2 + is_cascade * ((m2 + m3) / 2) + is_sigma * m3
    # Reduced mass of the lambda mode couples them to the heavy bottom quark.
    mu_lam = (is_omega * ((3 * m2 * m1) / (2. * m2 + m1))
              + is_cascade * ((1.5 * (m2 + m3) * m1) / (m1 + m2 + m3))
              + is_sigma * ((3. * m3 * m1) / (2. * m3 + m1)))
    harmonic = v * k * np.sqrt(1. / (is_rho * mu_rho + is_lam * mu_lam))
    return quark_masses + harmonic + w * a + x * b + y * e + z * g
def least_squares(m1, m2, m3, k, a, b, e, g):
    """Chi-square-like cost for Minuit: sum((model - exp_m)**2 / yvar_2).

    Reads the module-level fit inputs ``param_*`` and the resampled
    experimental masses ``exp_m`` (reassigned on every bootstrap
    iteration before `fit` is called).
    """
    # y_var_0 = sigma_0 # best sigma_0=12.47 (bottom)
    # yvar_0 = y_var_0*np.ones(16)
    # yvar = y_errors_exp
    # yvar_2 = np.power(yvar_0, 2) + np.power(yvar, 2)
    # Fixed variance placeholder (the commented lines above show the
    # intended per-state variance combination).
    yvar_2 = 0.001
    pred_m = model(param_q1, param_q2, param_q3, param_is_rho, param_is_lam,
                   param_is_omega, param_is_cascade, param_is_sigma, param_v,
                   param_w, param_x, param_y, param_z,
                   m1, m2, m3, k, a, b, e, g)
    yval_2 = np.power( (pred_m - exp_m), 2)
    return np.sum( np.divide(yval_2, yvar_2) )
def fit(least_squares):
    """Minimise the given cost with iminuit's MIGRAD and return the Minuit object.

    The parameter intentionally shadows the module-level ``least_squares``
    function; the caller passes the cost function explicitly.
    """
    # Starting values of 1/0 let MIGRAD move inside the limits below.
    m = Minuit(least_squares, m1=1, m2=1, m3=1, k=0, a=0, b=0, e=0, g=0)#1400, m2=300, m3=250, k=0, a=0, b=0, e=0, g=0)
    # Physical bounds (MeV) on the bottom, strange and light quark masses.
    m.limits['m1'] = (4000, 6000)
    m.limits['m2'] = (400, 470)
    m.limits['m3'] = (250, 300)
    m.errordef=Minuit.LEAST_SQUARES
    m.migrad()
    return m
def sample_gauss(mu, sigma):
    """Draw 10000 samples from N(mu, sigma) with the global NumPy RNG."""
    sample_size = 10000
    return np.random.normal(loc=mu, scale=sigma, size=sample_size)
def random(sample, random_n=1):
    """Draw one element from ``sample`` using the global NumPy RNG.

    NOTE: the name shadows the stdlib ``random`` module (kept for the
    existing call sites) and ``random_n`` is unused, retained only for
    call-site compatibility.
    """
    picked = np.random.choice(sample)  # size defaults to None -> scalar
    return picked
# arrays to store the sampled parameters (grown with np.append per iteration)
sampled_k,sampled_a,sampled_b,sampled_e,sampled_g = ([]),([]),([]),([]),([])
sampled_m1,sampled_m2,sampled_m3 = ([]),([]),([])
# arrays to store sampled correlation coefficients between fit parameters
rho_m2m1,rho_m3m1,rho_km1,rho_am1,rho_bm1,rho_em1,rho_gm1 = ([]),([]),([]),([]),([]),([]),([])
rho_m3m2,rho_km2,rho_am2,rho_bm2,rho_em2,rho_gm2,rho_km3 = ([]),([]),([]),([]),([]),([]),([])
rho_am3,rho_bm3,rho_em3,rho_gm3,rho_ak,rho_bk,rho_ek = ([]),([]),([]),([]),([]),([]),([])
rho_gk, rho_ba, rho_ea, rho_ga, rho_eb,rho_gb,rho_ge = ([]),([]),([]),([]),([]),([]),([])
# start bootstrap
start = datetime.datetime.now()
sigma_model = 12.47**2 # to be obtained with optimization (Li.Jin)
# gaussian pdf with the measured value and with experimental and model(sigma_model) uncertainties
# Omegas
gauss_6061 = sample_gauss(6045.2, np.power((1.20**2 + sigma_model), 0.5 )) # PDG::Direct
gauss_6316 = sample_gauss(6315.6, np.power((0.60**2 + sigma_model), 0.5 )) # PDG::Direct
gauss_6330 = sample_gauss(6330.3, np.power((0.60**2 + sigma_model), 0.5 )) # PDG::Direct
gauss_6340 = sample_gauss(6339.7, np.power((0.60**2 + sigma_model), 0.5 )) # PDG::Direct
gauss_6350 = sample_gauss(6349.8, np.power((0.60**2 + sigma_model), 0.5 )) # PDG::Direct
# Cascade b sextet
gauss_5935 = sample_gauss(5935.0, np.power((0.05**2 + sigma_model), 0.5 )) # PDG::Direct
gauss_5953 = sample_gauss(5953.8, np.power((1.62**2 + sigma_model), 0.5 )) # PDG::Average
gauss_6328 = sample_gauss(6227.4, np.power((1.69**2 + sigma_model), 0.5 )) # PDG::Average (decided to be cascade prime)
# Sigma b
gauss_5813 = sample_gauss(5813.1, np.power((2.55**2 + sigma_model), 0.5 )) # PDG::Average
gauss_5837 = sample_gauss(5832.5, np.power((2.23**2 + sigma_model), 0.5 )) # PDG::Average
gauss_6097 = sample_gauss(6096.9, np.power((2.10**2 + sigma_model), 0.5 )) # PDG::Average
# Lambda b
gauss_5617 = sample_gauss(5619.6, np.power((0.17**2 + sigma_model), 0.5 )) # PDG::Direct
gauss_5912 = sample_gauss(5912.2, np.power((0.17**2 + sigma_model), 0.5 )) # PDG::Direct
gauss_5920 = sample_gauss(5920.1, np.power((0.17**2 + sigma_model), 0.5 )) # PDG::Direct
gauss_6146 = sample_gauss(6146.2, np.power((0.40**2 + sigma_model), 0.5 )) # PDG::Direct (not in the fit)
gauss_6152 = sample_gauss(6152.5, np.power((0.40**2 + sigma_model), 0.5 )) # PDG::Direct (not in the fit)
gauss_6070 = sample_gauss(6072.3, np.power((2.90**2 + sigma_model), 0.5 )) # PDG::Direct (not in the fit)
# Cascades b anti-3-plet
gauss_5794 = sample_gauss(5794.5, np.power((2.61**2 + sigma_model), 0.5 )) # PDG::Average
gauss_6100 = sample_gauss(6100.3, np.power((0.60**2 + sigma_model), 0.5 )) # PDG::Direct
gauss_6327 = sample_gauss(6329.9, np.power((2.72**2 + sigma_model), 0.5 )) # LHCB::Average (not in the fit)
# plug here the sigma_0 optimization lines from data_utils.py
# counter of bootstrap iterations with a valid (converged) covariance
count = 0
# construct the simulated sampling distribution (bootstrap technique):
# each iteration resamples one mass per state and refits all parameters
for i in range(n_events): # max 10000 with decays included, computationally expensive
    #if(states=='All'):
    exp_m = np.array([ # measured baryon masses
        # omegas
        random(gauss_6061),
        random(gauss_6316),
        random(gauss_6330),
        random(gauss_6340),
        random(gauss_6350),
        # Cascade
        random(gauss_5935),
        random(gauss_5953),
        random(gauss_6328),
        # Sigma b
        random(gauss_5813),
        random(gauss_5837),
        random(gauss_6097),
        # Lambda b
        random(gauss_5617),
        random(gauss_5912),
        random(gauss_5920),
        # random(gauss_6146),
        # random(gauss_6152),
        # Cascades
        random(gauss_5794),
        random(gauss_6100),
        # random(gauss_6327)
    ])
    # perform the parameter fitting (via minimizing squared distance)
    m = fit(least_squares)
    # skip iterations where MIGRAD produced no valid covariance matrix
    if type(m.covariance) != type(None):
        count += 1
    else:
        continue
    sampled_m1 = np.append(sampled_m1, m.values['m1'])
    sampled_m2 = np.append(sampled_m2, m.values['m2'])
    sampled_m3 = np.append(sampled_m3, m.values['m3'])
    sampled_k = np.append(sampled_k, m.values['k'])
    sampled_a = np.append(sampled_a, m.values['a'])
    sampled_b = np.append(sampled_b, m.values['b'])
    sampled_e = np.append(sampled_e, m.values['e'])
    sampled_g = np.append(sampled_g, m.values['g'])
    # correlation matrix (pairwise parameter correlations for this fit)
    corr = m.covariance.correlation()
    rho_m2m1 = np.append(rho_m2m1, corr['m2','m1'])
    rho_m3m1 = np.append(rho_m3m1, corr['m3','m1'])
    rho_km1 = np.append(rho_km1, corr['k','m1'])
    rho_am1 = np.append(rho_am1, corr['a','m1'])
    rho_bm1 = np.append(rho_bm1, corr['b','m1'])
    rho_em1 = np.append(rho_em1, corr['e','m1'])
    rho_gm1 = np.append(rho_gm1, corr['g','m1'])
    rho_m3m2 = np.append(rho_m3m2, corr['m3','m2'])
    rho_km2 = np.append(rho_km2 , corr['k','m2'])
    rho_am2 = np.append(rho_am2 , corr['a','m2'])
    rho_bm2 = np.append(rho_bm2 , corr['b','m2'])
    rho_em2 = np.append(rho_em2 , corr['e','m2'])
    rho_gm2 = np.append(rho_gm2 , corr['g','m2'])
    rho_km3 = np.append(rho_km3, corr['k','m3'])
    rho_am3 = np.append(rho_am3, corr['a','m3'])
    rho_bm3 = np.append(rho_bm3, corr['b','m3'])
    rho_em3 = np.append(rho_em3, corr['e','m3'])
    rho_gm3 = np.append(rho_gm3, corr['g','m3'])
    rho_ak = np.append(rho_ak, corr['a','k'])
    rho_bk = np.append(rho_bk, corr['b','k'])
    rho_ek = np.append(rho_ek, corr['e','k'])
    rho_gk = np.append(rho_gk, corr['g','k'])
    rho_ba = np.append(rho_ba, corr['b','a'])
    rho_ea = np.append(rho_ea, corr['e','a'])
    rho_ga = np.append(rho_ga, corr['g','a'])
    rho_eb = np.append(rho_eb, corr['e','b'])
    rho_gb = np.append(rho_gb, corr['g','b'])
    rho_ge = np.append(rho_ge, corr['g','e'])
# summary of the bootstrapped parameter distributions (mean +/- std)
print(round(sampled_m1.mean()), "mb", round(sampled_m1.std()) )
print(round(sampled_m2.mean()), "ms", round(sampled_m2.std()) )
print(round(sampled_m3.mean()), "mn", round(sampled_m3.std()) )
print("K", pow(sampled_k.mean(), 2)/(1000**3), "KB", pow(sampled_k.std(), 2)/(1000**3))
print("A", sampled_a.mean(), " PS ", sampled_a.std())
print("B", sampled_b.mean(), " PSL ", sampled_b.std())
print("E", sampled_e.mean(), " PI ", sampled_e.std())
print("G", sampled_g.mean(), " PF ", sampled_g.std())
# save bootstrap results (locally, or per-batch when run under htcondor)
df = pd.DataFrame({"M1" : sampled_m1,"M2" : sampled_m2,"M3" : sampled_m3,
                   "K" : sampled_k, "A" : sampled_a,
                   "B": sampled_b, "E" : sampled_e, "G" : sampled_g})
if batch_number is None:
    if not os.path.exists(workpath+"/tables/"):
        os.makedirs(workpath+"/tables/")
    df.to_csv(workpath+"/tables/bootstrap_param_"+run_baryons+".csv", index=False)
else:
    if not os.path.exists(workpath+"/batch_results/"+run_baryons+"/parameters/"):
        os.makedirs(workpath+"/batch_results/"+run_baryons+"/parameters/")
    df.to_csv(workpath+"/batch_results/"+run_baryons+"/parameters/"+str(batch_number)+".csv", index=False)
# create dictionaries bundling inputs, samples and correlations for the model
param = {'q1':param_q1, 'q2':param_q2, 'q3':param_q3,'is_rho':param_is_rho, 'is_lam':param_is_lam,'is_omega':param_is_omega,
         'is_cascade':param_is_cascade, 'is_sigma':param_is_sigma,'V':param_v, 'W':param_w, 'X':param_x, 'Y':param_y, 'Z':param_z}
sampled = {'sampled_m1':sampled_m1,'sampled_m2':sampled_m2,'sampled_m3':sampled_m3,'sampled_k':sampled_k,
           'sampled_a':sampled_a, 'sampled_b':sampled_b, 'sampled_e':sampled_e, 'sampled_g':sampled_g}
corr_mat_ext ={'rho_m2m1':rho_m2m1, 'rho_m3m1':rho_m3m1, 'rho_km1':rho_km1, 'rho_am1':rho_am1, 'rho_bm1':rho_bm1, 'rho_em1':rho_em1, 'rho_gm1':rho_gm1,
               'rho_m3m2':rho_m3m2, 'rho_km2':rho_km2, 'rho_am2':rho_am2, 'rho_bm2':rho_bm2, 'rho_em2':rho_em2, 'rho_gm2':rho_gm2, 'rho_km3':rho_km3,
               'rho_am3':rho_am3, 'rho_bm3':rho_bm3, 'rho_em3':rho_em3, 'rho_gm3':rho_gm3, 'rho_ak':rho_ak, 'rho_bk':rho_bk, 'rho_ek':rho_ek,
               'rho_gk':rho_gk, 'rho_ba':rho_ba, 'rho_ea':rho_ea, 'rho_ga':rho_ga, 'rho_eb':rho_eb, 'rho_gb':rho_gb, 'rho_ge':rho_ge}
df = pd.DataFrame(corr_mat_ext)
if batch_number is None:
    if not os.path.exists(workpath+"/tables/"):
        os.makedirs(workpath+"/tables/")
    df.to_csv(workpath+"/tables/bootstrap_correlation_"+run_baryons+".csv", index=False)
else:
    if not os.path.exists(workpath+"/batch_results/"+run_baryons+"/correlation/"):
        os.makedirs(workpath+"/batch_results/"+run_baryons+"/correlation/")
    df.to_csv(workpath+"/batch_results/"+run_baryons+"/correlation/"+str(batch_number)+".csv", index=False)
# calculate masses and widths using the bootstraped fitted parameters
results = BottomThreeQuark(baryons=run_baryons, params=param, sampled=sampled, corr_mat=corr_mat_ext, asymmetric=asymmetric,
                           decay_width=decay_width, bootstrap_width=bootstrap_width, decay_width_em=decay_width_em, bootstrap_width_em=bootstrap_width_em, batch_number=batch_number, workpath=workpath)
results.fetch_values()
results.paper_results_predictions(bootstrap=bootstrap, bootstrap_width=bootstrap_width, prev_params=prev_params)
# report how many bootstrap iterations converged and the total wall time
end = datetime.datetime.now()
elapsed_time = end - start
print(count, "no. successes")
print("Elapsed total time = " + str(elapsed_time))
| Ailierrivero/bottom-baryonsFW-copy | scripts/bootstrap_three_quark.py | bootstrap_three_quark.py | py | 12,649 | python | en | code | 0 | github-code | 36 |
25314485217 | import sys
input = lambda : sys.stdin.readline()  # faster reads; intentionally shadows builtin input
N, M = map(int,input().split())  # N data rows, M queries
P = [input().split() for _ in range(N)]  # rows of [name, count] strings
def bs(rank, cnt):
    """Binary search over ``rank`` (ascending by the numeric second field).

    Returns the index of the leftmost row whose count is >= ``cnt``,
    or 0 when no row qualifies.
    """
    lo, hi = 0, len(rank) - 1
    answer = 0
    while lo <= hi:
        mid = lo + (hi - lo) // 2
        if int(rank[mid][1]) < cnt:
            lo = mid + 1
        else:
            # candidate found; keep searching further left
            answer = mid
            hi = mid - 1
    return answer
for _ in range(M):
    threshold = int(input())
    # Print the name of the first row meeting the queried count.
    print(P[bs(P, threshold)][0])
29066069667 | """
Given an amount and the denominations of coins available, determine how many ways change can be made for amount.
There is a limitless supply of each coin type.
"""
def getWays(num, coins):
    """Count the ways to make change for ``num`` with unlimited coins.

    Classic bottom-up DP over a table where
    ``table[amount][c]`` = number of ways to form ``amount`` using only
    the first ``c`` denominations:

        table[amount][c] = table[amount][c-1]              (skip coin c)
                         + table[amount - coins[c-1]][c]   (use coin c)

    Column 0 of row 0 is 1 (one way to make zero: take nothing); every
    other amount with zero denominations has 0 ways.
    """
    rows = len(coins) + 1
    table = [[0] * rows for _ in range(num + 1)]
    # One way to make amount 0, regardless of the denominations available.
    for c in range(rows):
        table[0][c] = 1
    for amount in range(1, num + 1):
        for c in range(1, rows):
            ways = table[amount][c - 1]
            coin = coins[c - 1]
            if coin <= amount:
                # A coin larger than the amount can never contribute.
                ways += table[amount - coin][c]
            table[amount][c] = ways
    return table[num][len(coins)]
43190780850 | from openpyxl.styles import PatternFill, GradientFill
def set_fill_color_green(workbook):
    """Apply a fill style to cell A1 of the workbook's active sheet and return the cell.

    NOTE(review): despite the name, no green fill is applied — the last
    assignment (a PatternFill with fill_type=None) is what takes effect;
    confirm whether the first two fills are meant as documentation only.
    """
    # read
    # http://openpyxl.readthedocs.io/en/stable/api/openpyxl.styles.fills.html
    ws = workbook.active
    a1 = ws['A1']
    # 2 different fill types shown below; each assignment overwrites the
    # previous one, so only the final PatternFill is actually applied.
    fill = PatternFill("solid", fgColor="DDDDDD")
    fill = GradientFill(stop=("000000", "FFFFFF"))
    fill = PatternFill(
        fill_type=None,
        start_color='FFFFFFFF',
        end_color='FF000000')
    a1.fill = fill
    return a1
| simkimsia/ug-read-write-excel-using-python | examples/c09_2_fill_color/openpyxl/index.py | index.py | py | 498 | python | en | code | 2 | github-code | 36 |
38083296585 | import os
import subprocess
from migen import *
from migen.genlib.resetsync import AsyncResetSynchronizer
import qmatech
from accel import *
class ADXL362(Module):
    """Migen top-level wiring an AccelCore to the board's SPI pads and LED.

    Builds a power-on-reset synchroniser and a PLL that multiplies the
    50 MHz board clock by 4 to produce the 200 MHz system clock.
    """
    def __init__(self, platform):
        pads = platform.request("spi", 0)
        led = platform.request("led0", 0)
        # clock source request (50 MHz board oscillator)
        clk50 = platform.request("clk50")
        self.clock_domains.cd_sys = ClockDomain()
        self.clock_domains.cd_por = ClockDomain(reset_less=True)
        ###
        # keep attributes stop the toolchain from optimising the clocks away
        self.cd_sys.clk.attr.add("keep")
        self.cd_por.clk.attr.add("keep")
        # power-on reset: rst_n goes high after the first por-domain cycle
        rst_n = Signal()
        self.sync.por += rst_n.eq(1)
        self.comb += [
            self.cd_por.clk.eq(clk50),
            self.cd_sys.rst.eq(~rst_n),
        ]
        # sys_clk PLL setting (target 200MHz = 50MHz * CLK0_MULTIPLY_BY)
        self.specials += \
            Instance("ALTPLL",
                     p_BANDWIDTH_TYPE="AUTO",
                     p_CLK0_DIVIDE_BY=1,
                     p_CLK0_DUTY_CYCLE=50e0,
                     p_CLK0_MULTIPLY_BY=4,
                     p_CLK0_PHASE_SHIFT="0",
                     p_COMPENSATE_CLOCK="CLK0",
                     p_INCLK0_INPUT_FREQUENCY=20000e0,
                     p_OPERATION_MODE="NORMAL",
                     i_INCLK=clk50,
                     o_CLK=self.cd_sys.clk,
                     i_ARESET=~rst_n,
                     i_CLKENA=0x3f,
                     i_EXTCLKENA=0xf,
                     i_FBIN=1,
                     i_PFDENA=1,
                     i_PLLENA=1,
                     )
        #self.comb += self.cd_sys.clk.eq(clk50)
        # Integrate accel core and wire its SPI signals to the pads.
        core = AccelCore(freq=200000000, baud=115200)
        self.submodules += core
        self.comb += [
            core.sck.eq(pads.sclk),
            core.mosi.eq(pads.mosi),
            pads.miso.eq(core.miso),
            core.csn.eq(pads.csn),
            led.eq(core.led),
        ]
# Build the design and flash the generated bitstream over USB Blaster.
platform = qmatech.Platform()
dut = ADXL362(platform)
platform.build(dut)
# Imported here so the programmer is only loaded after a successful build.
from litex.build.altera import USBBlaster
prog = USBBlaster()
prog.load_bitstream("build/top.sof")
| kamejoko80/linux-on-litex-vexriscv-legacy | projects/Accel/adxl362_altera.py | adxl362_altera.py | py | 2,019 | python | en | code | 0 | github-code | 36 |
14114202160 | import sys
sys.setrecursionlimit(2000 * 1000)
def sol(n, cnt):
    """Depth-first search for the minimum number of operations reducing n to 1.

    Allowed operations: divide by 3, divide by 2 (when divisible), subtract 1.
    ``cnt`` is the number of operations taken so far; results are written
    into the module-level ``dp`` list, with ``dp[1]`` holding the best
    answer found so far.
    """
    # Prune: once dp[1] can no longer improve along this path, stop.
    if dp[1] <= cnt or n < 1:
        return
    dp[n] = cnt
    cnt = cnt + 1
    if n % 3 == 0:
        sol(n // 3, cnt)
    if n % 2 == 0:
        sol(n // 2, cnt)
    sol(n - 1, cnt)
if __name__ == "__main__":
    # Read N, run the pruned search, and report the minimum step count.
    N = int(sys.stdin.readline())
    dp = [sys.maxsize] * (N + 1)
    sol(N, 0)
    print(dp[1])
N = int(input("Antal med grönt kort, N ? "))
M = int(input("Antal utan grönt kort, M ? "))

# Total time in minutes for everyone to pass.
if M == 0 or M < N:
    # Green-card holders pair up among themselves; an odd N adds one
    # extra 10-minute round on top of the two paired rounds.
    minutes = 20 if N % 2 == 0 else 30
elif M == N:
    minutes = 30
else:  # M > N
    minutes = 10 * (M // N) + 10 * (M % N) + 20

print("Svar:", minutes)
"""
Test
N = 3, (A,B,C)
M = 1, (D)
AD, BC
AB
BA
10min: NM, NN
10min: NN
10min: NN
N = 2, (A,B)
M = 3, (C,D,E)
AC, BD
AE
AB
BA
10min: NM, NM
10min: NN
10min: NN
10min: NM
N = 6, (A,B,C,D,E,F)
M = 2, (G,H)
AG, BH, CD, EF
AB, DC, FE
BA
N = 5 (A,B,C,D,E)
M = 7 (F,G,H,I,J,K,L)
AF, BG, CH, DI, EJ
AK, BL, CD
AE, BC
CA, DB
N = 5 (A,B,C,D,E)
AB, CD
AE, BC
BA
(5,2) = 10
N = 6 (A,B,C,D,E,F)
AB, CD, EF
BA, DC, FE
(6,2) = 15
N = 7 (A,B,C,D,E,F,G)
AB, CD, EF
BA, DC, FE
AG
(7,2) = 21
N = 8 (A,B,C,D,E,F,G,H)
AB, CD, EF, GH
BA, DC, FE, HG
(8,2) = 28
ABC
DEF
Ad, BC
AB, Ce
BA, Cf
AB, Cd
AB
cdef
Ac, Bd
Ae, Bf
AB
BA
ABCD
efgh
Ae, Bf, Cg, Dh
AB, CD
BA, DC
ABC
ef
Ae, Bf
A
ABCDE
fghij
AB, CD, Ef
BA, DC, Eg
Ah, Bi, Cj, De
ABCDE
fghi
AB, Cf, Dg, Eh
Ai, BC, DE
BA, ED,
"""
| Goby56/Prog2-KR | programmerings-olympiaden/2022/uppg3.py | uppg3.py | py | 1,125 | python | en | code | 0 | github-code | 36 |
12435271099 | from urllib.request import urlopen
from bs4 import BeautifulSoup
import requests
import csv
import openpyxl
import json
import time
from datetime import datetime
from random import random
# Scrape JAMB Mathematics questions (1978-2019, 10 pages per year) from
# myschool.ng into a list of question dicts, then dump them to data.json.
start_time = time.time()
print('scraping the site')
test = []
options = []
answer = ''
image_source = []
choices = None
try:
    year_error_occurred = ''
    for year in range(1978, 2020):
        print('year', year)
        for page in range(1, 11):
            print('page number', page)
            quote_page = 'https://myschool.ng/classroom/mathematics?exam_type=jamb&exam_year='+str(year) + '&page='+str(page)
            year_error_occurred = quote_page
            r = requests.get(quote_page)
            encodedText = r.text.encode("utf-8")
            soup = BeautifulSoup(encodedText, 'html.parser')
            question = soup.find_all('div', class_='question-desc')
            for item in question:
                # NOTE(review): random 0-10000 ids can collide across
                # thousands of questions — confirm uniqueness matters.
                question_id = round(random() * 10000)
                #print(item.text.strip())
                content = item.text.rstrip().lstrip()
                question = content.strip('\n')
                next_Sibling = item.find_next_sibling('ul')
                link = item.find_next_sibling('a')
                img_container = item.find_previous_sibling('div')
                image_source = []
                if img_container is not None:
                    images = img_container.findChildren('img')
                    if images is not None:
                        for img in images:
                            image_source.append(img['src'])
                #print('link to answer', link['href'])
                # Follow the answer link (when present) to read the answer text.
                # NOTE(review): if the very first item has no link,
                # link_to_answer below is referenced before assignment.
                if link is not None:
                    link_to_answer = link['href']
                    encodedText = requests.get(link_to_answer).text.encode("utf-8")
                    soup = BeautifulSoup(encodedText, 'html.parser')
                    h5_tag = soup.find('h5', class_='text-success')
                    #print('-', h5_tag.text.strip())
                    content = h5_tag.text.rstrip().lstrip()
                    answer = content.strip('\n')
                choices = next_Sibling.findChildren('li')
                options = []
                if choices is not None:
                    for node in choices:
                        #print(node.text.strip())
                        content = node.text.lstrip().rstrip()
                        choice = content.strip('\n')
                        options.append(choice)
                # NOTE: the 'qestion' key spelling is part of the emitted
                # JSON schema — do not "fix" it without updating consumers.
                test.append({ 'id': question_id, 'year': year, 'examtype': 'Jamb',
                             'subject': 'Mathematics','qestion': question,'image_asset': image_source,
                             'options': options, 'answer': answer,
                             'linkToanswer':link_to_answer, 'source_url': quote_page})
                time.sleep(1)
except:
    # NOTE(review): bare except hides the real failure (even KeyboardInterrupt);
    # consider `except Exception as e` and logging `e`.
    print('error occurred while try to scrap', year_error_occurred)
print('done')
with open('data.json', 'w') as outfile:
    json.dump(test, outfile)
print('executed successfully, total execution time: ', (time.time() - start_time))
27053376609 | import pytest
from oddEvenList import Solution
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
@pytest.mark.parametrize("nums, expected", [
([1, 2, 3, 4, 5], [1, 3, 5, 2, 4]),
([2, 1, 3, 5, 6, 4, 7], [2, 3, 6, 7, 1, 5, 4])
])
def test_oddEvenList(nums, expected):
head = current = ListNode(nums[0])
for n in nums[1:]:
current.next = ListNode(n)
current = current.next
actual = Solution().oddEvenList(head)
index = 0
while actual:
assert actual.val == expected[index]
actual = actual.next
index += 1
| ikedaosushi/leetcode | problems/python/tests/test_oddEvenList.py | test_oddEvenList.py | py | 613 | python | en | code | 1 | github-code | 36 |
70387200423 | import os
import torch
import timm
import numpy as np
from simple_network import SimpleNetwork
from torch.utils.data import DataLoader
from torchvision.datasets import ImageFolder
from torchvision.transforms import Compose, Normalize, ToTensor, Resize
from torchvision.models import resnet18
#from torch import nn
from nvflare.apis.dxo import DXO, DataKind, from_shareable
from nvflare.apis.executor import Executor
from nvflare.apis.fl_constant import ReturnCode
from nvflare.apis.fl_context import FLContext
from nvflare.apis.shareable import Shareable, make_reply
from nvflare.apis.signal import Signal
from nvflare.app_common.app_constant import AppConstants
class ChestXrayValidator(Executor):
def __init__(self, data_path="/dataset",model_name="resnet18", validate_task_name=AppConstants.TASK_VALIDATION,pretrained_model_path=None):
super().__init__()
self._validate_task_name = validate_task_name
self.pretrained_model_path = pretrained_model_path
if pretrained_model_path and os.path.exists(pretrained_model_path):
state_dict = torch.load(self.pretrained_model_path)
self.model = timm.create_model(model_name, pretrained=False, num_classes=2) # Assuming a binary classification problem in Chest X-ray
self.model.load_state_dict(state_dict, strict=False)
print("Loaded pretrained model from:", pretrained_model_path)
else:
self.model = timm.create_model(model_name, pretrained=True, num_classes=2) # Assuming a binary classification problem in Chest X-ray
# Setup the model
# self.model = resnet18(pretrained=True)
#self.model = torchvision.models.resnet18()
self.device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
self.model.to(self.device)
# Preparing the dataset for testing.
transform = Compose(
[
Resize((224, 224)),
ToTensor(),
Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
]
)
val_data_path = os.path.join(data_path,'val')
self._val_dataset = ImageFolder(root=val_data_path, transform=transform)
self._val_loader = DataLoader(self._val_dataset, batch_size=32, shuffle=True,num_workers=4,pin_memory=True)
def execute(self, task_name: str, shareable: Shareable, fl_ctx: FLContext, abort_signal: Signal) -> Shareable:
if task_name == self._validate_task_name:
model_owner = "?"
try:
try:
dxo = from_shareable(shareable)
except:
self.log_error(fl_ctx, "Error in extracting dxo from shareable.")
return make_reply(ReturnCode.BAD_TASK_DATA)
# Ensure data_kind is weights.
if not dxo.data_kind == DataKind.WEIGHTS:
self.log_exception(fl_ctx, f"DXO is of type {dxo.data_kind} but expected type WEIGHTS.")
return make_reply(ReturnCode.BAD_TASK_DATA)
# Extract weights and ensure they are tensor.
model_owner = shareable.get_header(AppConstants.MODEL_OWNER, "?")
weights = {k: torch.as_tensor(v, device=self.device) for k, v in dxo.data.items()}
# Get validation accuracy
val_accuracy = self._validate(weights, abort_signal)
if abort_signal.triggered:
return make_reply(ReturnCode.TASK_ABORTED)
self.log_info(
fl_ctx,
f"Accuracy when validating {model_owner}'s model on"
f" {fl_ctx.get_identity_name()}"
f"s data: {val_accuracy}",
)
dxo = DXO(data_kind=DataKind.METRICS, data={"val_acc": val_accuracy})
return dxo.to_shareable()
except:
self.log_exception(fl_ctx, f"Exception in validating model from {model_owner}")
return make_reply(ReturnCode.EXECUTION_EXCEPTION)
else:
return make_reply(ReturnCode.TASK_UNKNOWN)
def _validate(self, weights, abort_signal):
#self.model.load_state_dict(weights)
# change numpy.ndarray to torch.Tensor
weights = {k: torch.from_numpy(v).to(self.device) if isinstance(v, np.ndarray) else v.to(self.device) for k, v in weights.items()}
# creat new state_dict and del fc.weight andfc.bias
new_state_dict = {k: v for k, v in weights.items() if k not in ["fc.weight", "fc.bias"]}
self.model.load_state_dict(new_state_dict, strict=False)
self.model.eval()
correct = 0
total = 0
with torch.no_grad():
for i, (images, labels) in enumerate(self._val_loader):
if abort_signal.triggered:
return 0
images, labels = images.to(self.device), labels.to(self.device)
output = self.model(images)
_, pred_label = torch.max(output, 1)
correct += (pred_label == labels).sum().item()
total += images.size()[0]
metric = correct / float(total)
return metric
| Hamster-yang/FL_chest-xray_timm | app/custom/chestxrayvalidator.py | chestxrayvalidator.py | py | 5,254 | python | en | code | 0 | github-code | 36 |
5217107857 | import pandas as pd
import geopandas as gpd
import matplotlib.pyplot as plt
import contextily as ctx
from adjustText import adjust_text
from matplotlib.offsetbox import OffsetImage, AnnotationBbox
file_path = '/Users/jakewatembach/Desktop/meteoriteLandings/Meteorite_Landings.csv'
df = pd.read_csv(file_path)
df['mass (t)'] = df['mass (g)'] / 1e6
df = df.sort_values(by='mass (t)', ascending=False).head(10)
gdf = gpd.GeoDataFrame(
df, geometry=gpd.points_from_xy(df['reclong'], df['reclat'])
)
gdf.crs = 'EPSG:4326'
gdf = gdf.to_crs('EPSG:3857')
world = gpd.read_file(gpd.datasets.get_path('naturalearth_lowres'))
world = world.to_crs('EPSG:3857')
plt.style.use('dark_background')
plt.rcParams['font.family'] = 'Arial'
fig, ax = plt.subplots(figsize=(20, 10))
ax.set_aspect('equal')
ax.margins(x=0.05)
world.plot(ax=ax, edgecolor='white', linewidth=0.5, facecolor='green', alpha=0.3)
ctx.add_basemap(ax, source=ctx.providers.CartoDB.Positron)
meteorite_img = plt.imread('/Users/jakewatembach/Desktop/meteoriteLandings/meteorite.png')
# Get the dimensions of the meteorite image
img_height, img_width, _ = meteorite_img.shape
# Calculate the offset between the center of the meteorite and the center of the flame behind it
x_offset = int(img_width * 0.26)
y_offset = int(img_height * 0.27)
for x, y in zip(gdf.geometry.x, gdf.geometry.y):
imagebox = OffsetImage(meteorite_img, zoom=0.03)
ab = AnnotationBbox(imagebox, (x - x_offset, y - y_offset), frameon=False, box_alignment=(0.5, 0.5))
ax.add_artist(ab)
texts = []
bbox = ax.get_xlim() + ax.get_ylim()
# Calculate the range of the meteorite locations in x and y directions
meteorites_bbox = gdf.total_bounds
x_range = meteorites_bbox[2] - meteorites_bbox[0]
y_range = meteorites_bbox[3] - meteorites_bbox[1]
# Adjust the label positions to ensure they're within the bounds of the plot
for x, y, name, mass in zip(gdf.geometry.x, gdf.geometry.y, gdf['name'], gdf['mass (t)']):
label = f"{name} ({mass:.2f} t)"
# Calculate the offset of the label from the meteorite
label_offset = (0.03 * x_range, 0.03 * y_range)
if x < bbox[0]:
label_offset = (-0.03 * x_range, label_offset[1])
elif x > bbox[1]:
label_offset = (0.03 * x_range, label_offset[1])
if y < bbox[2]:
label_offset = (label_offset[0], -0.03 * y_range)
elif y > bbox[3]:
label_offset = (label_offset[0], 0.03 * y_range)
# Adjust the label position based on the offset
label_x = x + label_offset[0]
label_y = y + label_offset[1]
# Adjust the label position so that it is within the bounds of the plot
if label_x < bbox[0]:
label_x = bbox[0] + (bbox[1] - bbox[0]) * 0.01
if label_x > bbox[1]:
label_x = bbox[1] - (bbox[1] - bbox[0]) * 0.01
if label_y < bbox[2]:
label_y = bbox[2] + (bbox[3] - bbox[2]) * 0.01
if label_y > bbox[3]:
label_y = bbox[3] - (bbox[3] - bbox[2]) * 0.01
texts.append(ax.text(label_x, label_y, label, fontsize=10, ha='center', va='center', color='white',
bbox=dict(boxstyle="round", fc="black", alpha=0.5)))
adjust_text(texts)
ax.set_xlabel('Longitude')
ax.set_ylabel('Latitude')
meteorites_bbox = gdf.total_bounds
padding_factor = 0.2
x_range = meteorites_bbox[2] - meteorites_bbox[0]
y_range = meteorites_bbox[3] - meteorites_bbox[1]
ax.set_xlim(meteorites_bbox[0] - x_range * padding_factor, meteorites_bbox[2] + x_range * padding_factor)
ax.set_ylim(meteorites_bbox[1] - y_range * padding_factor, meteorites_bbox[3] + y_range * padding_factor)
plt.xticks(fontsize=8)
plt.yticks(fontsize=8)
plt.title('Top 10 Biggest Meteorite Landings', fontsize=20, color='white')
plt.savefig('meteorite_landings.png', dpi=300, bbox_inches='tight')
plt.show()
| jakeww/meteoriteLandings | landings.py | landings.py | py | 3,775 | python | en | code | 0 | github-code | 36 |
788805808 | import math
import numpy as np
import cv2
import settings
from point import polarvec, Point
class EV3Map:
    """2D world map backed by an image, with world<->image coordinate calibration.

    Loads the map image named in the global settings, converts it to a
    shadow-compensated grayscale copy, and precomputes the scale factors
    between world units and image pixels.
    """

    def __init__(self):
        self.settings = settings.SettingsRegistry['global']
        self.image = cv2.imread(self.settings.load_map)
        self.gray = cv2.cvtColor(self.image, cv2.COLOR_RGB2GRAY)
        # Shadow compensation: clamp everything below the offset to black,
        # then shift the remaining range down by the same amount.
        offset = self.settings.camera_shadow_offset
        self.gray[self.gray < offset] = 0
        self.gray[self.gray >= offset] -= offset
        self.image_w = self.image.shape[1]
        self.image_h = self.image.shape[0]
        self.world_w = self.settings.world_w
        self.world_h = self.settings.world_h
        # calib_w2i: world units -> pixels; calib_i2w: pixels -> world units.
        self.calib_w2i = Point(self.image_w / self.world_w,
                               self.image_h / self.world_h)
        self.calib_i2w = Point(self.world_w / self.image_w,
                               self.world_h / self.image_h)

    def _world2image(self, p):
        # Convert a world-space Point to integer pixel coordinates.
        return (int(p.x * self.calib_w2i.x),
                int(p.y * self.calib_w2i.y))

    def image2world(self, image_x, image_y):
        """Convert pixel coordinates back into a world-space Point."""
        return Point(image_x * self.calib_i2w.x,
                     image_y * self.calib_i2w.y)

    def get_circle(self, world_center, world_radius, angle):
        """Return a square grayscale patch of side 2*radius around a world point.

        NOTE(review): *angle* is accepted but unused here (a circle is
        rotation invariant); radius is scaled with the x calibration only,
        which assumes square pixels - confirm for non-uniform maps.
        """
        center = self._world2image(world_center)
        radius = int(world_radius * self.calib_w2i.x)
        patch_size = (radius * 2, radius * 2)
        patch = cv2.getRectSubPix(self.gray, patch_size, center)
        return patch

    def get_rectangle(self, world_center, camera_w, camera_h, angle):
        """Return a camera_w x camera_h grayscale patch rotated by *angle* (radians)."""
        center = self._world2image(world_center)
        patch_w = int(camera_w * self.calib_w2i.x)
        patch_h = int(camera_h * self.calib_w2i.y)
        angle_degree = math.degrees(angle)
        patch_size = (patch_w, patch_h)
        # Rotate the whole map around the patch center, then cut the
        # axis-aligned sub-rectangle from the rotated image.
        m = cv2.getRotationMatrix2D(center, angle_degree + 90., 1.)
        dst = cv2.warpAffine(self.gray, m, (self.image_w, self.image_h), flags=cv2.INTER_LINEAR)
        patch = cv2.getRectSubPix(dst, patch_size, center)
        return patch
| pfnet-research/chainer-ev3 | simulator2d/map.py | map.py | py | 1,986 | python | en | code | 8 | github-code | 36 |
37451679140 | import numpy as np
def split_into_reading_frames(seq_obj):
    """Return the three forward reading frames of *seq_obj*.

    Each frame starts at offset 0, 1 or 2 and is trimmed so its length is
    a multiple of three (incomplete trailing codons are dropped), e.g.
    Seq(AAAGGGTTT) -> [Seq(AAAGGGTTT), Seq(AAGGGT), Seq(AGGGTT)].
    """
    frames = []
    for offset in range(3):
        usable = (len(seq_obj) - offset) // 3 * 3
        frames.append(seq_obj[offset:offset + usable])
    return frames
def find_start_ends(translated_seq_obj):
    """Pair every 'M' (start) with the first '*' (stop) at or after it.

    Example: Seq(Q*MQMM*M**Q) -> [(2, 6), (4, 6), (5, 6), (7, 8)].
    Returns [] when the sequence is empty or lacks either an 'M' or a '*'.
    """
    starts = [pos for pos, aa in enumerate(translated_seq_obj) if aa == 'M']
    ends = [pos for pos, aa in enumerate(translated_seq_obj) if aa == '*']
    if not starts or not ends:
        return []
    pairs = []
    end_idx = 0
    for start in starts:
        # Advance to the first stop that is not before this start; if the
        # stops run out, everything collected so far is the answer.
        while ends[end_idx] < start:
            end_idx += 1
            if end_idx >= len(ends):
                return pairs
        pairs.append((start, ends[end_idx]))
    return pairs
def filter_start_end_to_longest_pairs(start_stops):
    """Keep only the first (longest) ORF for each distinct stop position.

    The input pairs are assumed ordered as produced by find_start_ends,
    so the first pair seen for a given stop has the earliest start.
    Example: [(2, 6), (4, 6), (5, 6), (7, 8)] -> [(2, 6), (7, 8)].
    """
    longest = []
    previous_stop = 0
    for start, stop in start_stops:
        if stop != previous_stop:
            longest.append((start, stop))
            previous_stop = stop
    return longest
def extract_fragments(sequence_object, start_stops, minimum_length=1):
    """Slice *sequence_object* at each (start, stop) pair.

    Pairs shorter than *minimum_length* are skipped, e.g.
    (Seq(Q*MQMM*M**Q), [(2, 6), (7, 8)], 2) -> [Seq(MQMM)].
    """
    return [
        sequence_object[start:stop]
        for start, stop in start_stops
        if stop - start >= minimum_length
    ]
def occurrences(the_haystack, the_needle):
    """Count (possibly overlapping) occurrences of *the_needle* in *the_haystack*.

    Matches are searched starting one position after the previous hit, so
    e.g. occurrences("AAAA", "AA") == 3.
    """
    count = 0
    position = the_haystack.find(the_needle)
    while position != -1:
        count += 1
        position = the_haystack.find(the_needle, position + 1)
    return count
def count_frequency(arr_where, arr_what):
    """Mean per-sequence frequency of each pattern of *arr_what* across *arr_where*.

    For every sequence, the overlapping occurrence count of a pattern is
    normalised by the number of possible pattern positions
    (len(seq) - len(pattern) + 1; all patterns are assumed equal length).
    Returns a numpy array with one entry per pattern.
    """
    totals = np.zeros(len(arr_what))
    for seq in arr_where:
        positions = len(seq) - len(arr_what[0]) + 1
        for idx, pattern in enumerate(arr_what):
            # Overlapping count (same semantics as the occurrences helper).
            hits = 0
            found = seq.find(pattern)
            while found != -1:
                hits += 1
                found = seq.find(pattern, found + 1)
            totals[idx] += hits / positions
    return totals / len(arr_where)
def calculate_difference_frequencies(numpy_array_frequencies1, numpy_array_frequencies2):
    """Score the dissimilarity of two equal-length frequency vectors.

    Builds the element-wise fold-change ratio (always >= 1, whichever
    direction is larger), caps it at 4, multiplies entries by bucketed
    integer penalty weights derived from fixed thresholds, and returns
    the mean of the negated result minus one.  NOTE(review): the
    thresholds/weights look empirically tuned - confirm against callers
    before changing them.
    """
    the_freq1 = numpy_array_frequencies1.copy()
    the_freq2 = numpy_array_frequencies2.copy()
    # Replace zero (or negative) entries with a tiny epsilon so the
    # divisions below never hit zero.
    the_freq1 = (the_freq1 <= 0) * 1e-9 + the_freq1
    the_freq2 = (the_freq2 <= 0) * 1e-9 + the_freq2
    # Take f1/f2 where f1 dominates and f2/f1 elsewhere, so every entry
    # of the combined ratio is >= 1.
    the_mask = the_freq1 / the_freq2 >= 1
    the_ratio1 = the_mask * the_freq1 / the_freq2
    the_ratio2 = np.invert(the_mask) * the_freq2 / the_freq1
    # rate
    the_ratio = the_ratio1 + the_ratio2
    # Cap the fold change at 4 so outliers cannot dominate the score.
    the_mask = (the_ratio > 4)
    the_ratio = np.invert(the_mask) * the_ratio + the_mask * 4
    # Earlier, finer-grained threshold set kept for reference:
    # the_thresholds = [3.5, 3.0, 2.6, 2.2, 2.0, 1.8, 1.6, 1.4, 1.3, 1.0]
    the_thresholds = [4.0, 3.0, 2.0, 1.3, 1.0]
    the_max_weight = -1 * len(the_thresholds) - 1
    # Each threshold an entry exceeds adds another (negative) weight
    # multiple of the ratio, so larger ratios accumulate larger penalties.
    for the_index in range(len(the_thresholds)):
        the_mask = (the_ratio >= the_thresholds[the_index])
        the_ratio = the_mask * the_ratio * (the_max_weight + the_index) + the_ratio
    the_ratio = the_ratio * -1
    # calculate final rate
    return the_ratio.mean()-1
| egisxxegis/pirmasBioItLaboras | sequence_tools.py | sequence_tools.py | py | 3,996 | python | en | code | 0 | github-code | 36 |
class BuildStats:
    """Simple statistics over a list of numeric samples.

    Each query prints a small report and returns a
    (count, matching_items) tuple.

    Note: the original header line was corrupted by extraction junk fused
    onto the ``class`` statement, and the three query methods duplicated
    the same filter/report logic; both are fixed here.
    """

    def __init__(self, values):
        self.stats_values = values

    def _select(self, description, predicate):
        # Shared filter/report logic for less/greater/between.
        matches = [item for item in self.stats_values if predicate(item)]
        print("There are " + str(len(matches)) + " items " + description + " !!")
        print("========ITEMS========")
        print(matches)
        print("=====================")
        return len(matches), matches

    def less(self, bound):
        """Count and collect items strictly below *bound*."""
        return self._select("less than " + str(bound), lambda item: item < bound)

    def greater(self, bound):
        """Count and collect items strictly above *bound*."""
        return self._select("greater than " + str(bound), lambda item: item > bound)

    def between(self, lBound, uBound):
        """Count and collect items strictly between *lBound* and *uBound* (both exclusive)."""
        return self._select(
            "between " + str(lBound) + " and " + str(uBound),
            lambda item: lBound < item < uBound,
        )
class DataCapture:
    """Collects numeric values and hands them to BuildStats for queries."""

    def __init__(self, initialValues=None):
        # The original default was a mutable list (initialValues=[]), which
        # is evaluated once and shared by every instance relying on the
        # default - values added to one capture leaked into all others.
        # Create a fresh list per instance instead.
        self.values = [] if initialValues is None else initialValues

    def add(self, item):
        """Append a single value to the capture."""
        self.values.append(item)

    def show_values(self):
        """Return the underlying list of captured values."""
        return self.values

    def build_stats(self):
        """Create a BuildStats view over the captured values."""
        return BuildStats(self.values)
class DataCaptureAndStats:
    """Small demo driver wiring DataCapture to BuildStats."""

    def __init__(self):
        seed_values = [1, 2, 3, 4, 5, 6, 7, 8, 9, 0]
        self.data_capture = DataCapture(seed_values)

    def run(self):
        """Add two samples, print the values, and run the three stat queries."""
        capture = self.data_capture
        for sample in (3, 1000):
            capture.add(sample)
        print("Values: " + str(capture.show_values()))
        stats = capture.build_stats()
        stats.less(4)
        stats.greater(4)
        stats.between(1, 4)
testProgram=DataCaptureAndStats()
testProgram.run() | AndresLGomezO/TeamInternationalTest | stats.py | stats.py | py | 2,503 | python | en | code | 0 | github-code | 36 |
16848170368 |
from app import app, mail
from . import login_required, bad_request, created_request
from flask import jsonify, request
from db import db, User, Department, Role, UserRoles
from sqlalchemy.exc import IntegrityError
from setting import MAX_USER_PER_PAGE
from dateutil import parser as TimeParser
from secrets import token_urlsafe
from flask_mail import Message
from flask import render_template
import json
import phonenumbers
import smtplib
@app.route('/api/user/email/verify', methods=["POST"])
@login_required()
def email_verify():
    """Confirm a pending e-mail change using the token mailed to the user.

    Expects JSON body: {"token": <str>}.  On success the pending address
    (``new_email``) becomes the account e-mail and the token is cleared.
    """
    data = request.get_json()
    if "token" not in data:
        return bad_request("Missing token")
    user = request.session.user
    if user.email_token != data['token']:
        # Token mismatch: the verification link is stale or forged.
        return jsonify({
            'status': "FAIL",
            'err': "INVALID_CODE"
        })
    try:
        user.email = user.new_email
        user.new_email = None
        user.email_token = None
        db.session.commit()
    except IntegrityError:
        # Another account claimed this address while verification was pending.
        db.session.rollback()
        return jsonify({
            'status': "FAIL",
            'err': "EXIST_ERR"
        })
    return jsonify({
        'status': "OK"
    })
@app.route('/api/user/add', methods=['POST'])
@login_required(role_allow=['manager', 'administrator'])
def add_user() -> jsonify:
    """
    This route is dedicated to adding new user to the system.

    Expects JSON body: {"email", "passwd", "department", "name", "role"}
    where department/role are numeric ids (int or digit string).

    Returns:
        jsonify : HTTP response
    """
    data = request.get_json()
    # Validate the incoming request
    if "email" not in data or \
            "passwd" not in data or \
            "department" not in data or \
            "name" not in data or \
            "role" not in data:
        return bad_request('Missing parameter')
    if not data['passwd']:
        return bad_request("Missing password")
    # Ids may arrive as ints or digit strings; anything else is rejected.
    if (type(data['role']) == str and not data['role'].isdigit()) or \
            (type(data['department']) == str and not data['department'].isdigit()):
        return bad_request('Invalid argument')
    department = db.session.query(Department).filter(
        Department.id == int(data['department'])).first()
    role = db.session.query(Role).filter(Role.id == int(data['role'])).first()
    if not department:
        return bad_request('Invalid department id')
    if not role:
        return bad_request('Invalid role id')
    # Creating new user and add to the system
    try:
        user = User(
            username=data['name'],
            password=data['passwd'],
            department=department.id,
            email=data['email']
        )
        db.session.add(user)
        # flush (not commit) so user.id is populated and uniqueness is
        # checked before the role record is created below.
        db.session.flush()
    except IntegrityError:
        db.session.rollback()
        return created_request('Account with this email/name has already existed')
    # Creating a new role record for this user
    new_role = UserRoles(userid=user.id, roleid=role.id)
    db.session.add(new_role)
    db.session.commit()
    return jsonify({
        'status': "OK"
    })
@app.route('/api/user/delete/<user_id>', methods=['DELETE'])
@login_required(role_allow=['manager', 'administrator'])
def delete_user(user_id: str) -> jsonify:
    """
    This route is dedicated to deleting existing user out of the system.

    Self-deletion is refused so an administrator cannot lock themselves out.

    Returns:
        jsonify : HTTP response
    """
    if not user_id.isdigit():
        return jsonify({
            'status': "FAIL",
            'err': "Invalid user id."
        })
    user = db.session.query(User).filter(User.id == int(user_id)).first()
    if not user:
        return jsonify({
            'status': "FAIL",
            'err': "User doesn't exist."
        })
    if user.id == request.session.user.id:
        # Refuse to delete the account making the request.
        return jsonify({
            'status': "FAIL",
            'err': "It's not recommended to delete your own account."
        })
    db.session.delete(user)
    db.session.commit()
    return jsonify({
        'status': "OK"
    })
@app.route('/api/user/list', methods=['POST', "GET"])
@login_required(role_allow=['manager', 'administrator'])
def list_all_users() -> jsonify:
    """
    List users page by page (MAX_USER_PER_PAGE per page).

    Expects JSON body: {"page": <int>, "exclude": <optional truthy>}.
    When "exclude" is truthy, the requesting account is omitted.

    Returns:
        jsonify : HTTP response
    """
    # get_json() returns None for a GET (or any request) without a JSON
    # body, which made `'page' not in data` raise a TypeError (HTTP 500).
    # silent=True plus the empty-dict fallback turns that into a clean 400.
    data = request.get_json(silent=True) or {}
    if 'page' not in data:
        return bad_request('Missing argument')
    if type(data['page']) != int:
        return bad_request('Invalid page number')
    if 'exclude' in data and data['exclude']:
        query = db.session.query(User).filter(
            User.id != request.session.user.id)
    else:
        query = db.session.query(User)
    page_users = query.offset(
        data['page'] * MAX_USER_PER_PAGE).limit(MAX_USER_PER_PAGE).all()
    return jsonify([
        {
            'id': user.id,
            'name': user.username,
            'did': user.department_id,  # Department id
            'email': user.email,
            'created': user.created_on
        } for user in page_users
    ]), 200
@app.route('/api/user/get/<user_id>', methods=['POST', "GET"])
@login_required(allow_personal_user=True)
def get_user_info(user_id):
    """Return the full profile of one user, including their role names."""
    if not user_id.isdigit():
        return jsonify({
            'status': "FAIL",
            'err': "Invalid user id"
        })
    user = db.session.query(User).filter(User.id == int(user_id)).first()
    if not user:
        return jsonify({
            'status': "FAIL",
            'err': "User doesn't exist"
        })
    return jsonify({
        'status': "OK",
        'data': {
            'id': user.id,
            'name': user.username,
            'did': user.department_id,
            'email': user.email,
            'created': user.created_on,
            'phone': user.phone,
            'address': user.address,
            'theme': user.theme,
            'language': user.language,
            'gender': user.gender,
            'birthday': user.birthday,
            # Flatten the user->roles association into plain role names.
            'roles': list([urole.role.name for urole in user.userrole_ref.all()])
        }
    })
def change_user_info(user, data):
role_list = set(
[urole.role.name for urole in request.session.user.userrole_ref.all()])
# Checking if this is an administrator or manager
is_manager = "manager" in role_list
is_admin = "administrator" in role_list
is_privilege = is_manager or is_admin
if not user:
return bad_request("User with this id doesn't exist")
if "name" in data:
# Updating the username only
try:
user.username = data['name'].strip()
db.session.flush()
except IntegrityError:
db.session.rollback()
return created_request('This username has already existed')
if "email" in data:
# Updating the email only
email = data['email'].strip()
if email != user.email:
check = db.session.query(User.email).filter(
User.email == email).first()
if check:
return jsonify({
'status': "FAIL",
'err': "This email has already been used"
})
code = token_urlsafe(100)
user.email_token = code
user.new_email = email
# Creating a message which send to the user email later
msg = Message('Verify your email',
sender=app.config.get("MAIL_USERNAME"),
recipients=[email])
msg.html = render_template(
'email_verify.html', username=user.username, url=user.craft_verify_url(code))
try:
mail.send(msg)
except smtplib.SMTPRecipientsRefused:
pass
if "phone" in data:
# Updating the phone number only
if data['phone'] != '':
try:
number = phonenumbers.parse(data['phone'])
if not phonenumbers.is_possible_number(number):
db.session.rollback()
return jsonify({
'status': "FAIL",
'err': "Invalid phone number"
})
user.phone = data['phone'].strip()
db.session.flush()
except phonenumbers.phonenumberutil.NumberParseException:
db.session.rollback()
return jsonify({
'status': "FAIL",
'err': "Missing or invalid region code."
})
except IntegrityError:
db.session.rollback()
return created_request('This phone number has already been used')
if "birthday" in data:
# Updating the birthday only
try:
user.birthday = TimeParser.parse(data['birthday'].strip())
db.session.flush()
except:
db.session.rollback()
return jsonify({
'status': "FAIL",
'err': "Unexpected error occurred while setting birthday."
}), 500
if "gender" in data:
# Updating the gender only
try:
user.gender = data['gender'].strip()
db.session.flush()
except IntegrityError:
db.session.rollback()
return jsonify({
'status': "FAIL",
'err': "Invalid gender."
}), 500
if "address" in data:
# Updating the address only
try:
user.address = data['address'].strip()
db.session.flush()
except:
db.session.rollback()
return jsonify({
'status': "FAIL",
'err': "Unexpected error occurred while setting address."
}), 500
if "passwd" in data:
# Updating the password only
# print(is_privilege)
if not is_privilege or user.id == request.session.user.id:
# If someone who isn't privileged try to change password, there should be a current password for tat
# If a person trying to change their own password, there should be current as well
if "cpass" not in data:
# if there is no confirm password
return bad_request('Missing confirm password.')
if not user.check_password(data['cpass']):
return jsonify({
'status': "FAIL",
'err': "INVALID_PASS"
})
try:
user.set_new_password(data['passwd'].strip())
db.session.flush()
except:
db.session.rollback()
return jsonify({
'status': "FAIL",
'err': "Unexpected error occurred while setting new password."
}), 500
if "theme" in data:
try:
user.theme = data['theme'].strip()
db.session.flush()
except IntegrityError:
db.session.rollback()
return bad_request('Invalid theme')
if "language" in data:
try:
user.language = data['language'].strip()
db.session.flush()
except IntegrityError:
db.session.rollback()
return bad_request('Invalid language')
if "did" in data:
try:
user.department_id = int(data['did'])
db.session.flush()
except IntegrityError:
return jsonify({
'status': "FAIL",
'err': "Invalid department id"
})
db.session.commit()
@app.route('/api/user/<user_id>/role/update', methods=["POST"])
@login_required(role_allow=["manager", "administrator"])
def add_new_role(user_id):
    """Add a role to (action=True) or remove a role from (action=False) a user.

    Self-modification is refused, and a user's last remaining role can
    never be removed.
    """
    # There has to be {
    #     "role" : ID_HERE,
    #     "action" : False = Delete, True =
    # }
    data = request.get_json()
    if "action" not in data or "role" not in data:
        return jsonify({
            'status': "FAIL",
            'err': "Missing parameters."
        })
    if not user_id.isdigit():
        return jsonify({
            'status': "FAIL",
            'err': "Invalid user id."
        })
    user = db.session.query(User).filter(User.id == int(user_id)).first()
    if (not user):
        return jsonify({
            'status': "FAIL",
            'err': "Invalid user id."
        })
    else:
        if user.id == request.session.user.id:
            # Refuse to edit the requesting account's own roles.
            return jsonify({
                'status': "FAIL",
                'err': "It's not recommended to change your own roles."
            })
    check_role = user.userrole_ref.filter(
        UserRoles.roleid == data['role']).first()
    if data['action']:
        # Adding a new role
        if check_role:
            # Checking if this role has already been added to this user
            return jsonify({
                'status': "FAIL",
                'err': "This role has already been added."
            })
        try:
            new_role = UserRoles(userid=user.id, roleid=data['role'])
            db.session.add(new_role)
            db.session.commit()
        except IntegrityError:
            # Foreign-key violation: the role id does not exist.
            db.session.rollback()
            return jsonify({
                'status': "FAIL",
                'err': 'Invalid role id'
            })
    else:
        if check_role:
            if user.userrole_ref.count() <= 1:
                # Every account must keep at least one role.
                return jsonify({
                    'status': "FAIL",
                    'err': "Unable to delete the last role of this user."
                })
            db.session.delete(check_role)
            db.session.commit()
        else:
            return jsonify({
                'status': "FAIL",
                'err': "Role doesn't exist"
            })
    return jsonify({
        'status': "OK"
    })
@app.route('/api/user/update/<user_id>', methods=['POST'])
@login_required(allow_personal_user=True)
def update_user_info(user_id):
    """Apply partial profile updates to the user identified by *user_id*.

    Field-level validation and persistence are delegated to
    change_user_info, which returns a response only on failure.
    """
    data = request.get_json()
    if not user_id.isdigit():
        return jsonify({
            'status': "FAIL",
            'err': "Invalid user id."
        })
    user = db.session.query(User).filter(User.id == int(user_id)).first()
    ret = change_user_info(user, data)
    if ret:
        # change_user_info produced an error response - forward it.
        return ret
    return jsonify({
        'status': "OK"
    })
@app.route('/api/user/update', methods=["POST"])
@login_required()
def update_self_info():
    """Apply partial profile updates to the currently authenticated user."""
    data = request.get_json()
    ret = change_user_info(request.session.user, data)
    if ret:
        # change_user_info produced an error response - forward it.
        return ret
    return jsonify({
        'status': "OK"
    })
# @app.route('/api/user/<user_id>/role/update', methods=['POST'])
# @login_required(allow_personal_user=True)
# def update_user_role(user_id):
# if not user_id.isdigit():
# return bad_request('Invalid user id')
# data = request.get_json()
# if "new_role" not in data:
# return bad_request('Missing argument')
# role_list = set(data['new_role'])
# ret = db.session.query(UserRoles).filter(UserRoles.userid == int(user_id)).filter(UserRoles.roleid.in_(
# role_list
# )).all()
# for record in ret:
# role_list.remove(record.roleid)
# try:
# db.session.add_all([
# UserRoles(userid=int(user_id), roleid=new_role) for new_role in role_list
# ])
# db.session.commit()
# except IntegrityError:
# db.session.rollback()
# return bad_request('Invalid role id detected')
# return jsonify({
# 'status': "OK"
# })
@app.route('/api/user/<user_id>/export', methods=["GET", 'POST'])
@login_required(allow_personal_user=True)
def export_user_config(user_id):
    """Export a user's personal settings as JSON (consumed by the import route)."""
    if not user_id.isdigit():
        return bad_request('Invalid user id')
    user = db.session.query(User).filter(User.id == int(user_id)).first()
    if not user:
        return bad_request('User doesn\'t exist')
    # f-strings coerce possibly-None fields to strings; phone and
    # birthday are exported as-is.
    return jsonify({
        "phone": user.phone,
        "theme": f"{user.theme}",
        "language": f"{user.language}",
        "birthday": user.birthday,
        "gender": f"{user.gender}",
        "address": f"{user.address}"
    })
@app.route('/api/user/<user_id>/import', methods=['POST'])
@login_required(allow_personal_user=True)
def import_user_config(user_id):
    """Import a previously exported JSON configuration onto a user account.

    Expects a multipart upload under the "file" field containing the JSON
    produced by /api/user/<id>/export.  Null/empty values are skipped;
    "birthday" strings are parsed into datetimes.
    """
    if not user_id.isdigit():
        return bad_request('Invalid user id')
    if not request.files.get('file'):
        return bad_request('Missing file')
    user = db.session.query(User).filter(User.id == int(user_id)).first()
    if not user:
        return jsonify({
            'status': "FAIL",
            'msg': 'User doesn\'t exist'
        })
    config_file = request.files['file']
    try:
        content = config_file.read().decode()
        json_content = json.loads(content)
        for key, value in json_content.items():
            # Null/empty entries in the export are placeholders - skip them.
            if not value:
                continue
            if key == "birthday":
                value = TimeParser.parse(value.strip())
            setattr(user, key, value)
        db.session.commit()
    except IntegrityError:
        db.session.rollback()
        return jsonify({
            'status': "FAIL",
            'msg': 'Unable to import, either the name or email may have already existed, or the file is malformed'
        })
    except Exception:
        # Was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt; also roll back so the session is left clean.
        db.session.rollback()
        return jsonify({
            'status': "FAIL",
            'msg': 'Invalid configuration'
        })
    return jsonify({
        'status': "OK"
    })
| hackernese/Idea-Manager | backend/api/routes/user.py | user.py | py | 17,364 | python | en | code | 0 | github-code | 36 |
29224787647 | from flask import Flask, request, render_template, redirect, flash
from flask_sqlalchemy import SQLAlchemy
import requests
import sys
import os
app = Flask(__name__)
app.config.from_pyfile("config.py")
db = SQLAlchemy(app)
def return_data_from_api(city_name_value):
    """Fetch current weather for *city_name_value* from OpenWeatherMap.

    Returns the decoded JSON payload; for unknown cities the API answers
    with {"cod": "404", ...}.
    """
    # NOTE(review): the API key is committed in source; move it to an
    # environment variable or config file before publishing this repo.
    api_id = "24034c2fc253da6475cd74bc0b96cf5a"
    api_link = f"http://api.openweathermap.org/data/2.5/weather?q={city_name_value}&APPID={api_id}"
    # Without a timeout, requests.get blocks indefinitely if the upstream
    # API hangs, freezing the Flask worker handling this request.
    return requests.get(api_link, timeout=10).json()
class Weather(db.Model):
    # Persisted list of cities the user tracks; the weather itself is
    # fetched live from the API on every page load, so only the unique
    # city name is stored.
    id = db.Column(db.Integer, primary_key=True)
    city_name = db.Column(db.String(50), unique=True)
db_path = os.path.join("/", "weather.db")
if not os.access(db_path, os.F_OK):
db.create_all()
@app.route('/', methods=["GET", "POST"])
def index():
    """Home page.

    GET renders live weather for every saved city; POST adds a new city
    after validating it exists upstream and is not a duplicate, then
    redirects back to the list.
    """
    if request.method == "GET":
        from_db = Weather.query.all()
        result = []
        for entry in from_db:
            # city_id = entry.id
            city_name = entry.city_name
            dict_with_info = return_data_from_api(city_name)
            city = dict_with_info["name"]
            # The API reports Kelvin; subtracting 273 gives (rough) Celsius.
            temperature = int(dict_with_info["main"]["temp"]) - 273
            state = dict_with_info["weather"][0]["main"]
            result.append({"city_name": city, "temperature": temperature, "state": state})
        return render_template('index.html', info=result, x=from_db)
    elif request.method == "POST":
        city_name = request.form["city_name"]
        # OpenWeatherMap answers {"cod": "404"} for unknown city names.
        if return_data_from_api(city_name)["cod"] == "404":
            flash("The city doesn't exist!")
            return redirect("/")
        # EXISTS subquery: cheap duplicate check without loading the row.
        q = db.session.query(Weather.city_name).filter(Weather.city_name == city_name)
        city_in_db = db.session.query(q.exists()).scalar()
        if not city_in_db:
            new_entry = Weather(city_name=city_name)
            db.session.add(new_entry)
            db.session.commit()
        else:
            flash("The city has already been added to the list!")
        return redirect("/")
@app.route('/delete/<city_name>', methods=['GET', 'POST'])
def delete(city_name):
    """Remove *city_name* from the tracked-city table, then return home.

    Unknown names are ignored: the previous version passed the None
    returned by .first() straight to Session.delete(), which raises.
    (A stray debug print was removed as well.)
    """
    city = db.session.query(Weather).filter(Weather.city_name == city_name).first()
    if city is not None:
        db.session.delete(city)
        db.session.commit()
    return redirect('/')
# don't change the following way to run flask:
if __name__ == '__main__':
    if len(sys.argv) > 1:
        # Optional "host:port" command-line argument, e.g. 0.0.0.0:8080.
        arg_host, arg_port = sys.argv[1].split(':')
        app.run(host=arg_host, port=arg_port)
    else:
        app.run()
| artem-chigin/weather_app | program.py | program.py | py | 2,458 | python | en | code | 0 | github-code | 36 |
22354333965 | import os
import re
import typing
import pymysql
import mlrun.utils
class MySQLUtil(object):
    """Helpers for probing and preparing the MLRun MySQL database."""

    # DSN is read from this environment variable, e.g.
    # mysql+pymysql://user@host:3306/mlrun
    dsn_env_var = "MLRUN_HTTPDB__DSN"
    dsn_regex = (
        r"mysql\+pymysql://(?P<username>.+)@(?P<host>.+):(?P<port>\d+)/(?P<database>.+)"
    )
    check_tables = [
        "projects",
        # check functions as well just in case the previous version used a projects leader
        "functions",
    ]

    def __init__(self, logger: mlrun.utils.Logger):
        self._logger = logger

    def wait_for_db_liveness(self, retry_interval=3, timeout=2 * 60):
        """Block until the database accepts connections (or *timeout* elapses)."""
        self._logger.debug("Waiting for database liveness")
        mysql_dsn_data = self.get_mysql_dsn_data()
        if not mysql_dsn_data:
            # Same guard as _create_connection, so a malformed DSN fails
            # with a clear error instead of a TypeError below.
            raise RuntimeError(f"Invalid mysql dsn: {self.get_dsn()}")
        tmp_connection = mlrun.utils.retry_until_successful(
            retry_interval,
            timeout,
            self._logger,
            True,
            pymysql.connect,
            host=mysql_dsn_data["host"],
            user=mysql_dsn_data["username"],
            port=int(mysql_dsn_data["port"]),
            database=mysql_dsn_data["database"],
        )
        self._logger.debug("Database ready for connection")
        tmp_connection.close()

    def check_db_has_tables(self):
        """Return True when the `mlrun` schema already contains any tables."""
        connection = self._create_connection()
        try:
            with connection.cursor() as cursor:
                cursor.execute(
                    "SELECT COUNT(*) FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA='mlrun';"
                )
                if cursor.fetchone()[0] > 0:
                    return True
            return False
        finally:
            connection.close()

    def set_modes(self, modes):
        """Apply the given global sql_mode string (no-op for empty/"nil"/"none")."""
        if not modes or modes in ["nil", "none"]:
            self._logger.debug("No sql modes were given, bailing", modes=modes)
            return
        connection = self._create_connection()
        try:
            self._logger.debug("Setting sql modes", modes=modes)
            with connection.cursor() as cursor:
                # The mode string is a value, so a bound parameter is correct here.
                cursor.execute("SET GLOBAL sql_mode=%s;", (modes,))
        finally:
            connection.close()

    def check_db_has_data(self):
        """Return True when any of the sanity-check tables contains rows."""
        connection = self._create_connection()
        try:
            with connection.cursor() as cursor:
                for check_table in self.check_tables:
                    # Table names are identifiers, not values: the previous
                    # parameterized form quoted them as string literals
                    # ("SELECT COUNT(*) FROM 'projects'"), which is a MySQL
                    # syntax error.  check_tables is a trusted class
                    # constant, so interpolating it (backquoted) is safe.
                    cursor.execute(f"SELECT COUNT(*) FROM `{check_table}`;")
                    if cursor.fetchone()[0] > 0:
                        return True
            return False
        finally:
            connection.close()

    def _create_connection(self):
        # Parse the DSN on every call so environment changes are picked up.
        mysql_dsn_data = self.get_mysql_dsn_data()
        if not mysql_dsn_data:
            raise RuntimeError(f"Invalid mysql dsn: {self.get_dsn()}")
        return pymysql.connect(
            host=mysql_dsn_data["host"],
            user=mysql_dsn_data["username"],
            port=int(mysql_dsn_data["port"]),
            database=mysql_dsn_data["database"],
        )

    @staticmethod
    def get_mysql_dsn_data() -> typing.Optional[dict]:
        """Parse the DSN env var; None when it does not match dsn_regex."""
        match = re.match(MySQLUtil.dsn_regex, MySQLUtil.get_dsn())
        if not match:
            return None
        return match.groupdict()

    @staticmethod
    def get_dsn() -> str:
        """Return the raw DSN string from the environment ('' when unset)."""
        return os.environ.get(MySQLUtil.dsn_env_var, "")
| mlrun/mlrun | server/api/utils/db/mysql.py | mysql.py | py | 3,216 | python | en | code | 1,129 | github-code | 36 |
69833309223 | # classes and object
# class form:
# name = "adarsh"
# roll_no = "25492"
# branch = "ecs"
# def intro(self): #self maeans vo object jispe ye method/funtion call kiya gya h
# print(f"my name is {self.name} and my roll no. is {self.roll_no}. My branch is {self.branch}")
# a= form()
# a.name="adarsh"
# a.roll_no = "25492"
# a.branch = "ECS"
# print(a.name)
# print(a.roll_no)
# print(a.branch)
# b= form()
# b.name="kashish"
# b.roll_no = "25504"
# b.branch = "ECS"
# print(b.name)
# print(b.roll_no)
# print(a.branch)
# a.intro()
# b.intro()
# constructors
# class form:
# def __init__(self, name, roll_no) -> None:
# self.name= name
# self.roll_no = roll_no
# def info(self):
# print(f"{self.name}'s roll number is {self.roll_no}")
# a=form("adarsh",25492)
# b=form("kashish",25504)
# a.info()
# b.info()
# decorators
# def decorator(hello):
# def a():
# print("good morning")
# hello()
# print("good luck")
# return a
# # @decorator
# def hello():
# print("hello world")
# # hello()
# #or
# decorator(hello)()
# def decorator(add):
# def a(*args,**kwargs):
# print("good morning")
# add(*args,**kwargs)
# print("good luck")
# return a
# @decorator
# def add(c,d):
# print(c+d)
# add(1,2)
# import logging
# def log_function_call(func):
# def decorated(*args, **kwargs):
# logging.info(f"Calling {func.__name__} with args={args}, kwargs={kwargs}")
# result = func(*args, **kwargs)
# logging.info(f"{func.__name__} returned {result}")
# return result
# return decorated
# @log_function_call
# def my_function(a, b):
# print(a + b)
# my_function(1,2)
# getters and setters
class myclass:
    """Demo of @property getters/setters and @staticmethod.

    Note: the valuex10 setter intentionally stores new_value + 1 into
    _age (tutorial behavior), so the property is not round-trip symmetric.
    """

    def __init__(self, num1, num2):
        self._value = num1
        self._age = num2

    def show(self):
        """Print both stored numbers."""
        print(f"num1 is {self._value} and num2 is {self._age}")

    @property
    def valuex10(self):
        """Computed read: ten times the sum of the two stored numbers."""
        return 10 * (self._value + self._age)

    @valuex10.setter
    def valuex10(self, new_value):
        # Assigning to the property only rewrites _age (plus one).
        self._age = new_value + 1

    @staticmethod
    def adarsh(a, b):
        """Plain helper that needs no instance state."""
        return a + b
a = myclass(19, 18)
b = myclass(37, 34)
a.valuex10 = 100 # setting new value by setters
b.valuex10 = 100 # setting new value by setters
a.show()
b.show()
print(a.valuex10)
print(b.valuex10)
print(a.adarsh(1, 4)) # static method
#class variable and instance variable
class form:
    """Demonstrates class variables versus per-instance variables."""

    # Shared by all instances unless shadowed on a specific instance.
    college_name = "Dce"

    def __init__(self, name) -> None:
        self.name = name
        # Every new instance starts with the same default roll number.
        self.roll_no = 89

    def info(self):
        """Print a one-line summary of this student."""
        print(f"{self.name}'s roll number is {self.roll_no} in {self.college_name}")
a = form("adarsh")
a.roll_no = 24 # here roll_no is instance variable which is associated with a
a.college_name="dceggn"
a.info()
b = form("kashish")
b.roll_no = 45
b.info()
# form.info(b) | Adarsh1o1/python-initials | oops/oops_basic.py | oops_basic.py | py | 3,040 | python | en | code | 1 | github-code | 36 |
70118415145 | import webcolors
from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from colormap import rgb2hex
import pandas as pd
from scipy.spatial import KDTree
class ColorNames:
    """Lookup table of CSS/web color names with nearest-name matching.

    ``WebColorMap`` preserves the classic web-color ordering; ties in the
    nearest-color search resolve to the earlier entry, so insertion order
    is significant (e.g. Aqua before Cyan, Fuchsia before Magenta).
    """
    WebColorMap = {
        "AliceBlue": "#F0F8FF", "AntiqueWhite": "#FAEBD7", "Aqua": "#00FFFF",
        "Aquamarine": "#7FFFD4", "Azure": "#F0FFFF", "Beige": "#F5F5DC",
        "Bisque": "#FFE4C4", "Black": "#000000", "BlanchedAlmond": "#FFEBCD",
        "Blue": "#0000FF", "BlueViolet": "#8A2BE2", "Brown": "#A52A2A",
        "BurlyWood": "#DEB887", "CadetBlue": "#5F9EA0", "Chartreuse": "#7FFF00",
        "Chocolate": "#D2691E", "Coral": "#FF7F50", "CornflowerBlue": "#6495ED",
        "Cornsilk": "#FFF8DC", "Crimson": "#DC143C", "Cyan": "#00FFFF",
        "DarkBlue": "#00008B", "DarkCyan": "#008B8B", "DarkGoldenRod": "#B8860B",
        "DarkGray": "#A9A9A9", "DarkGrey": "#A9A9A9", "DarkGreen": "#006400",
        "DarkKhaki": "#BDB76B", "DarkMagenta": "#8B008B", "DarkOliveGreen": "#556B2F",
        "Darkorange": "#FF8C00", "DarkOrchid": "#9932CC", "DarkRed": "#8B0000",
        "DarkSalmon": "#E9967A", "DarkSeaGreen": "#8FBC8F", "DarkSlateBlue": "#483D8B",
        "DarkSlateGray": "#2F4F4F", "DarkSlateGrey": "#2F4F4F", "DarkTurquoise": "#00CED1",
        "DarkViolet": "#9400D3", "DeepPink": "#FF1493", "DeepSkyBlue": "#00BFFF",
        "DimGray": "#696969", "DimGrey": "#696969", "DodgerBlue": "#1E90FF",
        "FireBrick": "#B22222", "FloralWhite": "#FFFAF0", "ForestGreen": "#228B22",
        "Fuchsia": "#FF00FF", "Gainsboro": "#DCDCDC", "GhostWhite": "#F8F8FF",
        "Gold": "#FFD700", "GoldenRod": "#DAA520", "Gray": "#808080",
        "Grey": "#808080", "Green": "#008000", "GreenYellow": "#ADFF2F",
        "HoneyDew": "#F0FFF0", "HotPink": "#FF69B4", "IndianRed": "#CD5C5C",
        "Indigo": "#4B0082", "Ivory": "#FFFFF0", "Khaki": "#F0E68C",
        "Lavender": "#E6E6FA", "LavenderBlush": "#FFF0F5", "LawnGreen": "#7CFC00",
        "LemonChiffon": "#FFFACD", "LightBlue": "#ADD8E6", "LightCoral": "#F08080",
        "LightCyan": "#E0FFFF", "LightGoldenRodYellow": "#FAFAD2", "LightGray": "#D3D3D3",
        "LightGrey": "#D3D3D3", "LightGreen": "#90EE90", "LightPink": "#FFB6C1",
        "LightSalmon": "#FFA07A", "LightSeaGreen": "#20B2AA", "LightSkyBlue": "#87CEFA",
        "LightSlateGray": "#778899", "LightSlateGrey": "#778899", "LightSteelBlue": "#B0C4DE",
        "LightYellow": "#FFFFE0", "Lime": "#00FF00", "LimeGreen": "#32CD32",
        "Linen": "#FAF0E6", "Magenta": "#FF00FF", "Maroon": "#800000",
        "MediumAquaMarine": "#66CDAA", "MediumBlue": "#0000CD", "MediumOrchid": "#BA55D3",
        "MediumPurple": "#9370D8", "MediumSeaGreen": "#3CB371", "MediumSlateBlue": "#7B68EE",
        "MediumSpringGreen": "#00FA9A", "MediumTurquoise": "#48D1CC", "MediumVioletRed": "#C71585",
        "MidnightBlue": "#191970", "MintCream": "#F5FFFA", "MistyRose": "#FFE4E1",
        "Moccasin": "#FFE4B5", "NavajoWhite": "#FFDEAD", "Navy": "#000080",
        "OldLace": "#FDF5E6", "Olive": "#808000", "OliveDrab": "#6B8E23",
        "Orange": "#FFA500", "OrangeRed": "#FF4500", "Orchid": "#DA70D6",
        "PaleGoldenRod": "#EEE8AA", "PaleGreen": "#98FB98", "PaleTurquoise": "#AFEEEE",
        "PaleVioletRed": "#D87093", "PapayaWhip": "#FFEFD5", "PeachPuff": "#FFDAB9",
        "Peru": "#CD853F", "Pink": "#FFC0CB", "Plum": "#DDA0DD",
        "PowderBlue": "#B0E0E6", "Purple": "#800080", "Red": "#FF0000",
        "RosyBrown": "#BC8F8F", "RoyalBlue": "#4169E1", "SaddleBrown": "#8B4513",
        "Salmon": "#FA8072", "SandyBrown": "#F4A460", "SeaGreen": "#2E8B57",
        "SeaShell": "#FFF5EE", "Sienna": "#A0522D", "Silver": "#C0C0C0",
        "SkyBlue": "#87CEEB", "SlateBlue": "#6A5ACD", "SlateGray": "#708090",
        "SlateGrey": "#708090", "Snow": "#FFFAFA", "SpringGreen": "#00FF7F",
        "SteelBlue": "#4682B4", "Tan": "#D2B48C", "Teal": "#008080",
        "Thistle": "#D8BFD8", "Tomato": "#FF6347", "Turquoise": "#40E0D0",
        "Violet": "#EE82EE", "Wheat": "#F5DEB3", "White": "#FFFFFF",
        "WhiteSmoke": "#F5F5F5", "Yellow": "#FFFF00", "YellowGreen": "#9ACD32",
    }
    @staticmethod
    def rgbFromStr(s):
        """Parse a '#RRGGBB' string into an (r, g, b) tuple of ints."""
        return int(s[1:3], 16), int(s[3:5], 16), int(s[5:7], 16)
    @staticmethod
    def findNearestWebColorName(R,G,B):
        """Return the web color name closest to the given RGB triple."""
        return ColorNames.findNearestColorName(R,G,B, ColorNames.WebColorMap)
    @staticmethod
    def findNearestColorName(R,G,B, Map):
        """Return the key in ``Map`` whose color minimizes the scaled L1 distance.

        The common 256 factor does not change the argmin; the first entry in
        map order wins ties.
        """
        mindiff = None
        for candidate, hex_value in Map.items():
            r, g, b = ColorNames.rgbFromStr(hex_value)
            diff = abs(R - r) * 256 + abs(G - g) * 256 + abs(B - b) * 256
            if mindiff is None or diff < mindiff:
                mindiff = diff
                mincolorname = candidate
        return mincolorname
def calculate_white(img):
    """Cluster an image's opaque pixels into 3 dominant colors and describe them.

    Args:
        img: image path of the form "<root>/<category>/<file name>"; the
            category and name fields are parsed from it.  TODO confirm every
            input path has at least three '/'-separated components.

    Returns:
        dict with keys "category", "name", "color" (a "hex(percentage)" CSV of
        all clusters) and "mainColor" (nearest web-color name of the largest
        cluster).
    """
    clusters = 3
    dc = DominantColors(img, clusters)
    colors = dc.dominantColors()
    percentage = dc.get_percentage()
    path_parts = img.split('/')
    category = path_parts[1]
    name = path_parts[2]
    # Describe every cluster while tracking the one with the largest share.
    # (Renamed from the original `max`/`hex`/`dict` locals, which shadowed
    # builtins, and replaced the += string build with a join.)
    descriptions = []
    best_share = 0
    best_color = ""
    for i in range(len(colors)):
        # NOTE(review): `percentage` is keyed by k-means labels; if KMeans fell
        # back to fewer clusters this lookup can KeyError — confirm upstream.
        hex_code = str(rgb2hex(colors[i][0], colors[i][1], colors[i][2]))
        descriptions.append(hex_code + '(' + str(percentage[i]) + ')')
        if best_share < percentage[i]:
            best_share = percentage[i]
            best_color = colors[i]
    col = ','.join(descriptions)
    main_color = ColorNames.findNearestWebColorName(best_color[0], best_color[1], best_color[2])
    result = {
        "category": category,
        "name": name,
        "color": col,
        "mainColor": main_color
    }
    print(result)
    return result
class DominantColors:
    """Extract the dominant colors of an RGBA image with k-means clustering.

    NOTE(review): relies on sklearn's KMeans and PIL; assumes the image has an
    alpha channel (pv[3]) — confirm inputs are RGBA.
    """
    CLUSTERS = None
    IMAGE = None
    COLORS = None
    LABELS = None
    def __init__(self, image, clusters=3):
        # `image` is a file path until dominantColors() replaces it with pixels.
        self.CLUSTERS = clusters
        self.IMAGE = image
    def dominantColors(self):
        # read image
        from PIL import Image
        im = Image.open(self.IMAGE, 'r')
        pixel_values = list(im.getdata())
        # Keep only non-transparent pixels, dropping the alpha component.
        pixels = []
        for pv in pixel_values:
            if (pv[3] > 0):
                pixels.append(pv[:-1])
        # Fully transparent image: fall back to a single black pixel.
        if len(pixels) == 0:
            pixels.append([0,0,0])
        img = self.IMAGE  # original path, kept only for the error message below
        # save image after operations
        self.IMAGE = pixels
        # using k-means to cluster pixels
        diff = 0
        done = False
        # Too few pixels for the requested cluster count: duplicate them.
        if len(pixels) < self.CLUSTERS:
            self.IMAGE = []
            for p in pixels:
                for r in range(self.CLUSTERS * 10):
                    self.IMAGE.append(p)
        # Retry with fewer clusters each time KMeans rejects the input.
        # NOTE(review): if the loop exits via `break`, `kmeans` is unbound and
        # the attribute accesses below raise — confirm this path is unreachable.
        while not done:
            try:
                kmeans = KMeans(n_clusters=self.CLUSTERS - diff)
                kmeans.fit(self.IMAGE)
                done = True
            except ValueError:
                print("------------------------ERROR---------------------------------------" + str(img))
                diff = diff + 1
                if diff > self.CLUSTERS:
                    break
        # the cluster centers are our dominant colors.
        self.COLORS = kmeans.cluster_centers_
        # save labels
        self.LABELS = kmeans.labels_
        # returning after converting to integer from float
        return self.COLORS.astype(int)
    def get_percentage(self):
        """Return {cluster label: percentage of pixels} for the last fit."""
        from collections import Counter, defaultdict  # NOTE: defaultdict unused
        total = 0
        counter = {}
        c = Counter(self.LABELS)
        for key in sorted(c):
            counter[key] = c[key]
        for k, v in counter.items():
            total = total + v
        percentage = {}
        for k, v in counter.items():
            percentage[k] = v / total * 100
        return percentage
import os
# Collect every file two levels under "test2" ("test2/<dir>/<file>").
# NOTE(review): the inner os.walk re-walks each subtree and shadows the outer
# loop variables (`root`, `dirs`, `files`); `dir`/`file` also shadow builtins.
images = []
for root, dirs, files in os.walk("test2"):
    for dir in dirs:
        for root, dirs, files in os.walk("test2/" + dir):
            for file in files:
                images.append("test2/" + dir + "/" + file)
n = len(images)
from multiprocessing.dummy import Pool as ThreadPool
# Thread (not process) pool: fine here because PIL/sklearn release the GIL
# for much of the work — TODO confirm 16 workers suits the host.
pool = ThreadPool(16)
results = pool.map(calculate_white, images)
results = pd.DataFrame(results)
print(results)
results.to_csv('colors.csv', encoding='utf-8', index=False)
| danydepo/ffs-backend | TrainAndTest/Color/color_detection.py | color_detection.py | py | 10,216 | python | en | code | 0 | github-code | 36 |
24159423427 | import subprocess
import os
import requests
# SECURITY NOTE(review): a live-looking Yandex Translate API key is committed
# here — move it to an environment variable and revoke this one.
KEY = 'trnsl.1.1.20161216T160124Z.4a07c4b6a2f01566.ade260e6c684818698899fd08a9c15d72faca843'
URL = 'https://translate.yandex.net/api/v1.5/tr.json/translate'
# directory holding the source text files
directory_source = 'Source'
# directory receiving the translated files
directory_result = 'Result'
# create the output directory (POSIX-only: shells out to `mkdir -p`)
create_directory_out = subprocess.run(['mkdir', '-p', './Result'])
# names of the input files to translate
list_files = os.listdir(directory_source)
# язык с которого перевести;
def choice_language(file, lang_out):
    """Build the Yandex "<src>-<dst>" direction string for a source file.

    'DE.txt' and 'ES.txt' map to German and Spanish; any other name is
    treated as French.
    """
    source_prefixes = {'DE.txt': 'de-', 'ES.txt': 'es-'}
    return source_prefixes.get(file, 'fr-') + lang_out
# чтение текста из файла для перевода
def import_text(file):
    """Read the named file from ``directory_source`` and return its lines."""
    with open(os.path.join(directory_source, file)) as handle:
        return handle.readlines()
# Функция перевода
def translate_me(mytext, lang):
    """Translate ``mytext`` through the Yandex Translate HTTP API.

    docs: https://tech.yandex.ru/translate/doc/dg/reference/translate-docpage/
    GET https://translate.yandex.net/api/v1.5/tr.json/translate with
    key=<API key>, text=<text to translate>, lang=<translation direction>.

    :param mytext: text to translate (a string or a list of lines).
    :param lang: direction string such as "de-en".
    :return: decoded JSON response (dict) from the API.
    """
    params = {
        'key': KEY,
        'text': mytext,
        'lang': lang,
    }
    response = requests.get(URL, params=params)
    return response.json()
# запись текста в файл после перевода
def export_text(file, text):
    """Write the translated ``text`` under ``directory_result`` and log it."""
    destination = os.path.join(directory_result, file)
    with open(destination, 'w') as handle:
        handle.write(text)
    print('Переведен и сохранен файл ', destination)
# Пакетный перевод файлов
# Batch-translate every file found in the "Source" directory.
second_lang = input('Введите язык, на который следует перевести текст файлов, находящихся в папке "Source": ')
for file_name in list_files:
    lang_pair = choice_language(file_name, second_lang) # build the "<src>-<dst>" direction pair
    text_for_translate = import_text(file_name) # read the source text lines
    json = translate_me(text_for_translate, lang_pair) # API response; NOTE: local name would shadow a json module import
    text_after_translate = ' '.join(json['text']) # flatten the translated fragments
    export_text(file_name, text_after_translate) # write the translated file
| pashkovsky/PY2PM | PY3_2/Homework/translator_files.py | translator_files.py | py | 3,013 | python | ru | code | 0 | github-code | 36 |
# Romanian word pool for the hangman game.
# Fix: two entries in the original list ('intrebare ', 'raspuns ') carried a
# trailing space, which made them impossible to guess letter-by-letter; the
# stray whitespace is stripped here.  The duplicate 'pamant' entry is kept so
# random-draw probabilities are unchanged.
word_list = [
    'seara',
    'deseuri',
    'intrebare',
    'paragraf',
    'distanta',
    'vultur',
    'maimuta',
    'batran',
    'varsta',
    'birou',
    'schimbare',
    'frica',
    'instalatie',
    'raspuns',
    'limba',
    'munca',
    'doctor',
    'lift',
    'ochi',
    'iesire',
    'echipament',
    'afara',
    'carte',
    'scrie',
    'bandaj',
    'barca',
    'banca',
    'salut',
    'lumina',
    'regiune',
    'comanda',
    'opera',
    'biologie',
    'frunza',
    'pamant',
    'litera',
    'caracter',
    'serviciu',
    'jungla',
    'inghetata',
    'fier',
    'energie',
    'pamant',
    'eticheta',
    'culoare',
    'fereastra',
    'vacanta',
    'figura',
    'deget',
    'aeroport',
    'avion',
    'bucurie',
    'zapada',
    'gradina',
    'pericol',
    'secret',
]
| zioan/hangman_romanian | words.py | words.py | py | 827 | python | es | code | 0 | github-code | 36 |
34547539160 | import cv2
# Open camera device 0 (the default webcam).
cap = cv2.VideoCapture(0)
while(True):
    # Grab one frame from the camera.
    ret, frame = cap.read()
    cv2.putText(frame, "aaqqqqqqqqqqqq", (0,100), cv2.FONT_HERSHEY_SIMPLEX,1, (0,0,0))
    # Show the frame; waitKey(1) pumps the GUI event loop.
    cv2.imshow('frame', frame)
    cv2.waitKey(1)
    # Exit when the window was closed (property query returns -1 then).
    if cv2.getWindowProperty('frame', cv2.WND_PROP_AUTOSIZE) == -1:
        break
# Release the camera handle.
cap.release()
# Close all OpenCV windows.  (Trailing fields are dataset residue, kept as-is.)
cv2.destroyAllWindows() | Amenoimi/Simple_OCR | cv.py | cv.py | py | 456 | python | zh | code | 0 | github-code | 36
43346932566 | import datetime
from discord import client, Message
import aiohttp
import random
import time
import typing
import json
import humanize
import discord
from discord.ext import commands
start_time = time.time()  # process start; used by the !ping uptime report
intents = discord.Intents.default()  # NOTE(review): built but unused — the Bot below uses Intents.all()
bot = commands.Bot(command_prefix="!", intents=discord.Intents.all())
# FFmpeg/stream options; appears unused in this file — TODO confirm before removing.
ffmpeg_options = {
    'before_options': '-reconnect 1 -reconnect_streamed 1 -reconnect_delay_max 5',
    'options': '-vn',
    'format': 'bestaudio[ext=m4a]'
}
@bot.event
async def on_guild_join(guild):
    """Greet a newly joined guild in its system channel.

    Bug fix: discord.py dispatches ``on_guild_join`` with a single ``guild``
    argument; the original ``(ctx, guild)`` signature made every dispatch fail
    with a TypeError.
    """
    channel = guild.system_channel
    if channel is not None:
        await channel.send(f'Successfully joined! You can find a command list here - https://v1ss0nd.github.io/discord-help , \nYou have to create "Moderator" role to use bot moderation feature, make sure you gave it to your server moderators!')
# Per-user "last message" cache; `stalk` reads the JSON snapshot written below.
# Bug fix: the original mutated `last_seen` without ever defining it, so the
# first incoming message raised NameError.
last_seen = {}

@bot.event
async def on_message(message: Message):
    """Record the author's latest message, persist the cache, run commands."""
    if message.author == bot.user:
        return
    user_id = message.author.id
    content = message.content
    time_ = datetime.datetime.now()
    last_seen[user_id] = (content, time_)
    # Serialize timestamps as strings so `stalk` can strptime them back.
    # NOTE(review): rewriting the whole file on every message is O(users) per
    # message — consider batching if the bot serves busy guilds.
    last_seen_str = {k: (v[0], v[1].strftime("%Y-%m-%d %H:%M:%S")) for k, v in last_seen.items()}
    with open("last_seen.json", "w") as f:
        json.dump(last_seen_str, f)
    await bot.process_commands(message)
@bot.event
async def on_ready():
    # Advertise the help command in the bot's "Playing ..." presence.
    await bot.change_presence(activity=discord.Game(name='Type "!help" to DM with me to see a list with supported commands'))
@bot.command()
async def stalk(ctx, user: discord.Member):
    """Report when ``user`` last spoke, from the last_seen.json snapshot."""
    with open("last_seen.json", "r") as f:
        last_seen = json.load(f)
    user_id = str(user.id)  # JSON keys are strings
    if user_id in last_seen:
        content, time = last_seen[user_id]
        # NOTE: the local name shadows the imported `time` module in this scope.
        time_dt = datetime.datetime.strptime(time, "%Y-%m-%d %H:%M:%S")
        now = datetime.datetime.now()
        delta = now - time_dt
        delta_str = humanize.precisedelta(delta)
        delta_str = delta_str.replace(",", "").strip() + " ago"
        await ctx.reply(f"{user.mention} was last seen in chat {delta_str}, their last message: {content}")
    else:
        await ctx.reply(f"i havent seen any messages from {user.mention}.")
@bot.command()
async def ping(ctx):
    """Reply with the bot's uptime and gateway latency."""
    seconds_up = time.time() - start_time
    latency_ms = bot.latency * 1000
    hours, leftover = divmod(seconds_up, 3600)
    minutes, seconds = divmod(leftover, 60)
    await ctx.reply(f'pong! Current uptime is {int(hours)} hours {int(minutes)} minutes {int(seconds)} seconds. Latency is {round(latency_ms)} ms')
# Replace the stock help command with a minimal one that embeds the hosted
# command reference page.
bot.remove_command('help')
class CustomHelpCommand(commands.MinimalHelpCommand):
    async def send_pages(self):
        # Send every help page as an embed titled with / linking to the docs.
        destination = self.get_destination()
        embed = discord.Embed(title="https://v1ss0nd.github.io/discord-help", url="https://v1ss0nd.github.io/discord-help")
        for page in self.paginator.pages:
            embed.description = page
            await destination.send(embed=embed)
bot.help_command = CustomHelpCommand()
@bot.command()
@commands.has_role("Moderator")
async def spam(ctx, count: int, *message):
    """Send ``message`` to the current channel ``count`` times (moderators only)."""
    try:
        # NOTE(review): " ".join over string args cannot raise ValueError, so
        # this except branch is dead code.
        message = " ".join(message)
    except ValueError:
        return
    for i in range(count):
        await ctx.send(message)
@bot.command(description="info about provided user")
async def user(ctx, user: typing.Optional[commands.UserConverter] = None):
    """Show an embed with account info for ``user`` (default: the caller)."""
    if user is None:
        user = ctx.author
    guild = ctx.guild
    member = guild.get_member(user.id)  # None when the user is not in this guild
    embed = discord.Embed()
    embed.title = f"{user.name}#{user.discriminator}"
    embed.description = f"{user.mention}"
    embed.color = discord.Color.random()
    embed.add_field(name="ID", value=user.id)
    embed.add_field(name="Created at", value=user.created_at.strftime("%Y-%m-%d %H:%M:%S"))
    if member is not None:
        # Guild-specific fields; roles[0] is @everyone, so it is skipped.
        embed.add_field(name="Nickname", value=member.nick or "None")
        embed.add_field(name="Joined at", value=member.joined_at.strftime("%Y-%m-%d %H:%M:%S"))
        embed.add_field(name="Roles", value=", ".join(role.name for role in member.roles[1:]) or "None")
    await ctx.reply(embed=embed)
@bot.group()
@commands.has_role("Moderator")
async def role(ctx):
    """Role-management command group; replies with usage when bare."""
    if ctx.invoked_subcommand is None:
        await ctx.reply("Please specify a valid subcommand: list, create, delete, give, remove, color, rename")
@role.command()
async def display(ctx, role_name: str):
    """Toggle whether the named role is displayed separately (hoisted)."""
    guild = ctx.guild
    role = discord.utils.get(guild.roles, name=role_name)
    if role is None:
        await ctx.reply(f"Role {role_name} not found")
        return
    current_hoist = role.hoist
    new_hoist = not current_hoist
    await role.edit(hoist=new_hoist)
    await ctx.reply(f"Separate displaying of {role_name} switched to {new_hoist}.")
@role.command()
async def create(ctx, name, color: discord.Color):
    """Create a role with the given name and color."""
    await ctx.guild.create_role(name=name, color=color)
    await ctx.reply(f"Created role {name}")
@role.command()
async def delete(ctx, *, name):
    """Delete the role matching ``name`` (consumes the rest of the message)."""
    role = discord.utils.get(ctx.guild.roles, name=name)
    if role:
        await role.delete()
        await ctx.reply(f"Deleted role {name}")
    else:
        await ctx.reply(f"Role {name} not found")
@role.command()
async def give(ctx, role: discord.Role, member: discord.Member,):
    """Assign ``role`` to ``member``."""
    await member.add_roles(role)
    await ctx.reply(f"Gave {role.name} to {member.name}")
@role.command()
async def remove(ctx, role: discord.Role, member: discord.Member, ):
    """Take ``role`` away from ``member``."""
    await member.remove_roles(role)
    await ctx.reply(f"Removed {role.name} from {member.name}")
@role.command()
async def list(ctx):
    """Reply with a comma-separated list of every role in the guild."""
    # NOTE: the command name shadows the builtin `list` at module level.
    rolelist = [role.name for role in ctx.guild.roles]
    roles = ", ".join(rolelist)
    await ctx.reply(f"{roles}")
@role.command()
async def color(ctx, role: discord.Role, color: discord.Color):
    """Change ``role``'s display color."""
    await role.edit(color=color)
    await ctx.reply(f"Changed the color of {role.name} to {color}")
@role.command()
async def rename(ctx, role: discord.Role, *, name: str):
    """Rename ``role`` to ``name`` (consumes the rest of the message)."""
    await role.edit(name=name)
    await ctx.reply(f"Changed the name of {role.mention} to {name}")
@role.command()
async def move(ctx, role_name: str, direction: str):
    """Move a role to the top (just under the bot's top role) or the bottom."""
    guild = ctx.guild
    role = discord.utils.get(guild.roles, name=role_name)
    if role is None:
        await ctx.reply(f"Role not found.")
        return
    if direction not in ["top", "bottom"]:
        await ctx.reply(f"Invalid direction. Please use 'top' or 'bottom'.")
        return
    bot_member = guild.get_member(bot.user.id)
    bot_top_role = bot_member.top_role
    if direction == "top":
        # The bot cannot place a role above its own top role.
        position = bot_top_role.position - 1
    else:
        # Just above the lowest non-managed role.
        position = min(r.position for r in guild.roles if not r.managed) + 1
    await role.edit(position=position)
    await ctx.reply(f"{role_name} moved to {direction}.")
@bot.group()
@commands.has_role("Moderator")
async def member(ctx):
    """Moderation command group; replies with usage when bare."""
    if ctx.invoked_subcommand is None:
        await ctx.reply("Please specify a valid subcommand: mute, ban, unban, kick")
@member.command()
async def ban(ctx, member: discord.Member, *, reason=None):
    """Ban ``member`` with an optional reason."""
    await member.ban(reason=reason)
    await ctx.reply(f"{member} has been banned for {reason}.")
@member.command()
async def unban(ctx, id: int):
    """Unban the user with the given numeric ID.

    Bug fix: the original called ``client.fetch_user`` where ``client`` is the
    imported ``discord.client`` module, not a connected client — every
    invocation raised AttributeError.  Fetch through the running ``bot``.
    """
    user = await bot.fetch_user(id)  # NOTE: parameter name shadows builtin `id`
    await ctx.guild.unban(user)
    await ctx.reply(f"{user} has been unbanned.")
@member.command()
async def kick(ctx, member: discord.Member, *, reason=None):
    """Kick ``member`` with an optional reason."""
    await member.kick(reason=reason)
    await ctx.reply(f'User {member} has been kicked.')
@member.command()
@commands.has_permissions(manage_messages=True)
async def mute(ctx, member: discord.Member):
    """Give ``member`` the "Muted" role, creating the role if it is missing.

    Bug fix: the original created the role but left the local ``role`` bound
    to None, so ``add_roles(None)`` failed; keep the handle that
    ``create_role`` returns.
    """
    role = discord.utils.get(ctx.guild.roles, name="Muted")
    guild = ctx.guild
    if role is None:
        perms = discord.Permissions(send_messages=False, speak=False)
        role = await guild.create_role(name="Muted", permissions=perms)
        await member.add_roles(role)
        await ctx.reply("Successfully created Muted role and assigned it to mentioned user.")
    else:
        await member.add_roles(role)
        await ctx.reply(f"Has been muted {member}")
@bot.command()
async def join(context: commands.Context) -> discord.VoiceProtocol:
    """Connect (or move) the bot to the caller's voice channel; return the client.

    NOTE(review): when the caller is not in a voice channel this returns the
    result of ``context.reply`` (a Message), not a voice client — callers such
    as ``play``/``playfile`` then call ``.play`` on it and fail.  Confirm and
    handle that case explicitly.
    """
    if context.author.voice is None:
        return await context.reply("You are not in a voice channel.")
    channel = context.author.voice.channel
    client = context.voice_client
    if client is None:
        client = await channel.connect()
    if client.is_connected() and client.channel != channel:
        await client.move_to(channel)
    return client
@bot.command()
async def leave(ctx):
    """Disconnect the bot from the guild's voice channel, if connected."""
    guild = ctx.guild
    if guild.voice_client is not None:
        await guild.voice_client.disconnect()
        await ctx.reply(f"Left from the voice channel")
    else:
        await ctx.reply("I am not in a voice channel.")
@bot.command()
async def play(ctx, path: str, repeat: bool = False):
    """Play a URL or a local file path in the caller's voice channel.

    NOTE(review): ``pafy`` is referenced below but never imported anywhere in
    this file, so the URL branch raises NameError at runtime.  The ``repeat``
    flag is only stored on the voice client; nothing in this file loops on it.
    """
    vc = await join(ctx)
    if path.startswith("http"):
        song = pafy.new(path)
        audio = song.getbestaudio()
        source = discord.FFmpegPCMAudio(audio.url)
    else:
        # NOTE(review): plays an arbitrary server-side path supplied by the user.
        source = discord.FFmpegPCMAudio(path)
    vc.loop = repeat
    vc.play(source)
    await ctx.reply(f"Playing {path}")
@bot.command()
async def stop(ctx):
    """Stop the current voice playback, if any."""
    voice = ctx.voice_client
    if not (voice and voice.is_connected()):
        await ctx.reply("There is nothing playing.")
        return
    voice.stop()
    await ctx.reply("Stopped playing.")
@bot.command()
async def playfile(context: commands.Context, repeat: bool = False) -> None:
    """Download the message's first attachment and play it in voice.

    NOTE(review): raises IndexError when the message has no attachment.
    """
    client = await join(context)
    attachment = context.message.attachments[0]
    filename = await download_audio(attachment.url)
    client.loop = repeat
    client.play(discord.FFmpegPCMAudio(filename))
    await context.reply(f"Playing __{attachment.filename.replace('_', ' ')}__")
async def download_audio(url: str) -> str:
    """Download ``url`` to a local file named audioNNNN.<ext> and return the name.

    NOTE(review): the 4-digit random name can collide between concurrent
    downloads and the files are never cleaned up; ``response.url.suffix``
    relies on aiohttp's yarl URL type — confirm against the pinned version.
    """
    async with aiohttp.ClientSession() as session:
        async with session.get(url) as response:
            content = await response.read()
            extension = response.url.suffix
    filename = f"audio{random.randint(1000, 9999)}{extension}"
    with open(filename, "wb") as file:
        file.write(content)
    return filename
# Blocking entry point.  'TOKEN' is a placeholder — load the real token from
# an environment variable or config file, never commit it to source control.
bot.run('TOKEN')
| v1ss0nd/vsndbot-Discord | bot.py | bot.py | py | 9,975 | python | en | code | 0 | github-code | 36 |
21216189520 | import math
def is_same(p1, p2):
    """Return True when the two points share both coordinates."""
    return p1[0] == p2[0] and p1[1] == p2[1]

def mod_inverse(a, p):
    """Return the modular inverse of ``a`` modulo ``p``.

    Generalized from the original Fermat-little-theorem form (valid only for
    prime ``p``) to the three-argument ``pow`` (Python 3.8+), which works for
    any modulus coprime to ``a`` and raises ValueError when no inverse exists.
    """
    return pow(a, -1, p)

def add_points(p, q, curve, mod):
    """Add two points on y^2 = x^3 + curve[0]*x + curve[1] over GF(mod).

    Points are (x, y) tuples with coordinates reduced modulo ``mod`` (assumed
    odd prime); the point at infinity is (math.inf, math.inf).
    """
    identity = (math.inf, math.inf)
    if is_same(p, identity):
        return q
    if is_same(q, identity):
        return p
    # Inverse points: same x, y-coordinates that cancel modulo ``mod``.
    # Bug fix: the original compared p[1] == -q[1] directly, which is never
    # true for reduced (non-negative) coordinates, so P + (-P) fell through
    # to the chord formula and produced a wrong finite point.  This test also
    # covers doubling a point with y == 0 (vertical tangent).
    if p[0] == q[0] and (p[1] + q[1]) % mod == 0:
        return identity
    if is_same(p, q):
        lm = (3 * pow(p[0], 2) + curve[0]) * mod_inverse(2 * p[1], mod)
    else:
        lm = (q[1] - p[1]) * mod_inverse(q[0] - p[0], mod)
    rx = (pow(lm, 2) - p[0] - q[0]) % mod
    ry = (lm * (p[0] - rx) - p[1]) % mod
    return (rx, ry)
# Curve parameters from the CryptoHack "Point Addition" challenge:
# y^2 = x^3 + 497x + 1768 over GF(9739); C holds (a, b).
P = (493, 5564)
Q = (1539, 4742)
R = (4403,5202)
C = (497, 1768)
MOD = 9739
# Print S = P + P + Q + R.  (Trailing fields are dataset residue, kept as-is.)
print(add_points(add_points(add_points(P, P, C, MOD), Q,C,MOD), R, C, MOD)) | marek-hradil/cryptohack-rubbish | PointAddition/main.py | main.py | py | 792 | python | en | code | 0 | github-code | 36
19150802136 | '''
Reference:
[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Deep Residual Learning for Image Recognition. arXiv:1512.03385
'''
import torch
class ShallowResNetBlock(torch.nn.Module):
    '''
    Basic (two 3x3 conv) residual block used by ResNet-18/34 [1].

    input -> Conv3x3 -> BN -> ReLU -> Conv3x3 -> BN + ShortCut -> Relu
              |                                       |
              ------------- (Conv1x1 -> BN ->) -----------------

    Args:
        in_channel: input channel count.
        out_channel: output channel count.
        downsample: when True, halve the spatial size (stride-2 first conv,
            matched by a stride-2 1x1 projection on the shortcut).
    '''
    def __init__(self, in_channel, out_channel, downsample=False):
        super(ShallowResNetBlock, self).__init__()
        # main branch
        # block1
        if downsample:
            self.conv1 = torch.nn.Conv2d(in_channel, out_channel, kernel_size=3, stride=2, padding=1, bias=False)
        else:
            self.conv1 = torch.nn.Conv2d(in_channel, out_channel, kernel_size=3, padding=1, bias=False)
        self.bn1 = torch.nn.BatchNorm2d(out_channel)
        self.relu1 = torch.nn.ReLU(inplace=True)
        # block2 (no ReLU here: it is applied after the residual add)
        self.conv2 = torch.nn.Conv2d(out_channel, out_channel, kernel_size=3, padding=1, bias=False)
        self.bn2 = torch.nn.BatchNorm2d(out_channel)
        # shortcut
        # if the main branch is downsampled the shortcut branch will be downsampled (use conv1x1) too
        if downsample:
            self.shortcut_conv1 = torch.nn.Conv2d(in_channel, out_channel, kernel_size=1, stride=2, bias=False)
            self.shortcut_bn1 = torch.nn.BatchNorm2d(out_channel)
        elif in_channel != out_channel:
            # channel count changes without downsampling: 1x1 projection
            self.shortcut_conv1 = torch.nn.Conv2d(in_channel, out_channel, kernel_size=1, stride=1, bias=False)
            self.shortcut_bn1 = torch.nn.BatchNorm2d(out_channel)
        else:
            # identity shortcut
            self.shortcut_conv1 = None
            self.shortcut_bn1 = None
        # merge
        self.relu_out = torch.nn.ReLU(inplace=True)
    def forward(self, inputs):
        # main
        main = self.conv1(inputs)
        main = self.bn1(main)
        main = self.relu1(main)
        main = self.conv2(main)
        main = self.bn2(main)
        # shortcut
        if self.shortcut_conv1 is not None:
            shortcut = self.shortcut_conv1(inputs)
            shortcut = self.shortcut_bn1(shortcut)
        else:
            shortcut = inputs
        # merge
        outs = self.relu_out(main+shortcut)
        return outs
class DeepResNetBlock(torch.nn.Module):
    '''
    Bottleneck residual block used by ResNet-50/101/152 [1].

    input -> Conv1x1 -> BN -> ReLU -> Conv3x3 -> BN -> ReLU -> Conv1x1 -> BN + ShortCut -> Relu
              |                                                                |
              -------------------------- (Conv1x1 -> BN ->) ---------------------------

    Bug fixes relative to the original: ``forward`` was declared without
    ``self`` (so every call through the module failed) and never returned its
    result.

    Args:
        in_channel: input channel count.
        out_channel: output channel count (internal width is out_channel // 4).
        downsample: when True, halve the spatial size (stride-2 middle conv,
            matched by a stride-2 1x1 projection on the shortcut).
    '''
    def __init__(self, in_channel, out_channel, downsample=False):
        super(DeepResNetBlock, self).__init__()
        # main branch: 1x1 reduce -> 3x3 -> 1x1 expand (bottleneck ratio 4)
        mid_channel = int(out_channel / 4)
        # block1 (in_channel -> mid_channel)
        self.conv1 = torch.nn.Conv2d(in_channel, mid_channel, kernel_size=1, bias=False)
        self.bn1 = torch.nn.BatchNorm2d(mid_channel)
        self.relu1 = torch.nn.ReLU(inplace=True)
        # block2: the only spatially downsampling layer (stride 2 when asked)
        if downsample:
            self.conv2 = torch.nn.Conv2d(mid_channel, mid_channel, kernel_size=3, padding=1, stride=2, bias=False)
        else:
            self.conv2 = torch.nn.Conv2d(mid_channel, mid_channel, kernel_size=3, padding=1, bias=False)
        self.bn2 = torch.nn.BatchNorm2d(mid_channel)
        self.relu2 = torch.nn.ReLU(inplace=True)
        # block3 (mid_channel -> out_channel), no ReLU before the residual add
        self.conv3 = torch.nn.Conv2d(mid_channel, out_channel, kernel_size=1, bias=False)
        self.bn3 = torch.nn.BatchNorm2d(out_channel)
        # shortcut: projection when shape changes, identity otherwise
        if downsample:
            self.shortcut_conv1 = torch.nn.Conv2d(in_channel, out_channel, kernel_size=1, stride=2, bias=False)
            self.shortcut_bn1 = torch.nn.BatchNorm2d(out_channel)
        elif in_channel != out_channel:
            self.shortcut_conv1 = torch.nn.Conv2d(in_channel, out_channel, kernel_size=1, bias=False)
            self.shortcut_bn1 = torch.nn.BatchNorm2d(out_channel)
        else:
            self.shortcut_conv1 = None
            self.shortcut_bn1 = None
        # merge
        self.relu_out = torch.nn.ReLU(inplace=True)
    def forward(self, inputs):
        # main branch
        main = self.conv1(inputs)
        main = self.bn1(main)
        main = self.relu1(main)
        main = self.conv2(main)
        main = self.bn2(main)
        main = self.relu2(main)
        main = self.conv3(main)
        main = self.bn3(main)
        # shortcut branch
        if self.shortcut_conv1 is not None:
            shortcut = self.shortcut_conv1(inputs)
            shortcut = self.shortcut_bn1(shortcut)
        else:
            shortcut = inputs
        # merge
        outs = self.relu_out(main + shortcut)
        return outs
# SHALLOW_BLOCK = 0
# DEEP_BLOCK = 1
class ResNet(torch.nn.Module):
    """Common stem + head for ResNets; subclasses fill in block1..block4."""
    def __init__(self, input_size, res_out_channel, num_classes, resnet_type, **kargs):
        '''
        Args:
            input_size: input pic size (batch size not included, eg: (3,32,32))
            res_out_channel: out_channel of res block, 512 (resnet18/34) or 2048 (resnet > 50)
            num_classes: number of output classes for the final linear layer
            resnet_type:
                "7x7": first conv block is 7x7 with two downsample layers (Conv2d layer and MaxPool2d layer);
                       overall stride 32 (ImageNet-style input sizes)
                "3x3": first conv block is 3x3 and there is no downsample layer in begin;
                       overall stride 8 (CIFAR-style input sizes)
        '''
        super(ResNet, self).__init__(**kargs)
        self.input_size = input_size
        self.type = resnet_type
        if resnet_type == "7x7":
            # stage 1
            self.conv1 = torch.nn.Conv2d(input_size[0], 64, kernel_size=7, stride=2, padding=3, bias=False)
            self.bn1 = torch.nn.BatchNorm2d(64)
            self.relu1 = torch.nn.ReLU(inplace=True)
            self.pool1 = torch.nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
            # stage 6: global average pool over the /32 feature map
            self.pool2 = torch.nn.AvgPool2d(int(input_size[1]/32))
            self.flatten = torch.nn.Flatten()
            self.linear1 = torch.nn.Linear(res_out_channel, num_classes)
        elif resnet_type == "3x3":
            # stage 1
            self.conv1 = torch.nn.Conv2d(input_size[0], 64, kernel_size=3, stride=1, padding=1, bias=False)
            self.bn1 = torch.nn.BatchNorm2d(64)
            self.relu1 = torch.nn.ReLU(inplace=True)
            # self.pool1 = torch.nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
            # stage 6: global average pool over the /8 feature map
            self.pool2 = torch.nn.AvgPool2d(int(input_size[1]/8))
            self.flatten = torch.nn.Flatten()
            self.linear1 = torch.nn.Linear(res_out_channel, num_classes)
        else:
            raise RuntimeError("Invalid resnet type")
        # stage 2~5: placeholders, replaced by Sequential stacks in subclasses
        self.block1 = None
        self.block2 = None
        self.block3 = None
        self.block4 = None
    def forward(self, inputs):
        # stage 1
        outputs = self.conv1(inputs)
        outputs = self.bn1(outputs)
        outputs = self.relu1(outputs)
        if self.type == "7x7":
            outputs = self.pool1(outputs)
        # stage 2-5
        outputs = self.block1(outputs)
        outputs = self.block2(outputs)
        outputs = self.block3(outputs)
        outputs = self.block4(outputs)
        # stage 6
        outputs = self.pool2(outputs)
        outputs = self.flatten(outputs)
        outputs = self.linear1(outputs)
        return outputs
class ShallowResNet(ResNet):
    """ResNet built from basic two-conv blocks (the ResNet-18/34 family)."""
    def __init__(self, input_size, num_classes, block_num_list, resnet_type="3x3", **kargs):
        super(ShallowResNet, self).__init__(input_size, 512, num_classes, resnet_type, **kargs)
        # every one of the four stages must contain at least one block
        for stage_index in range(4):
            assert block_num_list[stage_index] > 0, "block num needs greater than 0!"
        def build_stage(in_ch, out_ch, block_count, downsample_first):
            """Stack `block_count` basic blocks; only the first may downsample."""
            blocks = [ShallowResNetBlock(in_ch, out_ch, downsample=downsample_first)]
            blocks.extend(ShallowResNetBlock(out_ch, out_ch) for _ in range(block_count - 1))
            return torch.nn.Sequential(*blocks)
        # stages 2-5: channels double while resolution halves from stage 3 on
        self.block1 = build_stage(64, 64, block_num_list[0], False)
        self.block2 = build_stage(64, 128, block_num_list[1], True)
        self.block3 = build_stage(128, 256, block_num_list[2], True)
        self.block4 = build_stage(256, 512, block_num_list[3], True)
class DeepResNet(ResNet):
    """ResNet built from bottleneck blocks (the ResNet-50/101 family)."""
    def __init__(self, input_size, num_classes, block_num_list, resnet_type="3x3", **kargs):
        super(DeepResNet, self).__init__(input_size, 2048, num_classes, resnet_type, **kargs)
        # assert: every stage needs at least one block
        for i in range(4):
            assert block_num_list[i] > 0, "block num needs greater than 0!"
        # stage 2: 64 -> 256 channels, no spatial downsampling
        block1_list = []
        for i in range(block_num_list[0]):
            if i == 0:
                block1_list.append(DeepResNetBlock(64,256))
            else:
                block1_list.append(DeepResNetBlock(256,256))
        self.block1 = torch.nn.Sequential(*block1_list)
        # stage 3: 256 -> 512 channels, first block halves the resolution
        block2_list = []
        for i in range(block_num_list[1]):
            if i == 0:
                block2_list.append(DeepResNetBlock(256, 512, downsample=True))
            else:
                block2_list.append(DeepResNetBlock(512, 512))
        self.block2 = torch.nn.Sequential(*block2_list)
        # stage 4: 512 -> 1024 channels, first block halves the resolution
        block3_list = []
        for i in range(block_num_list[2]):
            if i == 0:
                block3_list.append(DeepResNetBlock(512, 1024, downsample=True))
            else:
                block3_list.append(DeepResNetBlock(1024, 1024))
        self.block3 = torch.nn.Sequential(*block3_list)
        # stage 5: 1024 -> 2048 channels, first block halves the resolution
        block4_list = []
        for i in range(block_num_list[3]):
            if i == 0:
                block4_list.append(DeepResNetBlock(1024, 2048, downsample=True))
            else:
                block4_list.append(DeepResNetBlock(2048, 2048))
        self.block4 = torch.nn.Sequential(*block4_list)
def ResNet18(input_size, num_classes, resnet_type="3x3", **kargs):
    """ResNet-18: basic blocks with stage depths [2, 2, 2, 2]."""
    return ShallowResNet(input_size, num_classes, block_num_list=[2,2,2,2], resnet_type=resnet_type, **kargs)
def ResNet34(input_size, num_classes, resnet_type="3x3", **kargs):
    """ResNet-34: basic blocks with stage depths [3, 4, 6, 3]."""
    return ShallowResNet(input_size, num_classes, block_num_list=[3,4,6,3], resnet_type=resnet_type, **kargs)
def ResNet50(input_size, num_classes, resnet_type="3x3", **kargs):
    """ResNet-50: bottleneck blocks with stage depths [3, 4, 6, 3]."""
    return DeepResNet(input_size, num_classes, block_num_list=[3,4,6,3], resnet_type=resnet_type, **kargs)
def ResNet101(input_size, num_classes, resnet_type="3x3", **kargs):
    """ResNet-101: bottleneck blocks with stage depths [3, 4, 23, 3]."""
    # (Trailing fields after the call are dataset residue, kept as-is.)
    return DeepResNet(input_size, num_classes, block_num_list=[3,4,23,3], resnet_type=resnet_type, **kargs) | zllz4/Face-Recognition | models/resnet.py | resnet.py | py | 11,268 | python | en | code | 1 | github-code | 36
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.