seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
13212916594 | import asyncio
import json
from datetime import datetime, timedelta
import discord
from discord.ext import commands
import libs.config as config
from libs.embedmaker import officialEmbed
from libs.utils import has_role
#####################
# Strings variables #
#####################
# User-facing strings pulled from the shared config (localisation layer).
s_vote = config.get_string("vote")
s_no_perm = config.get_string("commands")["no_perm"]
###################
# Other variables #
###################
# Channel & role ID.
# Resolved once at import time; changing the config requires a reload.
id_announcements = config.get_config("channels")["announcements"]
id_admin = config.get_config("roles")["admin"]
# Persistence File.
# Path of the JSON file that survives restarts while a vote is running.
file_persistence = config.get_config("persistence")["vote"]
#############
# Functions #
#############
def clear_file():
    """Reset the persistence file to its default "no vote" payload.

    Returns:
        The announcement message ID of the active vote if one was
        recorded, otherwise None.
    """
    ret = None
    # Read the current payload; the original left the handle unclosed.
    with open(file_persistence, "r") as infile:
        data = json.load(infile)
    if "no_vote" not in data:
        # Saves message ID of the ongoing vote before wiping it.
        ret = data["message_id"]
    data = {"no_vote": True}
    with open(file_persistence, 'w') as outfile:
        json.dump(data, outfile)
    return ret
############
# COG Body #
############
class Vote(commands.Cog):
    """Admin-only cog implementing a single persistent reaction vote.

    Only one vote can run at a time: its state (description, options,
    deadline, message id) is stored in ``file_persistence`` so the vote
    survives bot restarts (see :meth:`on_ready`).
    """
    def __init__(self, bot):
        # Keep a reference to the bot for channel lookups and wait_for().
        self.bot = bot
    async def vote_output(self, data):
        """Send the output of the vote into the announcement channel."""
        # Time left until the stored deadline; negative if already passed.
        deltaTime = datetime.strptime(data["ending_time"], "%Y-%m-%dT%H:%M:%S.%f") - datetime.now()
        vTimeSec = int(deltaTime.total_seconds())
        vDesc = data["desc"]
        vOpt = data["options"]
        vMessageId = data["message_id"]
        # Waits...
        # We check that the time is actually in the future.
        # Otherwise, send result now.
        if vTimeSec > 0:
            await asyncio.sleep(vTimeSec)
        # Sends results.
        try:
            announcementChan = self.bot.get_channel(id_announcements)
            vResult = (await announcementChan.fetch_message(vMessageId)).reactions
            embed = officialEmbed("Vote results", "Topic: " + vDesc)
            # Subtract 1 from each count to drop the bot's own seed reaction.
            for i in range(0, len(vOpt)):
                embed.add_field(
                    name=vOpt[i], value=str(vResult[i].count-1))
            await announcementChan.send(embed=embed)
        except Exception as e:
            # NOTE(review): errors are only printed, never logged/raised.
            print(e)
        # Vote is finished, erase JSON data.
        clear_file()
    # Command to make a new vote.
    @commands.command(description=s_vote["help_desc"] + " (Admin)", hidden=True)
    async def vote(self, ctx):
        """Interactive wizard that builds and publishes a new vote.

        Walks the admin through: description -> options -> reactions ->
        duration -> confirmation, then posts the vote embed and persists
        it. Typing "cancel" at any prompt aborts the wizard.
        NOTE(review): wait_for() calls have no timeout, so an abandoned
        wizard blocks forever.
        """
        vDesc = ""
        vOpt = []
        vReac = []
        vTimeHour = 0
        # Remove the command.
        await ctx.message.delete()
        # Check for the user to be admin.
        if not has_role(ctx.author, id_admin):
            botMsg = await ctx.send(s_no_perm)
            await asyncio.sleep(5)
            await botMsg.delete()
            return
        # Check for the author.
        def checkAuth(m):
            return m.author == ctx.author
        # NOTE(review): checkDone is defined but never used below.
        def checkDone(m):
            return (m.author == ctx.author and m.content.lower() == "done")
        botMsgCancel = await ctx.send(s_vote["cancel_message"])
        # Retrieve the vote's description.
        botMsg = await ctx.send(s_vote["vote_desc"])
        vDescMsg = await self.bot.wait_for('message', check=checkAuth)
        vDesc = vDescMsg.content
        await botMsg.delete()
        await vDescMsg.delete()
        if vDescMsg.content.lower() == "cancel":
            await botMsgCancel.delete()
            confirmDelMsg = await ctx.send(s_vote["canceled"])
            await asyncio.sleep(5)
            await confirmDelMsg.delete()
            return
        # Retrieve the vote's options and reactions.
        botMsg = await ctx.send(s_vote["vote_options"])
        isDone = False
        optMsg = []
        # Gettings all options.
        while not isDone:
            msg = await self.bot.wait_for('message', check=checkAuth)
            if msg.content.lower() == "done":
                isDone = True
            elif msg.content.lower() == "cancel":
                await botMsgCancel.delete()
                await botMsg.delete()
                await msg.delete()
                for m in optMsg:
                    await m.delete()
                confirmDelMsg = await ctx.send(s_vote["canceled"])
                await asyncio.sleep(5)
                await confirmDelMsg.delete()
                return
            else:
                vOpt.append(msg.content)
                optMsg.append(msg)
        # Clearing the messages.
        await botMsg.delete()
        for m in optMsg:
            await m.delete()
        # Doing the same but for reactions.
        botMsgText = s_vote["vote_reactions"]
        for i in range(0, len(vOpt)):
            botMsgText += ("\n" + str(i+1) + ". - " + vOpt[i])
        botMsg = await ctx.send(botMsgText)
        # Waits for the DONE message.
        isDone = False
        while not isDone:
            msg = await self.bot.wait_for('message', check=checkAuth)
            if msg.content.lower() == "cancel":
                await botMsgCancel.delete()
                await botMsg.delete()
                await msg.delete()
                confirmDelMsg = await ctx.send(s_vote["canceled"])
                await asyncio.sleep(5)
                await confirmDelMsg.delete()
                return
            # Checks if the amount of emojis matches the amount of options.
            # Re-fetch the prompt so its reaction list is up to date.
            cacheBotMsg = await ctx.channel.fetch_message(botMsg.id)
            if len(cacheBotMsg.reactions) != len(vOpt):
                await msg.delete()
                errorMsg = await ctx.send(s_vote["reactions_amount_wrong"])
                await asyncio.sleep(5)
                await errorMsg.delete()
            else:
                isDone = True
        # Sets the emojis.
        for r in cacheBotMsg.reactions:
            vReac.append(r.emoji)
        # Clears msg.
        await botMsg.delete()
        await msg.delete()
        # Gets the time the vote should last.
        isDone = False
        while(not isDone):
            timeAsk = await ctx.send(s_vote["vote_time"])
            msg = await self.bot.wait_for('message', check=checkAuth)
            if msg.content.lower() == "cancel":
                await botMsgCancel.delete()
                await msg.delete()
                await timeAsk.delete()
                confirmDelMsg = await ctx.send(s_vote["canceled"])
                await asyncio.sleep(5)
                await confirmDelMsg.delete()
                return
            try:
                vTimeHour = int(msg.content)
                isDone = True
            # NOTE(review): bare except also hides unrelated errors;
            # only ValueError is expected from int().
            except:
                errorMsg = await ctx.send(s_vote["time_int_only"])
                await asyncio.sleep(2)
                await errorMsg.delete()
                isDone = False
            finally:
                await timeAsk.delete()
                await msg.delete()
        # Confirmation embed.
        embed = officialEmbed(title=s_vote["recap"], desc=vDesc, footer=s_vote["recap_time"].format(vTimeHour))
        for i in range(0, len(vOpt)):
            embed.add_field(name=vReac[i], value=vOpt[i])
        # Sends embed.
        botEmbed = await ctx.send(embed=embed)
        # Asks for validation.
        botMsg = await ctx.send(s_vote["confirm"])
        voteValid = await self.bot.wait_for('message', check=checkAuth)
        # Checks validation's answer.
        if not voteValid.content.lower() == "yes":
            cancelMsg = await ctx.send(s_vote["canceled"])
            # Removes useless msg.
            await botMsgCancel.delete()
            await botEmbed.delete()
            await botMsg.delete()
            await voteValid.delete()
            await cancelMsg.delete()
        else:
            # Removes useless msg.
            await botMsgCancel.delete()
            await botMsg.delete()
            await voteValid.delete()
            # Makes embed.
            embed = officialEmbed("Vote", vDesc)
            for i in range(0, len(vOpt)):
                embed.add_field(name=vReac[i], value=vOpt[i])
            # Sends the vote.
            chan_announcement = self.bot.get_channel(id_announcements)
            vEmbed = await chan_announcement.send(embed=embed)
            # Adds the reactions to it.
            for i in range(0, len(vReac)):
                await vEmbed.add_reaction(vReac[i])
            # Saves it in the persistence file.
            endingTime = (datetime.now() + timedelta(hours=vTimeHour)).strftime('%Y-%m-%dT%H:%M:%S.%f')
            data = {"desc": vDesc, "options":vOpt, "ending_time":endingTime, "message_id":vEmbed.id}
            with open(file_persistence, 'w') as outfile:
                json.dump(data, outfile)
            # Waits and fetches results.
            await self.vote_output(data)
    # Command to stop ongoing vote.
    @commands.command(description=s_vote["help_desc_canceled"] + " (Admin)", hidden=True)
    async def votecancel(self, ctx):
        """Cancel the ongoing vote (admin only): wipe the persistence
        file and delete the announcement message, if any."""
        if has_role(ctx.author, id_admin):
            vote_id = clear_file()
            if vote_id != None:
                chan_announcement = self.bot.get_channel(id_announcements)
                vote_msg = (await chan_announcement.fetch_message(vote_id))
                await vote_msg.delete()
                await ctx.send(s_vote["canceled"])
            else:
                await ctx.send(s_vote["nothing_to_cancel"])
    # Loads persistence, and checks if a vote is ongoing.
    @commands.Cog.listener()
    async def on_ready(self):
        """On (re)connect, resume a persisted vote or initialise the
        persistence file with the default payload."""
        # Loads.
        try:
            file = open(file_persistence, "r").read()
            data = json.loads(file)
        # NOTE(review): bare except -- a corrupted file is silently reset.
        except:
            data = {"no_vote":True}
            with open(file_persistence, 'w') as file_out:
                json.dump(data, file_out)
        # Check if there is an ongoing vote.
        if not "no_vote" in data:
            print("[INFO]\tVote found!")
            # Gets back to the vote.
            await self.vote_output(data)
def setup(bot):
    """discord.py extension entry point: attach the Vote cog to the bot."""
    bot.add_cog(Vote(bot))
| rroethof/thm-discord-bot | cogs/vote.py | vote.py | py | 10,111 | python | en | code | null | github-code | 90 |
69929985256 | # init
import numpy as np
import matplotlib.pyplot as plt
# Homework script: writes each problem's result to HW_1_output.txt.
file = open("HW_1_output.txt","w")
###### Prob 1
file.write("Prob 1 Results:"+"\n")
### (a) new matrix with new row
file.write("(a):"+"\n")
A = np.array([[1,2,3,1],[4,1,3,2],[4,4,3,1],[3,2,3,3]])
z = np.array([[1,1,3,1]])
# Insert row z between the 2nd and 3rd rows of A.
B = np.vstack((A[0:2,:],z,A[2:5,:]))
file.write(str(B)+"\n")
### (b) new matrix with new column
file.write("(b):"+"\n")
A = np.array([[1,2,3,1],[4,1,3,2],[4,4,3,1],[3,2,3,3]])
z = np.array([[1,1,3,1]])
# Insert z (transposed to a column) between the 2nd and 3rd columns.
C = np.hstack((A[:,0:2],z.T,A[:,2:5]))
file.write(str(C)+"\n"+"\n")
###### Prob 2
file.write("Prob 2 Results:"+"\n"+"\n")
import math as math
### (a) range
file.write("(a):"+"\n")
x = np.arange(-1*math.pi,math.pi,0.05*math.pi)
file.write(str(x)+"\n")
### (b) sin function
file.write("(b):"+"\n")
y = np.sin(x)
file.write(str(y)+"\n")
### (c) plot
file.write("(c):"+"Plot below"+"\n"+"\n")
#plt.plot(x,y)
#plt.show()
## THIS GENERATES A PLOT THAT I CAN ATTACH
###### Prob 3
file.write("Prob 3 Results:"+"\n")
a = np.ones(6)
b = np.ones(4)*-.05
# 6x6 identity-like matrix with -0.05 on the second sub-diagonal.
M = np.diag(a)+np.diag(b,-2)
file.write(str(M)+"\n"+"\n")
###### Prob 4
file.write("Prob 4 Results:"+"\n")
rand = np.random.rand(5,5)
det = np.linalg.det(rand)
file.write(str(rand)+"\n")
file.write(str(det)+"\n"+"\n")
###### Prob 5
file.write("Prob 5 Results:"+"\n")
coef = np.array([[4,2,1],[3,1,1],[1,2,1]])
rhs = np.array([[2],[1],[1]])
soln = np.linalg.solve(coef, rhs)
# String slicing [2:8] trims "[ " and truncates each solution component.
file.write("x1:" + str(soln[0])[2:8] + " x2:" + str(soln[1])[2:8] + " x3:" + str(soln[2])[2:8]+"\n")
file.close()
| gigamosh57/CU_NuMeth_Fall2014 | HW_1.py | HW_1.py | py | 1,540 | python | en | code | 0 | github-code | 90 |
18202538499 | n = int(input())
a = list(map(int, input().split()))
m = 1
# Print 0 immediately if any factor is zero (avoids the overflow check).
if 0 in a:
    print(0)
else:
    # `n` is the element count read from stdin above.
    for i in range(n):
        m = m * a[i]
        # Cap: report -1 as soon as the running product exceeds 10^18.
        if m > 10 ** 18:
            print(-1)
            break
        elif i == n - 1:
            print(m)
import tkinter as tk
from PIL import ImageTk,Image
# NOTE(review): `global` at module level is a no-op; these lines do nothing.
global entry
global colour
import math
from math import ceil, floor
# Physical constants / initial state shared by the force functions below.
g = 9.8
Rdir = "FLAT"
#input window
root = tk.Tk()
canvas1 = tk.Canvas(root, width=800, height=550)
canvas1.pack()
labeltop = tk.Label(root, text='User Input')
labeltop.config(font=('Arial', 20))
canvas1.create_window(400, 40, window=labeltop)
# Illustration image shown above the input fields.
width = 550
height = 300
path = 'ExampleOfForce.jpg'
img = Image.open(path)
# NOTE(review): Image.ANTIALIAS was removed in Pillow 10 -- confirm the
# pinned Pillow version (newer code uses Image.LANCZOS).
img = img.resize((width, height), Image.ANTIALIAS)
photoImg = ImageTk.PhotoImage(img)
canvas1.create_image(390, 210, image=photoImg)
errorLab = tk.Label(root, text='Enter Numeric values only')
errorLab.config(font=('Arial', 9))
errorLab.config(bg="red")
canvas1.create_window(700, 375, window=errorLab)
# One label + entry box per physical quantity the user must provide.
RangLab = tk.Label(root, text='What is the angle between the ramp and horizontal (in degrees -90 <= Ө <= 90)? ')
RangLab.config(font=('Arial', 9))
canvas1.create_window(400, 400, window=RangLab)
RangBox = tk.Entry(root)
canvas1.create_window(700, 400, window=RangBox)
mLab = tk.Label(root, text='Mass (kg): ')
mLab.config(font=('Arial', 9))
canvas1.create_window(590, 420, window=mLab)
mBox = tk.Entry(root)
canvas1.create_window(700, 420, window=mBox)
FappLab = tk.Label(root, text='Magnitude of Force Applied (N): ')
FappLab.config(font=('Arial', 9))
canvas1.create_window(533, 440, window=FappLab)
FappBox = tk.Entry(root)
canvas1.create_window(700, 440, window=FappBox)
FangLab = tk.Label(root, text='Angle of applied force (degrees): ')
FangLab.config(font=('Arial', 9))
canvas1.create_window(529, 460, window=FangLab)
FangBox = tk.Entry(root)
canvas1.create_window(700, 460, window=FangBox)
coefLab = tk.Label(root, text='Coefficient of friction (If no friction, input 0): ')
coefLab.config(font=('Arial', 9))
canvas1.create_window(505, 480, window=coefLab)
coefBox = tk.Entry(root)
canvas1.create_window(700, 480, window=coefBox)
def floatRound(num, places):
    """Round `num` to `places` decimals using the digit after the cut-off.

    A following digit of 5 or more rounds up via ceil, otherwise the value
    is truncated via floor. Values that already fit in `places` decimals
    are returned unchanged.
    """
    text = str(num)
    decimals = text[text.find('.') + 1:]
    if len(decimals) < places + 1:
        # Nothing beyond the requested precision -- keep the value as-is.
        return num
    next_digit = int(decimals[places:places + 1])
    scale = 10 ** places
    if next_digit >= 5:
        return ceil(num * scale) / float(scale)
    return floor(num * scale) / float(scale)
# Gravity component parallel to the surface the object rests on.
def GforceX(m, a, Rdir):
    """Return -m*g*sin(a), rounded to 3 decimals; zero on flat ground."""
    if Rdir in ("RIGHT", "LEFT"):
        sin_a = floatRound(math.sin(math.radians(a)), 2)
        force = (-1) * m * g * sin_a
    else:
        force = 0
    return floatRound(force, 3)
# Gravity component perpendicular to the surface the object rests on.
def GforceY(m, a):
    """Return -m*g*cos(a), rounded to 3 decimals (negative = into surface)."""
    cos_a = floatRound(math.cos(math.radians(a)), 2)
    return floatRound((-1) * m * g * cos_a, 3)
# Horizontal (surface-parallel) component of the applied force.
def AppForceX(F, a):
    """Return F*cos(a) with the cosine pre-rounded to 2 decimals."""
    cos_a = floatRound(math.cos(math.radians(a)), 2)
    return floatRound(F * cos_a, 3)
# Vertical (surface-perpendicular) component of the applied force.
def AppForceY(F, a):
    """Return F*sin(a) with the sine pre-rounded to 2 decimals."""
    sin_a = floatRound(math.sin(math.radians(a)), 2)
    return floatRound(F * sin_a, 3)
#Normal force
def NormForce(Fg, Fy):
    """Normal force balancing the net perpendicular load.

    Args:
        Fg: perpendicular gravity component (negative = pressing into surface).
        Fy: perpendicular applied-force component.

    Returns:
        The reaction force (>= 0), rounded to 3 decimals. Zero when the net
        perpendicular force points away from the surface (object lifting off).
    """
    net = Fg + Fy
    # Surface only pushes back when the object presses into it (net < 0).
    # The stray debug print() from the original has been removed.
    Fn = -net if net < 0 else 0
    return floatRound(Fn, 3)
# Direction of motion implied by the sum of three horizontal forces.
def checkMotion(F1, F2, F3):
    """Return (sign, label): (1, "RIGHT"), (-1, "LEFT") or (0, "")."""
    total = F1 + F2 + F3
    if total > 0:
        return 1, "RIGHT"
    if total < 0:
        return -1, "LEFT"
    return 0, ""
# Kinetic friction force, always opposing the direction of motion.
def FricForce(c, Fn, dir, Fy, Fg):
    """Return -dir*c*Fn rounded to 3 decimals.

    Zero when the object lifts off the surface (Fy + Fg > 0) or when it
    is not moving (dir == 0).
    """
    if Fy + Fg > 0:
        friction = 0
    elif dir == 1:
        friction = (-1) * c * Fn
    elif dir == -1:
        friction = c * Fn
    else:
        friction = 0
    return floatRound(friction, 3)
# Net perpendicular force: normal + gravity + applied components.
def NetForceY(Fn, Fg, Fy):
    """Sum the perpendicular components, rounded to 3 decimals."""
    return floatRound(Fn + Fg + Fy, 3)
# Net parallel force; static friction cancels motion when it dominates.
def NetForceX(Fx, Ff, Fg):
    """Return Fx + Ff + Fg rounded to 3 decimals, or 0 when friction
    exceeds the driving forces (the object stays put)."""
    if abs(Ff) > abs(Fx + Fg):
        return 0
    return floatRound(Fx + Ff + Fg, 3)
# Acceleration from Newton's second law, guarding against m <= 0.
def accel(F, m):
    """Return F/m, or 0 for non-positive mass (avoids division by zero)."""
    return F / m if m > 0 else 0
#values for output
def Calculator(Rang, m , Fapp, Fang, coef):
    """Compute every force component for the recap screen.

    Args:
        Rang: ramp angle in degrees (sign selects ramp direction).
        m: mass in kg.
        Fapp: magnitude of the applied force in N.
        Fang: angle of the applied force in degrees.
        coef: coefficient of friction.

    Returns:
        Tuple (Fgy, Fgx, Fappx, Fappy, Fnorm, Ffric, NFx, NFy, phrase)
        where `phrase` is the human-readable acceleration summary.

    Intermediate values are printed to stdout for debugging.
    """
    # Ramp direction from the sign of the angle.
    if (Rang > 0):
        Rdir = "RIGHT"
    elif (Rang < 0):
        Rdir = "LEFT"
    else:
        Rdir = "FLAT"
    Fgy = GforceY(m, Rang)
    print("Fgy = " + str(Fgy))
    Fgx = GforceX(m, Rang, Rdir)
    print("Fgx = " + str(Fgx))
    Fappx = AppForceX(Fapp, Fang)
    print("Fappx = " + str(Fappx))
    Fappy = AppForceY(Fapp, Fang)
    print("Fappy = " + str(Fappy))
    Fnorm = NormForce(Fgy, Fappy)
    print("Fnorm = " + str(Fnorm))
    # Direction of motion ignoring friction; friction then opposes it.
    dir, dirword = checkMotion(Fappx, Fgx, 0)
    Ffric = FricForce(coef, Fnorm, dir, Fappy, Fgy)
    print("Ffric = " + str(Ffric))
    NFx = NetForceX(Fappx, Ffric, Fgx)
    print("NFx = " + str(NFx))
    NFy = NetForceY(Fnorm, Fgy, Fappy)
    print("NFy = " + str(NFy))
    ax = accel(NFx, m)
    # NOTE(review): ay is computed but never used in the result phrase.
    ay = accel(NFy, m)
    #results
    if (ax != 0):
        if (Rdir != "FLAT"):
            if (dirword == Rdir):
                motion = "up"
            else:
                motion = "down"
            phrase = ("Accelerating " + motion + " the ramp at " + str(floatRound(ax, 2)) + " m/s^2")
        else:
            phrase = ("Accelerating to the " + dirword.lower() + " at " + str(floatRound(ax, 2)) + " m/s^2")
    else:
        phrase = ("Object is not accelerating.")
    print (phrase)
    return (Fgy, Fgx, Fappx, Fappy, Fnorm, Ffric, NFx, NFy, phrase)
def label(canvas, vari, size, x, y):
    """Style widget `vari` with Arial at `size` pt and place it on
    `canvas` at coordinates (x, y)."""
    vari.config(font=('Arial', size))
    canvas.create_window(x, y, window = vari)
def outputWin(Fgy, Fgx, Fappx, Fappy, Fnorm, Ffric, NFx, NFy, phrase):
    """Replace the input canvas with a results screen.

    Destroys `canvas1` (the input form) and builds a new canvas showing
    the user's inputs (read back from the entry boxes) and every computed
    force component, ending with the acceleration summary `phrase`.
    """
    canvas1.destroy()
    canvas2 = tk.Canvas(root, width=800, height=550)
    canvas2.pack()
    LabelTop = tk.Label(root, text='Results')
    label(canvas2, LabelTop, 20, 400, 50)
    # Echo of the raw user inputs across the top row.
    RangLab = tk.Label(root, text=("Ramp Angle: " + RangBox.get()))
    label(canvas2, RangLab, 9, 100, 100)
    mLab = tk.Label(root, text=("Mass: " + mBox.get()))
    label(canvas2, mLab, 9, 265, 100)
    FappLab = tk.Label(root, text=("Applied Force: " +FappBox.get()))
    label(canvas2, FappLab, 9, 400, 100)
    FangLab = tk.Label(root, text=("Angle of Force: " + FangBox.get()))
    label(canvas2, FangLab, 9, 535, 100)
    coefLab = tk.Label(root, text=("Coefficient of Friction: " + coefBox.get()))
    label(canvas2, coefLab, 9, 690, 100)
    # Computed force components, one per row.
    FgxLab = tk.Label(root, text=("X-component of gravitational Force: " + str(Fgx)))
    label(canvas2, FgxLab, 15,400, 150)
    FgyLab = tk.Label(root, text=("Y-component of gravitational Force: " + str(Fgy)))
    label(canvas2, FgyLab, 15, 400, 180)
    FappxLab = tk.Label(root, text=("X-component of applied Force: " + str(Fappx)))
    label(canvas2, FappxLab, 15,400, 210)
    FappyLab = tk.Label(root, text=("Y-component of applied Force: " + str(Fappy)))
    label(canvas2, FappyLab, 15, 400, 240)
    FnormLab = tk.Label(root, text=("Normal Force: " + str(Fnorm)))
    label(canvas2, FnormLab, 15, 400, 270)
    FfricLab = tk.Label(root, text=("Friction Force: " + str(abs(Ffric))))
    label(canvas2, FfricLab, 15, 400, 300)
    NFxLab = tk.Label(root, text=("X-component of Net Force: " + str(NFx)))
    label(canvas2, NFxLab, 15, 400, 330)
    NFyLab = tk.Label(root, text=("Y-component of Net Force: " + str(NFy)))
    label(canvas2, NFyLab, 15, 400, 360)
    phraseLab = tk.Label(root, text=(str(phrase)))
    label(canvas2, phraseLab, 20, 400, 400)
def values():
    """Next-button callback: read the entry boxes, run the calculation
    and switch to the results screen.

    NOTE(review): float() raises ValueError on non-numeric input -- the
    validation code for this is commented out at the bottom of the file.
    """
    Rang = float(RangBox.get())
    m = float(mBox.get())
    Fapp = float(FappBox.get())
    Fang = float(FangBox.get())
    coef = float(coefBox.get())
    print(Rang, m, Fapp, Fang, coef)
    Fgy, Fgx, Fappx, Fappy, Fnorm, Ffric, NFx, NFy, phrase = Calculator(Rang, m, Fapp, Fang, coef)
    outputWin(Fgy, Fgx, Fappx, Fappy, Fnorm, Ffric, NFx, NFy, phrase)
# Submit button wired to values(), then hand control to the Tk event loop.
button1 = tk.Button(root, text=' Next ', command=values, bg='gray', font=('Arial', 11, 'bold'))
canvas1.create_window(700, 520, window=button1)
root.mainloop()
#this code is for checking the inputs of the userinterface and making sure they are within bounds.
#It doesn't work yet, which is why it is commented out.
# def check():
# try:
# Rang = float(RangBox.get())
# m = float(mBox.get())
# Fapp = float(FappBox.get())
# Fang = float(FangBox.get())
# coef = float(coefBox.get())
# print(Rang, m, Fapp, Fang, coef)
# errorLab.destroy()
# except:
# errorLab = tk.Label(root, text='Enter Numeric values only')
# errorLab.config(font=('Arial', 9))
# errorLab.config(bg="red")
# canvas1.create_window(700, 380, window=errorLab)
# x=1
# return
# y = 1
# while (y == 1):
# RangBnds = 1
# mBnds = 1
# FappBnds = 1
# FangBnds = 1
# coefBnds = 1
# if (Rang < -90 or Rang > 90):
# RangBnds = 2
# if (m < 0):
# mBnds = 2
# if (Fapp < 0):
# FappBnds = 2
# if (Fang > 360):
# FangBnds = 2
# if (coef < 0):
# coefBnds = 2
# if (RangBnds == 1 and mBnds == 1 and FappBnds == 1 and FangBnds == 1 and coefBnds == 1):
# print ("ok")
# y = 2
# else:
# if (RangBnds == 2):
# RangBox.configure({"background": "red"})
# if (mBnds == 2):
# mBox.configure({"background": "red"})
# if (FappBnds == 2):
# FappBox.configure({"background": "red"})
# if (FangBnds == 2):
# FangBox.configure({"background": "red"})
# if (coefBnds == 2):
# coefBox.configure({"background": "red"})
# return
# def next():
# RangBnds = 1
# mBnds = 1
# FappBnds = 1
# FangBnds = 1
# coefBnds = 1
# RangBnds, mBnds, FappBnds, FangBnds, coefBnds = check()
#
| asatapathy3254/object_on_ramp | ObjectOnRampWUserInterface.py | ObjectOnRampWUserInterface.py | py | 10,349 | python | en | code | 0 | github-code | 90 |
71232343336 | #! -*- coding:utf-8 -*-
import heapq
# Demo of heapq's n-largest / n-smallest selection helpers.
l1 = [34, 25, 12, 99, 87, 63, 58, 78, 88, 92]
l2 = [
    {'name': 'IBM', 'shares': 100, 'price': 91.1},
    {'name': 'AAPL', 'shares': 50, 'price': 543.22},
    {'name': 'FB', 'shares': 200, 'price': 21.09},
    {'name': 'HPQ', 'shares': 35, 'price': 31.75},
    {'name': 'YHOO', 'shares': 45, 'price': 16.35},
    {'name': 'ACME', 'shares': 75, 'price': 115.65}
]
# Plain numeric selection.
print(heapq.nlargest(3, l1))
print(heapq.nsmallest(3, l1))
# Selection over dicts via a key function.
print(heapq.nlargest(3, l2, key=lambda x:x['price']))
print(heapq.nsmallest(3,l2,key=lambda x:x['shares']))
15329333205 | from autopilot import prefs
# if prefs.AGENT in ("TERMINAL", "DOCS"):
# Optional GUI dependency: PySide2 may be absent on headless agents, so
# the Qt helper classes below are only defined when the import succeeds.
HAVE_PYSIDE = False
try:
    from PySide2 import QtCore
    HAVE_PYSIDE = True
except ImportError:
    pass
import json
import pandas as pd
from scipy.stats import linregress
# from subprocess import call
from threading import Thread
import os
import numpy as np
class Param(object):
    """
    In the future, we will implement a coherent Parameter management system

    Holds task/GUI parameter values as instance attributes and exposes
    dictionary-style access to them.

    Warning:
        Not Implemented.
    """
    # human-readable description of parameter
    tag = None
    # string that describes the type of input or param
    type = None
    # possible types
    types = ['int', 'bool', 'list']

    def __init__(self, **kwargs):
        """Store every keyword argument as an attribute on the instance.

        Args:
            **kwargs: arbitrary parameter name/value pairs.
        """
        for name, value in kwargs.items():
            setattr(self, name, value)

    # Dictionary-like behaviour delegates straight to the instance __dict__.
    def __getitem__(self, key):
        """Read a stored parameter by name.

        Args:
            key: parameter name.
        """
        return self.__dict__[key]

    def __setitem__(self, key, value):
        """Create or overwrite a stored parameter.

        Args:
            key: parameter name.
            value: parameter value.
        """
        self.__dict__[key] = value

    def __delitem__(self, key):
        """Remove a stored parameter.

        Args:
            key: parameter name.
        """
        del self.__dict__[key]

    def __contains__(self, key):
        """True when a parameter of this name is stored.

        Args:
            key: parameter name.
        """
        return key in self.__dict__

    def __len__(self):
        """Number of stored parameters."""
        return len(self.__dict__)
if HAVE_PYSIDE:
    class InvokeEvent(QtCore.QEvent):
        """
        Sends signals to the main QT thread from spawned message threads

        See `stackoverflow <https://stackoverflow.com/a/12127115>`_
        """
        # Custom event type, registered once with Qt's event system.
        EVENT_TYPE = QtCore.QEvent.Type(QtCore.QEvent.registerEventType())
        def __init__(self, fn, *args, **kwargs):
            # type: (function, object, object) -> None
            """
            Accepts a function, its args and kwargs and wraps them as a
            :class:`QtCore.QEvent`
            """
            QtCore.QEvent.__init__(self, InvokeEvent.EVENT_TYPE)
            self.fn = fn
            self.args = args
            self.kwargs = kwargs
    class Invoker(QtCore.QObject):
        """
        Wrapper that calls an evoked event made by :class:`.InvokeEvent`
        """
        def event(self, event):
            """
            Run the callable wrapped by the incoming :class:`InvokeEvent`.

            Args:
                event: an :class:`InvokeEvent` carrying fn/args/kwargs.
            """
            event.fn(*event.args, **event.kwargs)
            return True
class ReturnThread(Thread):
    """
    Thread whose .join() method returns the value from the target function

    thx to https://stackoverflow.com/a/6894023
    """
    def __init__(self, group=None, target=None, name=None,
                 args=(), kwargs=None, Verbose=None):
        """
        Args:
            group, target, name, args, kwargs: as for :class:`threading.Thread`.
            Verbose: accepted for backward compatibility with the old
                Python 2 signature, but ignored (Python 3 dropped it).
        """
        # `kwargs=None` avoids the shared mutable-default-dict pitfall.
        Thread.__init__(self, group=group, target=target, name=name,
                        args=args, kwargs={} if kwargs is None else kwargs)
        self._return = None

    def run(self):
        """Run the target and stash its return value for join()."""
        # Python 3 exposes the ctor arguments as _target/_args/_kwargs;
        # the original used the Python-2 name-mangled _Thread__* names,
        # which raise AttributeError on Python 3.
        if self._target is not None:
            self._return = self._target(*self._args, **self._kwargs)

    def join(self, timeout=None):
        """Wait for the thread and return the target's return value."""
        Thread.join(self, timeout)
        return self._return
def list_subjects(pilot_db=None):
    """
    Collect every subject listed in a pilot_db.

    Args:
        pilot_db (dict): a pilot_db. If None, loads one via
            :meth:`.load_pilotdb`.

    Returns:
        list: all subjects across all pilots, in pilot order.
    """
    db = load_pilotdb() if pilot_db is None else pilot_db
    found = []
    for entry in db.values():
        if 'subjects' in entry:
            found.extend(entry['subjects'])
    return found
def load_pilotdb(file_name=None, reverse=False):
    """
    Load a pilot_db JSON file.

    Args:
        file_name (str): path to the JSON file; defaults to
            ``/usr/autopilot/pilot_db.json``.
        reverse (bool): when True, return a flipped mapping of
            subject -> pilot instead of pilot -> info.

    Returns:
        dict: the pilot_db (or its subject->pilot inversion).
    """
    if file_name is None:
        file_name = '/usr/autopilot/pilot_db.json'
    with open(file_name) as pilot_file:
        pilot_db = json.load(pilot_file)
    if reverse:
        # Invert: every subject maps back to the pilot that owns it.
        flipped = {}
        for pilot, info in pilot_db.items():
            for subject in info['subjects']:
                flipped[subject] = pilot
        pilot_db = flipped
    return pilot_db
def coerce_discrete(df, col, mapping={'L':0, 'R':1}):
    """
    Coerce a discrete/string column of a pandas dataframe into numeric values

    Default is to map 'L' to 0 and 'R' to 1 as in the case of Left/Right 2AFC tasks

    Args:
        df (:class:`pandas.DataFrame`) : dataframe with the column to transform
        col (str): name of column
        mapping (dict): mapping of strings to numbers. (The shared default
            dict is read-only here, so the mutable default is harmless.)

    Returns:
        df (:class:`pandas.DataFrame`) : transformed dataframe
    """
    import warnings  # local import: only needed for the blank-row warning

    for key, val in mapping.items():
        df.loc[df[col]==key,col] = val

    # if blanks, warn and remove
    if '' in df[col].unique():
        n_blanks = sum(df[col]=='')
        # The original built a Warning() object and discarded it, so the
        # message was silently lost; actually emit it.
        warnings.warn('{} blank rows detected, removing.'.format(n_blanks))
        df.drop(df.index[df[col]==''], axis=0, inplace=True)

    df = df.astype({col:float})
    return df
def find_recursive(key, dictionary):
    """
    Find all instances of a key in a dictionary, recursively.

    Args:
        key: the key to search for.
        dictionary (dict): possibly-nested dictionary; lists of dicts are
            descended into as well.

    Yields:
        every value stored under ``key`` at any depth. A matched value is
        yielded as-is and not descended into further.
    """
    # .iteritems() was Python 2 only; .items() is the Python 3 equivalent.
    for k, v in dictionary.items():
        if k == key:
            yield v
        elif isinstance(v, dict):
            for result in find_recursive(key, v):
                yield result
        elif isinstance(v, list):
            for d in v:
                for result in find_recursive(key, d):
                    yield result
#
# def update_pis(github=True, apt=False, pilot_select = None, prefs_fn = None):
# """
# Args:
# github:
# apt:
# pilot_select:
# prefs_fn:
# """
# # update github, or apt?
# # should limit pilots or use all?
# # load prefs from default location or use different?
# if prefs_fn is None:
# prefs = get_prefs()
# else:
# prefs = get_prefs(prefs_fn)
#
# # get ips from pilot db
# with open(prefs['PILOT_DB'], 'r') as pilot_db:
# pilots = json.load(pilot_db)
#
# # if we were passed a list of pilots to subset then do it
# if pilot_select is not None:
# pilots = {k: v for k, v in pilots.items() if k in pilot_select }
#
# if github is True:
# ips = ['pi@'+v['ip'] for k,v in pilots.items()]
# ip_string = " ".join(ips)
# call('parallel-ssh', '-H', ip_string, 'git --git-dir=/home/pi/git/autopilot/.git pull')
| pauljerem/autopilot | autopilot/core/utils.py | utils.py | py | 6,843 | python | en | code | null | github-code | 90 |
34730775437 | #!/usr/bin/env python3
"""Defines `mat_mul`."""
def mat_mul(mat1, mat2):
    """Performs 2D-matrix multiplication.

    Returns the product of `mat1` (m x n) and `mat2` (n x p) as a list of
    lists, or None when the shapes are incompatible or `mat1` is empty.
    """
    if len(mat1) == 0 or len(mat1[0]) != len(mat2):
        return None
    # Columns of mat2, obtained by transposing it once up front.
    columns = list(zip(*mat2))
    return [
        [sum(a * b for a, b in zip(row, col)) for col in columns]
        for row in mat1
    ]
| keysmusician/holbertonschool-machine_learning | math/0x00-linear_algebra/8-ridin_bareback.py | 8-ridin_bareback.py | py | 574 | python | en | code | 1 | github-code | 90 |
33895874605 | from functools import wraps
import time
def cache(timeout=3600):
    """Memoizing decorator with per-entry expiry (positional args only).

    A negative ``timeout`` keeps entries forever. Results equal to None
    are never cached and are recomputed on every call.
    """
    def decorator(func):
        results = {}
        stamps = {}

        @wraps(func)
        def wrapper(*args):
            cached = results.get(args)
            if cached is not None:
                # Entry is usable if it never expires or is still fresh.
                if timeout < 0 or (time.time() - stamps[args] < timeout):
                    return cached
            value = func(*args)
            results[args] = value
            stamps[args] = time.time()
            return value

        return wrapper
    return decorator
| KAILINYmq/python-flask-gotel | agile/commons/simple_cache.py | simple_cache.py | py | 524 | python | en | code | 1 | github-code | 90 |
38844783276 | # coding:utf-8
import json
import pytest
from datetime import datetime
from apis.device_management.device_account.apis_device_account import Apis
@pytest.mark.bvt
@pytest.mark.device
@pytest.mark.flaky(reruns=3, reruns_delay=3)
def test_get_measurement_group():
    """
    Fetch the preset (default) measurement-group definitions and check
    that some exist and that every one has type == 90.

    (Original docstring: 获取默认采集定义信息)
    """
    try:
        params = {
            "skipCount": 0,
            "maxResultCount": 99999,
            # Cache-buster timestamp parameter.
            "_t": datetime.now()
        }
        res = Apis().api_measure_get_measurement_group(params=params)
        # NOTE(review): `<= 200` also accepts 1xx codes; probably meant == 200.
        assert res.status_code <= 200, "Http请求状态码错误"
        assert json.loads(res.text)['data']['totalCount'] > 0, "预置的采集定义数量为零! "
        for i in json.loads(res.text)['data']['items']:
            assert i['type'] == 90, "存在type不等于90的预置采集定义"
    # NOTE(review): re-raising the caught exception unchanged is a no-op
    # wrapper; the try/except could be removed.
    except Exception as e:
        raise e
| zj1995-09-09/supercare_api | testcase/device_management/device_account/test_measure_get_measurement_group.py | test_measure_get_measurement_group.py | py | 873 | python | en | code | 0 | github-code | 90 |
23475304188 | from django import forms
from django.http import HttpResponseRedirect
from django.test import RequestFactory, TestCase
from data_research.handlers import ConferenceRegistrationHandler
class MockConferenceRegistrationForm(forms.Form):
    """Stand-in registration form: swallows the handler-specific keyword
    arguments, never persists anything, and always reports free capacity."""

    # Handler-specific kwargs that a plain forms.Form does not accept.
    _HANDLER_KWARGS = (
        'capacity',
        'govdelivery_code',
        'govdelivery_question_id',
        'govdelivery_answer_id',
    )

    def __init__(self, *args, **kwargs):
        for name in self._HANDLER_KWARGS:
            kwargs.pop(name)
        super(MockConferenceRegistrationForm, self).__init__(*args, **kwargs)

    def save(self, commit=False):
        """No-op: the mock never writes anywhere."""
        pass

    @property
    def at_capacity(self):
        """The mock event always has room."""
        return False
class AtCapacityConferenceRegistrationForm(MockConferenceRegistrationForm):
    # Variant of the mock form that reports the conference as full.
    @property
    def at_capacity(self):
        """Always report the event as at capacity."""
        return True
class ExceptionThrowingConferenceRegistrationForm(
    MockConferenceRegistrationForm
):
    # Variant whose save() always fails, for exercising error handling.
    def save(self, commit=False):
        """Simulate a persistence failure."""
        raise RuntimeError('something went wrong')
class TestConferenceRegistrationHandler(TestCase):
    """Unit tests for ConferenceRegistrationHandler.process(): capacity
    reporting, success redirects/query-string handling, and form-save
    failure messaging."""
    def setUp(self):
        self.factory = RequestFactory()
        self.path = '/path/to/form'
        self.page = object()
        # Minimal block config the handler expects from the CMS block.
        self.block_value = {
            'capacity': '100',
            'govdelivery_code': 'ABC123',
            'govdelivery_question_id': '12345',
            'govdelivery_answer_id': '67890',
            'failure_message': "Something went wrong in a test.",
        }
    def get_handler(self, request=None, form_cls=None):
        # Helper: build a handler with a POST request and the plain mock
        # form unless the test overrides them.
        if request is None:
            request = self.factory.post(self.path)
        if form_cls is None:
            form_cls = MockConferenceRegistrationForm
        return ConferenceRegistrationHandler(
            page=self.page,
            request=request,
            block_value=self.block_value,
            form_cls=form_cls
        )
    def test_process_not_submitted_returns_empty_form(self):
        response = self.get_handler().process(is_submitted=False)
        self.assertFalse(response['form'].is_bound)
    def test_process_not_at_capacity(self):
        response = self.get_handler().process(is_submitted=False)
        self.assertFalse(response['is_at_capacity'])
    def test_process_at_capacity(self):
        handler = self.get_handler(
            form_cls=AtCapacityConferenceRegistrationForm
        )
        response = handler.process(is_submitted=False)
        self.assertTrue(response['is_at_capacity'])
    def test_process_not_submitted_not_successful_submission(self):
        response = self.get_handler().process(is_submitted=False)
        self.assertFalse(response['is_successful_submission'])
    def test_request_with_query_string_marks_successful_submission(self):
        # A bare "?success" query flag marks the post-redirect GET.
        request = self.factory.get('/?success')
        handler = self.get_handler(request=request)
        response = handler.process(is_submitted=False)
        self.assertTrue(response['is_successful_submission'])
    def test_successful_process_returns_redirect(self):
        response = self.get_handler().process(is_submitted=True)
        self.assertIsInstance(response, HttpResponseRedirect)
    def test_successful_process_returns_temporary_redirect(self):
        response = self.get_handler().process(is_submitted=True)
        self.assertEqual(response.status_code, 302)
    def test_process_returns_redirect_with_query_string_parameter(self):
        response = self.get_handler().process(is_submitted=True)
        self.assertEqual(response['Location'], self.path + '?success')
    def test_process_exception_sets_form_error(self):
        # A save() failure should surface block_value['failure_message']
        # as a non-field form error rather than propagate.
        handler = self.get_handler(
            form_cls=ExceptionThrowingConferenceRegistrationForm
        )
        response = handler.process(is_submitted=True)
        self.assertEqual(
            response['form'].non_field_errors(),
            ["Something went wrong in a test."]
        )
20442746291 | import re
def loadDataFromFile(fname):
    """Parse an AoC day-7 bag-rules file.

    A line such as "light red bags contain 1 bright white bag, 2 muted
    yellow bags." becomes {'container': 'light red', 'content': [{'size': 1,
    'color': 'bright white'}, ...]}; "no other bags" yields empty content.
    Note: only the first character of each item is read as the count, so
    counts of 10 or more are not supported.
    """
    rules = []
    with open(fname, 'r') as handle:
        for raw in handle:
            # Turn the separators into ':' markers, then drop the
            # trailing "bag."/"bags." tokens.
            marked = re.sub('bags contain|bag\,|bags\,', ':', raw.strip())
            cleaned = re.sub('bag\.|bags\.', '', marked)
            pieces = cleaned.strip().split(':')
            content = []
            for piece in pieces[1:]:
                piece = piece.strip()
                if piece == "no other":
                    continue
                content.append({'size': int(piece[0]),
                                'color': piece[1:].strip()})
            rules.append({'container': pieces[0].strip(), 'content': content})
    return rules
def part1(bagMap):
    """Count bag colors that can (transitively) contain a shiny gold bag."""
    target = 'shiny gold'

    def holders(color):
        # Containers whose rules list `color` among their direct contents.
        return [rule['container'] for rule in bagMap
                if any(item['color'] == color for item in rule['content'])]

    queue = holders(target)
    found = list(queue)
    while queue:
        current = queue.pop(0)
        for outer in holders(current):
            if outer not in found:
                found.append(outer)
            if outer not in queue:
                queue.append(outer)
    return len(found)
def getBagContent(bagMap, color):
    """Return the 'content' list of the rule whose container matches
    `color`, or None when no rule exists for that color."""
    return next(
        (rule['content'] for rule in bagMap if rule['container'] == color),
        None,
    )
def part2(bagMap, initialColor):
    """Return how many bags one *initialColor* bag must contain in total
    (recursive: each inner bag counts itself plus its own contents)."""
    content = getBagContent(bagMap, initialColor)
    if not content:
        # Unknown colour or a bag that holds nothing.
        return 0
    return sum(item['size'] * (1 + part2(bagMap, item['color'])) for item in content)
bagMapping = loadDataFromFile("input_day7.dat")
print("Solution for part 1: {}".format(part1(bagMapping)))
print("Solution for part 2: {}".format(part2(bagMapping, 'shiny gold'))) | tmarketin/AdventOfCode | 2020/Day7/sol_day7.py | sol_day7.py | py | 1,782 | python | en | code | 0 | github-code | 90 |
# NOTE(review): the "18122201649 | " prefix on the next line looks like
# dataset-extraction residue fused onto the class header — confirm against
# the original file before running.
18122201649 | class Dice(object):
    """A six-sided die tracked as two rings of face values.

    ``vertical`` and ``horizontal`` each hold four faces; rolling rotates
    one ring and patches the two faces it shares with the other ring.
    Exact compass semantics of N/S/E/W are inferred from the roll code —
    confirm against the original problem statement.
    """
    def __init__(self, numbers):
        """
        Args:
            numbers: the six face values; numbers[i] is the value written
                on face i+1 of the standard numbering.
        """
        self.numbers_inverse = {numbers[0]: 1, numbers[1]: 2, numbers[2]: 3, numbers[3]: 4, numbers[4]: 5,
                                numbers[5]: 6}
        self.numbers = {1: numbers[0], 2: numbers[1], 3: numbers[2], 4: numbers[3], 5: numbers[4], 6: numbers[5]}
        self.vertical = [self.numbers[1], self.numbers[2], self.numbers[6], self.numbers[5]]
        self.horizontal = [self.numbers[4], self.numbers[1], self.numbers[3], self.numbers[6]]
    def roll_dice(self, str):
        """Roll the die once per character of *str*.

        Args:
            str: sequence of move commands, each one of 'N', 'S', 'W', 'E'.
                 (NOTE(review): parameter shadows the builtin ``str``.)
        """
        for s in str:
            if s == 'N':
                self.move_north()
            elif s == 'S':
                self.move_south()
            elif s == 'W':
                self.move_west()
            elif s == 'E':
                self.move_east()
    def set_top(self, value):
        """Rotate the die (up to 4 'S' rolls, then up to 4 'W' rolls)
        until *value* is the top face, then refresh ``numbers``.

        Args:
            value: the face value that must end up on top.
        """
        counter = 0
        while counter < 4:
            if self.vertical[0] == value:
                self.map_values()
                return
            else:
                self.roll_dice("S")
            counter += 1
        counter = 0
        while counter < 4:
            if self.vertical[0] == value:
                self.map_values()
                return
            else:
                self.roll_dice("W")
            counter += 1
    def set_front(self, value):
        """Spin the die with "SWN" rolls (which restore the top face)
        until *value* is on the front face, then refresh ``numbers``.

        Args:
            value: the face value that must end up in front.
        """
        counter = 0
        while counter < 4:
            if self.vertical[1] == value:
                self.map_values()
                return
            else:
                self.roll_dice("SWN")
            counter += 1
    def move_south(self):
        """Roll one step for command 'S': rotate the vertical ring and
        patch the two shared faces of the horizontal ring.
        (Original docstring said 'north' — the naming was inconsistent.)
        """
        self.vertical = (self.vertical * 2)[3:7]
        self.horizontal[1] = self.vertical[0]
        self.horizontal[3] = self.vertical[2]
    def move_north(self):
        """Roll one step for command 'N' (inverse rotation of move_south)."""
        self.vertical = (self.vertical * 2)[1:5]
        self.horizontal[1] = self.vertical[0]
        self.horizontal[3] = self.vertical[2]
    def move_east(self):
        """Roll one step for command 'E': rotate the horizontal ring and
        patch the shared faces of the vertical ring."""
        self.horizontal = (self.horizontal * 2)[3:7]
        self.vertical[0] = self.horizontal[1]
        self.vertical[2] = self.horizontal[3]
    def move_west(self):
        """Roll one step for command 'W' (inverse rotation of move_east)."""
        self.horizontal = (self.horizontal * 2)[1:5]
        self.vertical[0] = self.horizontal[1]
        self.vertical[2] = self.horizontal[3]
    def map_values(self):
        # Rebuild the face-number mapping from the current ring positions.
        self.numbers[1] = self.vertical[0]
        self.numbers[2] = self.vertical[1]
        self.numbers[3] = self.horizontal[2]
        self.numbers[4] = self.horizontal[0]
        self.numbers[5] = self.horizontal[3]
        self.numbers[6] = self.vertical[3]
    def get_top(self):
        # Value currently showing on the top face.
        return self.vertical[0]
dice = Dice([int(x) for x in raw_input().split()])
number_of_questions = int(raw_input())
counter = 0
while counter < number_of_questions:
[top, front] = [int(x) for x in raw_input().split()]
dice.set_top(top)
dice.set_front(front)
print(dice.numbers[3])
counter += 1 | Aasthaengg/IBMdataset | Python_codes/p02384/s814678894.py | s814678894.py | py | 3,383 | python | en | code | 0 | github-code | 90 |
34855857663 | #Write a program that accepts a sequence of whitespace separated words as input and
#prints the words after removing all duplicate words and sorting them alphanumerically.
#Suppose the following input is supplied to the program:
#hello world and practice makes perfect and hello world again
#Then, the output should be:
#again and hello makes perfect practice world
def sortWords():
    """Read one line of space-separated words from stdin and print the
    unique words, sorted, joined by single spaces.

    Fix: the previous version initialised ``word = []`` only to overwrite
    it on the next line (dead assignment); set + sort are now folded into
    one ``sorted(set(...))`` call.  Sorting stays plain lexicographic
    ('Z' < 'a'), matching the original behaviour.
    """
    words = input("Enter the string :- ").split(' ')
    unique_words = sorted(set(words))
    print(' '.join(unique_words))
sortWords()
| mukulverma2408/PracticeGeeksforGeeks | PythonPracticeQuestion-Part2/Git-Ques10.py | Git-Ques10.py | py | 545 | python | en | code | 0 | github-code | 90 |
39271215519 | # ---------------------------------------------------------------------------- #
# Imports #
# ---------------------------------------------------------------------------- #
# Server Stuff
from flask import Flask, render_template, request, send_file
from flask_socketio import SocketIO, emit
from flask_sqlalchemy import SQLAlchemy
# Misc
import socket as _socket
import urllib.parse
import threading
import pathlib
import logging
import random
import string
import shutil
import base64
import json
import time
import os
# Cuid
from cuid import cuid
# ---------------------------------------------------------------------------- #
# Initialize variables #
# ---------------------------------------------------------------------------- #
cfgPath = os.path.join(os.path.expanduser("~"), ".PaddeCraftSoftware", "TouchPanel")
app = Flask("TouchPanel", root_path=pathlib.Path(__file__).parent.absolute())
app.config["SECRET_KEY"] = "SECRET-" + (
"".join(random.choice(string.ascii_uppercase + string.digits) for _ in range(128))
)
app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False
app.config["SQLALCHEMY_DATABASE_URI"] = "sqlite:///" + os.path.join(cfgPath, "data.db")
pathlib.Path(cfgPath).mkdir(exist_ok=True, parents=True)
db = SQLAlchemy(app)
socket = SocketIO(app)
closeTab = "<script>window.close()</script>"
pageSizes = [
{"id": 1, "name": "5x3"},
{"id": 2, "name": "7x5"},
{"id": 3, "name": "9x6"},
{"id": 4, "name": "15x10"},
]
# ---------------------------------------------------------------------------- #
# Prevent flask logger #
# ---------------------------------------------------------------------------- #
logging.getLogger("werkzeug").disabled = True
# ---------------------------------------------------------------------------- #
# Database models #
# ---------------------------------------------------------------------------- #
class Page(db.Model):
    """A panel page: a named grid of buttons.

    ``type`` selects the grid size (see module-level ``pageSizes``) and
    exactly one page is expected to have ``main`` set — it is served as
    the landing page.
    """
    cuid = db.Column(db.String(32), primary_key=True, unique=True)  # collision-resistant id (cuid())
    name = db.Column(db.String(32))  # display name shown in the page switcher
    main = db.Column(db.Boolean)     # True for the landing page
    type = db.Column(db.Integer)     # grid-size id, see table below
    # Types cols x rows
    # 1: 5x3
    # 2: 7x5
    # 3: 9x6
    # 4: 15x10
class Button(db.Model):
    """A single button on a page: icon + label + action script."""
    cuid = db.Column(db.String(32), primary_key=True, unique=True)
    pageCuid = db.Column(db.String(32))   # cuid of the owning Page
    index = db.Column(db.Integer)         # grid slot on the page
    name = db.Column(db.String(32))       # label shown under the icon
    icon = db.Column(db.String(128))      # filename inside <cfgPath>/icons
    action = db.Column(db.String(128))    # script filename inside <cfgPath>/scripts
# ---------------------------------------------------------------------------- #
# Create everything if not existing #
# ---------------------------------------------------------------------------- #
# First-run bootstrap: if no database exists yet, create the config tree,
# a do-nothing action script, an example page with 12 placeholder buttons,
# and the default icon.
if not os.path.isfile(os.path.join(cfgPath, "data.db")):
    print("Generating example page...")
    os.makedirs(os.path.join(cfgPath, "scripts"), exist_ok=True)
    os.makedirs(os.path.join(cfgPath, "icons"), exist_ok=True)
    # Placeholder action: a script that exits immediately.
    with open(os.path.join(cfgPath, "scripts", "noneaction.py"), "w+") as f:
        f.write("exit(0)")
    with app.app_context():
        db.create_all()
        examplePage = Page(cuid=cuid(), name="Example Page", main=True, type=1)
        db.session.add(examplePage)
        for x in range(12):
            exampleBtn = Button(
                cuid=cuid(),
                index=x,
                pageCuid=examplePage.cuid,
                name="Nothingness " + str(x + 1),
                icon="cross.jpg",
                action="noneaction.py",
            )
            db.session.add(exampleBtn)
        db.session.commit()
    # Ship the bundled default icon into the user's icon directory.
    shutil.copyfile(
        os.path.join(pathlib.Path(__file__).parent.absolute(), "cross.jpg"),
        os.path.join(cfgPath, "icons", "cross.jpg"),
    )
# ---------------------------------------------------------------------------- #
# Code functions #
# ---------------------------------------------------------------------------- #
def _loadPage(id):
    """Push page *id* to the client; exposed to user action scripts as the
    ``loadPage`` global (see execPyCode)."""
    emit("loadpage", generatePageJson(id))
# ---------------------------------------------------------------------------- #
# Helper functions #
# ---------------------------------------------------------------------------- #
def execPyCode(code, btnid):
    """Execute a user-provided action script.

    The script runs with two globals: ``btnid`` (cuid of the pressed
    button) and ``loadPage(page_cuid)``.  NOTE(review): this is arbitrary
    code execution by design (scripts live in the user's config dir) —
    do not expose the app to untrusted networks.
    """
    exec(code, {"btnid": btnid, "loadPage": _loadPage})
def generatePageJson(pageCuid):
    """Build the JSON-able payload the client needs to render one page:
    the page's metadata, its buttons, and a name/cuid list of *all*
    pages for the page-switcher menu."""
    page = Page.query.filter_by(cuid=pageCuid).first()
    data = {
        "cuid": page.cuid,
        "name": page.name,
        "type": page.type,
        "buttons": [],
        "pages": [],
    }
    for pge in Page.query.all():
        data["pages"].append({"name": pge.name, "cuid": pge.cuid})
    for btn in Button.query.filter_by(pageCuid=page.cuid).all():
        data["buttons"].append(
            {"cuid": btn.cuid, "name": btn.name, "icon": btn.icon, "index": btn.index}
        )
    return data
# ---------------------------------------------------------------------------- #
# Socket interactions #
# ---------------------------------------------------------------------------- #
@socket.on("run")
def runaction(btnid):
btn = Button.query.filter_by(cuid=btnid).first()
file = btn.action
try:
with open(os.path.join(cfgPath, "scripts", file), encoding="UTF-8") as f:
content = f.read()
execPyCode(content, btnid)
except Exception as e:
emit("actionError", {"msg": str(e), "fname": file})
@socket.on("loadLandingPage")
def loadLandingPage():
emit("loadpage", generatePageJson(Page.query.filter_by(main=True).first().cuid))
@socket.on("loadPage")
def loadPage(id):
emit("loadpage", generatePageJson(id))
# ---------------------------------------------------------------------------- #
# HTTP interactions #
# ---------------------------------------------------------------------------- #
@app.route("/icon/<file>/")
def loadIcon(file):
return send_file(os.path.join(cfgPath, "icons", file))
@app.route("/")
def main():
return render_template("main.html", edit=False)
@app.route("/edit")
def edit():
return render_template("main.html", edit=True)
@app.route("/edit/btn", methods=["GET", "POST"])
def editBtn():
if request.method == "GET":
actions = os.listdir(os.path.join(cfgPath, "scripts"))
icons = os.listdir(os.path.join(cfgPath, "icons"))
if request.args.get("mode") == "new":
return render_template(
"editbtn.html",
actions=actions,
icons=icons,
action=actions[0],
isNew=False,
icon=icons[0],
)
else:
btn = Button.query.filter_by(cuid=request.args.get("id")).first()
action = btn.action
name = btn.name
icon = btn.icon
btnData = {"name": btn.name, "icon": btn.icon, "action": btn.action}
with open(os.path.join(cfgPath, "icons", btn.icon), "rb") as _icon:
btnData["icon_b64"] = base64.b64encode(_icon.read()).decode("utf-8")
with open(os.path.join(cfgPath, "scripts", btn.action), "rb") as _action:
btnData["script_64"] = base64.b64encode(_action.read()).decode("utf-8")
return render_template(
"editbtn.html",
btnData=urllib.parse.quote(json.dumps(btnData)),
actions=actions,
icons=icons,
action=action,
isNew=False,
name=name,
icon=icon,
)
else:
new = request.args.get("mode") == "new"
page = request.args.get("page")
name = request.form.get("name")
icon = request.form.get("icon")
action = request.form.get("action")
if new:
index = request.args.get("idx")
btn = Button(
cuid=cuid(),
pageCuid=page,
name=name,
icon=icon,
action=action,
index=index,
)
db.session.add(btn)
db.session.commit()
else:
id = request.args.get("id")
btn = Button.query.filter_by(cuid=id).first()
btn.name = name
btn.icon = icon
btn.action = action
db.session.commit()
emit("loadpage", generatePageJson(page), namespace="/", broadcast=True)
return closeTab
@app.route("/edit/deletebtn")
def deleteBtn():
id = request.args.get("id")
page = request.args.get("page")
btn = Button.query.filter_by(cuid=id).first()
db.session.delete(btn)
db.session.commit()
emit("loadpage", generatePageJson(page), namespace="/", broadcast=True)
return closeTab
@app.route("/edit/deletepage")
def deletePage():
id = request.args.get("id")
page = Page.query.filter_by(cuid=id).first()
if page.main:
Page.query.filter_by(main=False).first().main = True
db.session.delete(page)
loadid = Page.query.filter_by(main=True).first().cuid
emit("loadpage", generatePageJson(loadid), namespace="/", broadcast=True)
db.session.commit()
return closeTab
@app.route("/edit/makemain")
def makePageMain():
id = request.args.get("page")
old = Page.query.filter_by(main=True).first()
old.main = False
new = Page.query.filter_by(cuid=id).first()
new.main = True
db.session.commit()
return "Success"
@app.route("/edit/page", methods=["GET", "POST"])
def editPage():
edit = request.args.get("mode") == "edit"
if request.method == "GET":
pagecount = Page.query.count()
if edit:
page = Page.query.filter_by(cuid=request.args.get("page")).first()
return render_template(
"editpage.html",
edit=True,
pagecnt=pagecount,
sizes=pageSizes,
name=page.name,
)
else:
return render_template(
"editpage.html",
edit=False,
pagecnt=pagecount,
sizes=pageSizes,
name="",
)
else:
if edit:
page = Page.query.filter_by(cuid=request.args.get("page")).first()
page.name = request.form.get("name")
db.session.commit()
emit("loadpage", generatePageJson(page.cuid), namespace="/", broadcast=True)
else:
page = Page(
cuid=cuid(),
name=request.form.get("name"),
main=False,
type=request.form.get("type"),
)
db.session.add(page)
db.session.commit()
emit("loadpage", generatePageJson(page.cuid), namespace="/", broadcast=True)
return closeTab
# ---------------------------------------------------------------------------- #
# Run function #
# ---------------------------------------------------------------------------- #
def run():
    """Start the Socket.IO web server on 0.0.0.0:8811 and, from a side
    thread, print the machine's LAN address shortly after startup."""
    def notifyUp():
        # Give the server a moment to bind before announcing it.
        time.sleep(1.4)
        print("\nThe server should be up.")
        print("Detecting ip address...")
        # Get ip, source: https://stackoverflow.com/a/1267524
        # First non-127.* address from the hostname lookup; otherwise the
        # local address of a UDP socket "connected" to 8.8.8.8 (connect on
        # a datagram socket just selects a route, no packet is sent).
        # The trailing "+ [None]" makes [0] safe when both methods fail.
        ip = (
            (
                [
                    ip
                    for ip in _socket.gethostbyname_ex(_socket.gethostname())[2]
                    if not ip.startswith("127.")
                ]
                or [
                    [
                        (s.connect(("8.8.8.8", 53)), s.getsockname()[0], s.close())
                        for s in [_socket.socket(_socket.AF_INET, _socket.SOCK_DGRAM)]
                    ][0][1]
                ]
            )
            + [None]
        )[0]
        if ip == None:
            print("Couldn't detect your ip address.")
        else:
            print("Running on " + ip + ":8811")
        print("\nPress ^C to stop the server.")
    print("Starting Server...")
    threading.Thread(target=notifyUp).start()
    socket.run(app, host="0.0.0.0", port="8811", allow_unsafe_werkzeug=True)
if __name__ == "__main__":
print("It`s recommended run TouchPanel using the 'touchpanel' command.")
run()
| PaddeCraft/TouchPanel | touchpanel/__main__.py | __main__.py | py | 12,584 | python | en | code | 0 | github-code | 90 |
24384812017 | #!/usr/bin/env python
from __future__ import print_function
import sys
import os.path
import argparse
import re
from subprocess import call
"""
Script for preparing and running deTIN
"""
epi = ('\
\n\
Make the preparation for CANVAS, test tumour and normal sample\n\
\n\
\n\
')
# Describe what the script does
parser = argparse.ArgumentParser(description='This script writes the commands for running deTIN tumour-in-normal subtraction', epilog= epi, formatter_class=argparse.RawTextHelpFormatter)
# Get inputs
parser.add_argument('-t', '--tumour_bam', default=None, dest='tum', action='store', required=True, help="Tumour.bam file full path")
parser.add_argument('-v', '--vcf', default=None, dest='vcf', action='store', required=True, help="Somatic VCF file")
parser.add_argument('-n', '--nvcf', default=None, dest='nvcf', action='store', required=True, help="Normal variants VCF file")
parser.add_argument('-o', '--out_folder', default=None, dest='out', action='store', required=True, help="Output folder full path")
gc_prof = '/home/mzarowiecki/bin/hartwigmedicalfoundation/GC_profile.hg38.1000bp.cnp'
jar_loc = '/home/mzarowiecki/bin/hartwigmedicalfoundation/hmftools_pipeline_v4_3'
circos_loc = '/home/mzarowiecki/bin/circos-0.69-6/bin/circos'
# Needs a BED file with human variants
#human_bed = '/home/mzarowiecki/scratch/REF/af-only-gnomad.hg38.01.chr.bed'
human_bed = '/home/mzarowiecki/scratch/REF/GermlineHetPon.hg38.bed'
# No arguments at all: show usage and bail out.
if len(sys.argv)==1:
    parser.print_help()
    sys.exit(1)
args = parser.parse_args()
# Check that the tumour BAM exists.
# NOTE(review): "not isfile(x)==True" parses as "not (isfile(x) == True)",
# i.e. plain "not isfile(x)" — works, but the == True is redundant.
if not os.path.isfile(args.tum)==True:
    print("Cannot find input file ",args.tum)
    sys.exit(1)
# Check that the somatic VCF exists.
if not os.path.isfile(args.vcf)==True:
    print("Cannot find input file ",args.vcf)
    sys.exit(1)
# Check that the normal-variants VCF exists.
if not os.path.isfile(args.nvcf)==True:
    print("Cannot find input file ",args.nvcf)
    sys.exit(1)
# Output folder will be created later (mkdir -p in the generated script).
# Derive the sample prefix: basename of the BAM with every ".bam"
# occurrence removed, then open the bsub script for writing.
tum=args.tum.rstrip()
tumpx = tum.split("/")[-1]
tumpx = ''.join(tumpx.split(".bam")[0:-1])
sh=args.out + '/' + tumpx + '.sh'
f = open(sh, 'w')
# Print bsub header
print ('#!/bin/bash \n\
\n\
# a template to run canvas-spw on trio samples\n\
# when multisample SNV vcfs already created for B allele frequencies\n\
\n\
# the filename for STDOUT\n\
#BSUB -o %s/%s.o\n\
#BSUB -e %s/%s.e\n\
\n\
# The queue to which the job is to be submitted\n\
#BSUB -q bio\n\
# project code for analysis, research and development activities\n\
#BSUB -P Analysis\n\
\n\
# Memory usage configuration\n\
#BSUB -R "span[hosts=1]" \n\
#BSUB -R "select[largedsk]" \n\
#BSUB -R "hname!=\'hpc-prod-grid-lsfexec-001\' && hname!=\'hpc-prod-grid-lsfexec-002\'" \n\
#BSUB -n 4\n\
\n\
# add canvas application profile so that jobs are not preemtied too many times (see https://jira.extge.co.uk/browse/INFRA-6931)\n\
#BSUB -app canvas\n\
' % (args.out,tumpx,args.out,tumpx), file=f )
# Print file locations
print ('\n\
source /etc/profile.d/modules.sh\n\
\n\
module load canvas/1.39.0.1598\n\
module load bcftools/1.5\n\
\n\
TODAY=`date +%%Y-%%m-%%d`\n\
\n\
# refence files\n\
\n\
GENOME=/home/mzarowiecki/scratch/REF/reference_no_alt\n\
KMER=/home/mzarowiecki/scratch/REF/reference_no_alt/kmer.GCA_000001405.15_GRCh38_no_alt_plus_hs38d1_analysis_set.fa\n\
#KMER=/genomes/scratch/dkasperaviciute/sv_cnv/canvas-spw/edico/reference_no_alt/kmer.GCA_000001405.15_GRCh38_no_alt_plus_hs38d1_analysis_set.fa\n\
#GENOME=/genomes/scratch/dkasperaviciute/sv_cnv/canvas-spw/edico/reference_no_alt\n\
#KMER=/genomes/scratch/dkasperaviciute/sv_cnv/canvas-spw/edico/reference/kmer.GCA_000001405.15_GRCh38_full_plus_hs38d1_analysis_set.fa\n\
#GENOME=/genomes/scratch/dkasperaviciute/sv_cnv/canvas-spw/edico/reference\n\
FILTER=/genomes/resources/genomeref/Illumina/Homo_sapiens/NCBI/hg38-NSv6/Annotation/Canvas/filter13.bed\n\
PLO=/home/mzarowiecki/scratch/Benchmark_CNV_callers/Canvas/Canvas139/male_ploidy.vcf.gz\n\
\n\
#input files\n\
TBAM=%s\n\
VCF=%s\n\
NVCF=%s\n\
NAME=%s\n\
\n\
# output folder\n\
CANVAS_OUTPUT_DIR=%s\n\
mkdir -p $CANVAS_OUTPUT_DIR\n\
cd $CANVAS_OUTPUT_DIR\n\
\n\
' % (args.tum, args.vcf, args.nvcf, tumpx, args.out), file=f)
# Germline free
print ('canvas Somatic-WGS -b $TBAM --somatic-vcf=$VCF --sample-b-allele-vcf=$NVCF -n $NAME -o $CANVAS_OUTPUT_DIR/%s -r $KMER -g $GENOME -f $FILTER --ploidy-vcf=$PLO' % (tumpx), file=f)
f.close()
print ('bsub -J %s < %s' % (tumpx, sh) )
quit()
| MagdalenaZZ/Python_ditties | run_CANVAS.py | run_CANVAS.py | py | 4,533 | python | en | code | 0 | github-code | 90 |
5793347765 | #!/usr/bin/env python3
from urllib.parse import quote
from urllib.request import Request, urlopen, HTTPError
import json, csv
from settings import *
import ast
import argparse
parser = argparse.ArgumentParser(description='debut / fin / nombres de datasets')
parser.add_argument('-s', '--start', type=int, default=0, help='debut de ligne' )
parser.add_argument('-e', '--end', type=int, default=0, help='s arrete a stop-1 , si O ou null alors le script va a la fin ')
parser.add_argument('-c', '--count', type=int, default=0, help='nombre de boucles a effectuer si 0 ou null alors pas compris en compte' )
args = parser.parse_args()
# Historical run figures kept for reference:
# LOOP = 17688
# #res count 2
# loop_count_res = 75748
loop_count = 0       # datasets processed so far
loop_count_res = 0   # resources processed so far
loop_serie = args.count   # max number of datasets this run (0 = unlimited)
loop_start = args.start   # first row to process
loop_end = args.end       # stop before this row (0 = run to the end)
# Manual toggles for dry runs (skip the CKAN create calls).
skip_dataset = False
skip_resource = False
# Verbose dumps of parsed rows.  NOTE(review): "datatset" is a typo'd
# variable name, but it is used consistently below.
debug_datatset = False
debug_resource = False
for row in csv.DictReader(open("data/dataset_fr.csv", encoding='utf-8'), delimiter=";"):
print("LOOP = ", loop_count)
# skip start
if loop_start > loop_count:
loop_count += 1
continue
# skip end
if loop_end <= loop_count and loop_end > 0:
break
# skip end
if loop_start + loop_serie <= loop_count and loop_serie > 0:
break
dataset_dict = {}
# pour resources
package_id = row['id']
dataset_dict['name'] = package_id
dataset_dict['title'] = row['title']
dataset_dict['notes'] = row['description']
dataset_dict['url'] = row['uri']
organization = ast.literal_eval(row['organization'])
dataset_dict['owner_org'] = organization['id']
if debug_datatset:
print("----------dataset------------")
# print(row)
print(quote(json.dumps(row)))
print(type(row))
# dict_keys(['uri', 'description', 'id', 'title', 'resources', 'organization', 'owner'])
print(row.keys())
print("----------organization------------")
print(row['organization'])
# dict_keys(['class', 'uri', 'name', 'id', 'slug', 'page', 'logo'])
print(organization.keys())
print("----------dataset to SEND------------")
# print(dataset_dict)
# --------------------------INSERTION / CREATION dataset --------------------------------
if not skip_dataset:
request = Request('http://%s/api/action/package_create'%ckan_host)
request.add_header('Authorization', ckan_api_key)
data_string = quote(json.dumps(dataset_dict)).encode('utf8')
try:
response = urlopen(request, data_string)
# Use the json module to load CKAN's response into a dictionary.
response_dict = json.loads(response.read().decode())
assert response_dict['success'] is True
except HTTPError as error:
print("{}: {}".format(row['title'], error.read()))
resources = ast.literal_eval(row['resources'])
if debug_resource:
print("----------resources------------")
print(row['resources'])
print(resources[0].keys())
# dict_keys(['checksum', 'url', 'type', 'description', 'mime', 'created_at', 'id', 'last_modified',
# 'published', 'is_available', 'format', 'size', 'metrics', 'title'])
print('#res count', len(resources))
for res in resources:
loop_count_res += 1
res['package_id'] = package_id
res['resource_type'] = res['type']
res['mimetype'] = res['mime']
res['created'] = res['created_at']
if debug_resource:
print("----------res------------")
print(res)
# --------------------------INSERTION / CREATION resource --------------------------------
if not skip_resource:
request_res = Request('http://%s/api/action/resource_create'%ckan_host)
request_res.add_header('Authorization', ckan_api_key)
data_string = quote(json.dumps(res)).encode('utf8')
try:
response = urlopen(request_res, data_string)
# Use the json module to load CKAN's response into a dictionary.
response_dict = json.loads(response.read().decode())
assert response_dict['success'] is True
except HTTPError as error:
print("{}: {}".format(res['description'], error.read()))
loop_count += 1
print('loop_count_res = ', loop_count_res)
# ----------------------------------------------------------
# request = Request('http://%s/api/action/package_create'%ckan_host)
# request.add_header('Authorization', ckan_api_key)
# try:
# response = urlopen(request, data_string)
# # Use the json module to load CKAN's response into a dictionary.
# response_dict = json.loads(response.read().decode())
# assert response_dict['success'] is True
# except HTTPError as error:
# print("{}: {}".format(organization["name"], error.read()))
| Open-Initiative/epidemium-import | import_datasets.py | import_datasets.py | py | 4,915 | python | en | code | 0 | github-code | 90 |
29939992881 | import urllib2
import requests
import json
import socket
import ssl
ctx = ssl.create_default_context()
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
def downloadFile(url, magicNumber = None, maxSize = 10 * 1024 * 1024):
    """Fetch *url* and return its body as a byte string, or None.

    Python 2 code (urllib2).  Returns None when the request fails, when
    *magicNumber* is given and the body does not start with those bytes,
    or when the body exceeds *maxSize* (one extra byte is read on purpose
    to detect oversize).  TLS verification is disabled via the
    module-level ``ctx``.
    NOTE(review): the bare ``except`` also swallows unrelated errors, and
    the handle is not closed on the oversize path — consider tightening.
    """
    try:
        f = urllib2.urlopen(url, timeout=10, context=ctx)
        if magicNumber is None:
            entireFile = f.read(maxSize + 1)
        else:
            # Check the magic bytes first; mismatch -> not the file type we want.
            if f.read(len(magicNumber)) != magicNumber:
                f.close()
                return None
            entireFile = magicNumber + f.read(maxSize + 1)
        f.close()
        if len(entireFile) > maxSize:
            return None
        return entireFile
    except:
        return None
def doApiReq(url, post = None):
    """Call a JSON API endpoint (GET, or POST when *post* is given).

    Returns a (success, payload) tuple: (True, data) on success, else
    (False, message).  NOTE(review): on an unrecognised error payload
    (non-zero 'error' without 'data'... actually any 'error' != 0 or
    missing 'data') the message is returned; any exception maps to a
    generic "Connection exception".
    """
    try:
        if post is None:
            r = requests.get(url)
        else:
            r = requests.post(url, data=post)
        #print url
        #print post
        #print r.text
        data = json.loads(r.text)
        if data['error'] != 0 or 'data' not in data:
            if 'debug' in data:
                return (False, data['message'] + ' --- ' + data['debug'])
            else:
                return (False, data['message'])
        else:
            return (True, data['data'])
    except:
        return (False, "Connection exception")
def getMyIp():
    """Best-effort detection of this machine's outward-facing IPv4 address.

    "Connecting" a UDP socket to 8.8.8.8 selects a route without sending
    traffic; the socket's local address is returned.  Returns None on any
    failure.  NOTE(review): the bare ``except`` hides real errors and the
    socket leaks when connect() raises — a try/finally close and a
    ``socket.error`` clause would be safer.
    """
    try:
        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        s.connect(("8.8.8.8", 80))
        ipStr = s.getsockname()[0]
        s.close()
        return ipStr
    except:
        pass
    # NOTE(review): the trailing "| JaanusKaapPublic/..." text on the next
    # line appears to be dataset-extraction residue fused onto the code.
    return None | JaanusKaapPublic/Rehepapp | Scripts/Libs/Web.py | Web.py | py | 1,249 | python | en | code | 54 | github-code | 90 |
28365412298 | class ListNode:
    def __init__(self, val=0, next=None):
        # val: node payload; next: following node, or None for the tail.
        self.val = val
        self.next = next
class Solution:
    """Linked-list reversal utilities (LeetCode-style).

    Nodes only need ``val``/``next`` attributes.

    Fixes:
    * ``reverseN`` referenced an undefined local ``successor`` in its
      recursive frames (NameError for n > 1); the successor node is now
      stored on ``self`` so every unwinding frame sees it;
    * ``reversetBetween`` called the non-existent ``self.reversetN``;
      it now calls ``reverseN`` (the method name itself is kept for
      backward compatibility).
    """

    def reverse(self, node):
        """Reverse the whole list recursively; return the new head."""
        if node is None or node.next is None:
            return node
        last_node = self.reverse(node.next)
        node.next.next = node
        node.next = None
        return last_node

    def reverse_v1(self, node):
        """Reverse the whole list iteratively; return the new head."""
        pre = None
        curr = node
        while curr is not None:
            nxt = curr.next
            curr.next = pre
            pre = curr
            curr = nxt
        return pre

    def reverseN(self, node, n):
        """Reverse the first *n* nodes; return the new head.

        The (n+1)-th node is remembered on ``self.successor`` and
        re-attached behind the reversed prefix.
        """
        if n == 1:
            self.successor = node.next  # first node after the reversed prefix
            return node
        last_node = self.reverseN(node.next, n - 1)
        node.next.next = node
        node.next = self.successor
        return last_node

    def reversetBetween(self, node, left, right):
        """Reverse positions [left, right] (1-based); return the head."""
        if left == 1:
            return self.reverseN(node, right)
        node.next = self.reversetBetween(node.next, left - 1, right - 1)
        return node

    def reverseBetweenNode(self, node1, node2):
        """Reverse the half-open node range [node1, node2); return new head."""
        pre = None
        curr = node1
        while curr != node2:
            nxt = curr.next
            curr.next = pre
            pre = curr
            curr = nxt
        return pre

    def reverseKGroup(self, node, k):
        """Reverse the list *k* nodes at a time; a final group shorter
        than k is left untouched.  Returns the new head."""
        if node is None:
            return
        group_start = node
        group_end = node
        for _ in range(k):
            if group_end is None:
                return node  # fewer than k nodes left: keep as-is
            group_end = group_end.next
        head = self.reverseBetweenNode(group_start, group_end)
        group_start.next = self.reverseKGroup(group_end, k)
        return head
| neverset123/coding_leetcode | python/leetcode/linked_list/revert_linked_list.py | revert_linked_list.py | py | 1,659 | python | en | code | 0 | github-code | 90 |
20927858043 | from jinja2 import Environment, FileSystemLoader
import markdown2
from collections import namedtuple
from glob import glob
from pathlib import Path
Info = namedtuple(
"Info", ["name", "profession", "email", "linkedin", "github", "orcid"]
)
Site = namedtuple("Site", ["title", "url", "content"])
def replace_umlauts(html: str) -> str:
    """Replace German umlauts and eszett with their named HTML entities.

    Fix: the previous body replaced each character with itself (the
    entity strings had evidently been decoded back to raw characters at
    some point), making the function a no-op that contradicted its own
    docstring.  A single translate() pass restores the intent.
    """
    entities = {
        "ä": "&auml;",
        "ö": "&ouml;",
        "ü": "&uuml;",
        "Ä": "&Auml;",
        "Ö": "&Ouml;",
        "Ü": "&Uuml;",
        "ß": "&szlig;",
    }
    return html.translate(str.maketrans(entities))
if __name__ == "__main__":
info = Info(
name="Hannah E. McCall",
profession="Astrophysics PhD Candidate",
email="hannahmccall@uchicago.edu",
linkedin="https://www.linkedin.com/in/hannah-mccall-772194165/",
github="https://github.com/hannahmccall",
orcid="https://orcid.org/0000-0003-3537-3491",
)
contents = sorted(glob("content/*.md"))
sites = []
for i, content in enumerate(contents):
with open(content, "r") as f:
content = f.read()
title = content.splitlines()[0].strip("# ")
html = replace_umlauts(markdown2.markdown(content, extras=["tables"]))
if i == 0:
url = "index.html"
else:
url = title.lower().replace(" ", "_") + ".html"
site = Site(title, url, html)
sites.append(site)
build_path = Path("./")
env = Environment(loader=FileSystemLoader("."))
template = env.get_template("template.html")
for site in sites:
html_site = template.render(content=site.content, info=info, links=sites)
filename = site.url
with open(build_path / filename, "w") as f:
f.write(html_site)
| hannahmccall/hannahmccall.github.io | generate.py | generate.py | py | 1,925 | python | en | code | 0 | github-code | 90 |
10385078305 | from pprint import pprint
from urllib.parse import urlencode
import requests
import time
import json
import os
import vk_params
VERSION = '5.67'
API_GET_GROUP = 'https://api.vk.com/method/groups.get'
API_GET_FRIENDS = 'https://api.vk.com/method/friends.get'
AUTHORIZE_URL = 'https://oauth.vk.com/authorize'
ERROR_REQUESTS = 6
ERROR_PAGES = 18
APP_ID = 6166373
# Get the link for obtaining an access token
def get_token(file_name):
    """If the token file does not exist, print the VK OAuth implicit-flow
    URL the user must open in a browser to obtain a token.

    Always returns None.  NOTE(review): when the file exists nothing is
    done here — the token itself is presumably read elsewhere (vk_params).
    """
    token_file_path = os.path.realpath(file_name)
    if os.path.exists(token_file_path) is False:
        auth_data = {
            'client_id': APP_ID,
            'redirect_uri': 'https://oauth.vk.com/blank.html',
            'display': 'mobile',
            'scope': 'friends, groups',
            'response_type': 'token',
            'v': VERSION
        }
        print(urlencode(auth_data))
        print('?'.join(
            (AUTHORIZE_URL, urlencode(auth_data))
        ))
    return
# Page status probe (kept for debugging):
# test = requests.get(api_get_friends, make_vk_params())
# print('Page status - {}'.format(test.status_code))
# Handle errors that may occur during API requests
def do_request(url, params):
    """GET a VK API endpoint, retrying on rate limits.

    Error code 6 (too many requests) -> short sleep and retry;
    error code 18 (page deleted/banned) -> None;
    any other error -> message printed, then None (the bare ``break``
    falls out of the loop with no explicit return).
    Otherwise the parsed JSON response dict is returned.
    """
    while True:
        res = requests.get(url, params).json()
        if 'error' in res:
            if res['error']['error_code'] == ERROR_REQUESTS:
                time.sleep(0.04)
                continue
            elif res['error']['error_code'] == ERROR_PAGES:
                return None
            else:
                print('{}'.format(res['error']['error_msg']))
                break
        else:
            return res
# Get the list of my friends
def get_friends_list():
    """Return the ids of the current user's friends (friends.get)."""
    params = {
        'extended': 1,
        'fields': 'members_count',
    }
    params_for_me = vk_params.make_vk_params(**params)
    friends_list = []
    response_get_my_friends = do_request(API_GET_FRIENDS, params_for_me)
    for friend in response_get_my_friends['response']['items']:
        friends_list.append(friend['id'])
    return friends_list
# Get the list of my groups
def get_groups():
    """Return [{'Name', 'id', 'members_count'}, ...] for the current
    user's groups (groups.get).

    NOTE(review): the hard-coded 'id': 63364192 in params looks like a
    leftover from debugging a specific user — confirm before removing.
    """
    groups = []
    params = {'extended': 1,
              'fields':'members_count',
              'id': 63364192
              }
    params_for_me = vk_params.make_vk_params(**params)
    response_get_my_groups = do_request(API_GET_GROUP, params_for_me)
    for group in response_get_my_groups['response']['items']:
        groups.append({'Name': group['name'], 'id': group['id'], 'members_count': group['members_count']})
    return groups
# Get the set of groups my friends belong to
def get_groups_friends():
    """Return the set of group ids that at least one friend belongs to.

    Friends with inaccessible pages (do_request -> None) are skipped.
    Progress is printed per friend (message text is in Russian:
    "total friends / remaining to check").
    """
    friends_groups_list = []
    friends_list = get_friends_list()
    friends_count = len(friends_list)
    for i, friend in enumerate(friends_list):
        params = {
            'count': 1000,
            'user_id': friend
        }
        params_for_friends = vk_params.make_vk_params(**params)
        excess = friends_count - i
        print('Всего друзей - {}. Осталось проверить - {}'.format(friends_count, excess))
        response_friends_group = do_request(API_GET_GROUP, params_for_friends)
        if response_friends_group is None:
            continue
        else:
            friends_groups_list.extend(response_friends_group['response']['items'])
    friends_groups_set = set(friends_groups_list)
    return friends_groups_set
# Main entry point
def main():
    """Find groups the current user is in that none of their friends are
    in; dump them to new_file.json and return the list."""
    get_token(vk_params.file_name())
    private_groups = []
    friends_groups_set = get_groups_friends()
    my_groups = get_groups()
    for group_one in my_groups:
        if group_one['id'] not in friends_groups_set:
            private_groups.append({'id': group_one['id'], 'Name': group_one['Name'], 'members_count': group_one['members_count']})
    with open('new_file.json', 'w') as f:
        json.dump(private_groups, f)
    return private_groups
main()
| VinGeorge/etcetera | vk_parsing.py | vk_parsing.py | py | 3,968 | python | en | code | 0 | github-code | 90 |
27566256342 | import numpy as np
class DisjointSet:
    """Union-find with union by size and path compression.

    ``parents`` maps each element to its parent (roots map to themselves);
    ``cant`` holds, for each root, the size of its set.

    Improvement over the previous version: ``getParent`` is iterative and
    compresses paths, so long parent chains can no longer exhaust the
    recursion limit, and repeated lookups become near O(1).  External
    behaviour (returned roots, set sizes) is unchanged.
    """

    def __init__(self, elements):
        self.elements = elements
        self.cant = dict()
        self.parents = dict()
        for element in elements:
            self.parents[element] = element
            self.cant[element] = 1

    def getParent(self, element):
        """Return the representative (root) of *element*'s set."""
        # Walk up to the root...
        root = element
        while self.parents[root] != root:
            root = self.parents[root]
        # ...then point every node on the path straight at it.
        while self.parents[element] != root:
            self.parents[element], element = root, self.parents[element]
        return root

    def joint(self, elementA, elementB):
        """Merge the sets containing the two elements (union by size)."""
        parent1 = self.getParent(elementA)
        parent2 = self.getParent(elementB)
        if parent1 != parent2:
            # Attach the smaller tree under the larger one's root.
            if self.cant[parent1] >= self.cant[parent2]:
                self.parents[parent2] = parent1
                self.cant[parent1] += self.cant[parent2]
            else:
                self.parents[parent1] = parent2
                self.cant[parent2] += self.cant[parent1]
28748239571 | fin=open("mixmilk.in","r")
fout=open("mixmilk.out","w")
arr=[]
for i in range(3):
s=fin.readline().strip().split()
for i in range(len(s)):
s[i]=int(s[i])
arr.append(s)
curr=0
for i in range(100):
if curr==2:
next=0
else:
next=curr+1
if arr[curr][1]<=arr[next][0]-arr[next][1]:
arr[next][1]+=arr[curr][1]
arr[curr][1]=0
else:
arr[curr][1]-=arr[next][0]-arr[next][1]
arr[next][1]=arr[next][0]
curr=next
for i in arr:
fout.write(str(i[1]))
fout.write("\n") | SriramV739/CP | USACO/Contest/Bronze/2018December/mixmilk.py | mixmilk.py | py | 574 | python | en | code | 0 | github-code | 90 |
20404098904 | import time
from clients.AbstractClient import AbstractClient
import config
import sqlite3
class SqliteClient(AbstractClient):
    """SQLite-backed client (Python 2 code — uses ``xrange``).

    Opens an on-disk database in WAL mode and retries ``execute`` while
    the database is locked by a concurrent writer.
    """
    # Optional SQL run once on connect (set externally before construction).
    initialization_query = None
    def __init__(self):
        self.db = sqlite3.connect('sqlitedb', timeout=100)
        self.cursor = self.db.cursor()
        self.cursor.execute('PRAGMA journal_mode=WAL;')
        if SqliteClient.initialization_query:
            self.cursor.execute(SqliteClient.initialization_query)
        self.cursor.execute('PRAGMA wal_autocheckpoint=1000;')
        self.cursor.execute('attach database sqlitedb as t;')
        self.db.commit()
    def execute(self, query_text):
        """Run *query_text*, commit, and return the fetched rows as a string.

        Retries with a 10 ms sleep while SQLite reports 'database is
        locked'; other OperationalErrors are re-raised immediately.
        NOTE(review): xrange(1, 1000) is 999 attempts, after which the
        lock error is raised explicitly.
        """
        for i in xrange(1,1000):
            try:
                # Fresh cursor per attempt; _result_to_string reads from it.
                self.cursor = self.db.cursor()
                self.cursor.execute(query_text)
                self.db.commit()
                return self._result_to_string()
            except sqlite3.OperationalError as e:
                if not 'database is locked' in str(e):
                    raise e
                time.sleep(.01)
        raise sqlite3.OperationalError('database is locked')
    def _result_to_string(self):
        # str() of fetchall's row list, e.g. "[(1, u'a')]".
        result = self.cursor.fetchall()
        return str(result)
29197722426 | # -*- coding: utf-8 -*-
"""
USE: python bif_exel2brical.py infile outfile
"""
import sys
import math
import json
import openpyxl
def createModules(ws):
    """Build a module dict from the 'Circuit' worksheet.

    Each data row defines one module: column 1 = name, 2 = labels,
    4 = semicolon-separated sub-module names, 5 = functionality text,
    7 = implementing class.  Returns {name: module-dict}.
    """
    modules = {}
    # NOTE(review): the original wrapped this body in "for p in range(2)";
    # each pass rebuilt identical entries purely from the worksheet, so a
    # single pass produces the same dict.
    for i in range(ws.max_row - 1):
        val = ws.cell(row=i + 2, column=1).value
        if val is None or val.strip() == "":
            continue
        name = val.strip().replace(' ', '_').replace(':', '.')
        clm4 = ws.cell(row=i + 2, column=5).value  # Functionality
        functionality = "" if clm4 is None else clm4.strip()
        clm6 = ws.cell(row=i + 2, column=7).value  # implementation
        implClass = "" if clm6 is None else clm6.strip()
        labels = ws.cell(row=i + 2, column=2).value
        if labels is None:
            labels = ""
        parts = ws.cell(row=i + 2, column=4).value  # sub-module list
        submodules = []
        if parts is not None:
            submodules = [part.strip() for part in parts.split(';')]
        module = {"Name": name, "Comment": labels + ":" + functionality}
        # A composite module lists SubModules; a leaf names its ImplClass.
        if len(submodules) > 0:
            module["SubModules"] = submodules
        else:
            module["ImplClass"] = implClass
        modules[name] = module
    return modules
def upper_p(module1, module2, modules):
    """Return True if module2 is a (transitive) sub-module of module1."""
    submodules = modules[module1].get('SubModules')
    if not submodules:
        return False
    if module2 in submodules:
        return True
    # Recurse into each child until module2 is found somewhere below.
    return any(upper_p(child, module2, modules) for child in submodules)
def createConnections(ws, modules):
    """Read the 'BriCA' worksheet and build connection dicts plus ports.

    Each data row names a connection: from-module/port (cols 1-2),
    to-module/port (cols 3-4) and an optional comma-separated shape
    (col 5).  Rows missing an endpoint are skipped; unknown modules
    produce a warning on stderr.  Returns (connections, ports).
    """
    connections = []
    ports = []
    for i in range(ws.max_row - 1):
        row = i + 2
        col1 = ws.cell(row=row, column=1).value  # fromCircuit
        fromCircuit = "" if col1 is None else col1.strip().replace(' ', '_').replace(':', '.')
        col2 = ws.cell(row=row, column=2).value  # fromPort
        fromPort = "" if col2 is None else col2.strip()
        col3 = ws.cell(row=row, column=3).value  # toCircuit
        toCircuit = "" if col3 is None else col3.strip().replace(' ', '_').replace(':', '.')
        col4 = ws.cell(row=row, column=4).value  # toPort
        toPort = "" if col4 is None else col4.strip()
        if fromCircuit == "" or toCircuit == "":
            continue
        if fromCircuit not in modules:
            sys.stderr.write("WARNING: " + fromCircuit + "is not defined in the Circuit sheet!\n")
        if toCircuit not in modules:
            sys.stderr.write("WARNING: " + toCircuit + "is not defined in the Circuit sheet!\n")
        connectionID = fromCircuit + "-" + toCircuit
        shape = []
        col5 = ws.cell(row=row, column=5).value  # shape
        if col5 is not None:
            shape = str(col5).strip().split(",")
            for j, dim in enumerate(shape):
                try:
                    shape[j] = math.floor(float(dim))
                except ValueError:
                    # Leaves the offending element as a string, as before.
                    sys.stderr.write("WARNING: the shape element in " + connectionID + "is not an integer!\n")
        connection = {"Name": connectionID, "FromModule": fromCircuit, "FromPort": fromPort,
                      "ToModule": toCircuit, "ToPort": toPort}
        connections.append(connection)
        ports = add_ports(connection, shape, modules, ports)
    return connections, ports
def add_ports(connection, shape, modules, ports):
    """Attach port descriptors for one connection to both endpoint modules.

    Direction depends on containment: a connection descending into a
    sub-module uses Input/Input, one ascending out of a sub-module
    Output/Output, and one between unrelated modules Output -> Input.
    Duplicate ports (same name and type) are not added twice.
    Returns *ports* unchanged.
    """
    fromModule = connection["FromModule"]
    toModule = connection["ToModule"]
    if upper_p(fromModule, toModule, modules):
        fromType, toType = "Input", "Input"
    elif upper_p(toModule, fromModule, modules):
        fromType, toType = "Output", "Output"
    else:
        fromType, toType = "Output", "Input"
    for moduleName, portName, portType in ((fromModule, connection["FromPort"], fromType),
                                           (toModule, connection["ToPort"], toType)):
        port = {"Name": portName, "Type": portType, "Shape": shape}
        existing = modules[moduleName].setdefault("Ports", [])
        if not defined_port(port, existing):
            existing.append(port)
    return ports
def defined_port(port_2B_checked, ports):
    """Return True if *ports* already has an entry with the same name and type."""
    return any(port_2B_checked["Name"] == port["Name"] and
               port_2B_checked["Type"] == port["Type"]
               for port in ports)
def main():
    """Convert the Excel workbook argv[1] into a BriCAL JSON file argv[2]."""
    if len(sys.argv) <= 2:
        # Bug fix: the old message advertised a "bifd_url" argument that the
        # code never read; usage now matches the module docstring.
        print("USE: python bif_exel2brical.py infile outfile")
        exit()
    outfilePath = sys.argv[2]
    wb = openpyxl.load_workbook(sys.argv[1])
    # Defining an ontology
    project = wb['Project']
    pname = project.cell(row=2, column=1).value
    if not pname:
        print("Error: no project name")
        exit()
    description = project.cell(row=2, column=3).value
    modules = createModules(wb['Circuit'])
    connections, ports = createConnections(wb['BriCA'], modules)
    module_array = []
    for v in modules.values():
        # Sort ports for stable output; a module without any connection now
        # gets an empty list instead of raising KeyError.
        v["Ports"] = sorted(v.get("Ports", []), key=lambda x: (x['Type'], x['Name']))
        module_array.append(v)
    output = {"Header": {"Type": "A", "Name": pname, "Base": pname, "Comment": description},
              "Modules": module_array,
              "Connections": connections}
    # Context manager closes the output file even if json.dump fails.
    with open(outfilePath, 'w') as fp:
        json.dump(output, fp, indent=1)
| wbap/BriCAL | bif_excel2brical/bif_excel2brical.py | bif_excel2brical.py | py | 6,115 | python | en | code | 5 | github-code | 90 |
14992421542 | import itertools
import pandas as pd
from statsmodels.tsa.stattools import acf
# x = real component, right +ve
# y = imag component, up +ve
# The playing field is 7 units wide.
WIDTH = 7

# The five falling rock shapes, as complex offsets from each rock's
# bottom-left spawn origin: horizontal bar, plus, mirrored L, vertical
# bar, and 2x2 square.
_ROCKS = [
    [complex(0, 0), complex(1, 0), complex(2, 0), complex(3, 0)],
    [complex(1, 0), complex(0, 1), complex(1, 1), complex(2, 1), complex(1, 2)],
    [complex(0, 0), complex(1, 0), complex(2, 0), complex(2, 1), complex(2, 2)],
    [complex(0, 0), complex(0, 1), complex(0, 2), complex(0, 3)],
    [complex(0, 0), complex(0, 1), complex(1, 1), complex(1, 0)],
]
# Module-level infinite iterator; run() advances it, so its position
# persists across calls.
ROCKS = itertools.cycle(_ROCKS)
def run(inputs):
    """Solve AoC 2022 day 17 part 2: tower height after 10**12 rocks.

    Simulates N rocks while recording the height gained per rock, then
    detects the cycle period via the autocorrelation (acf) of that series
    and extrapolates to 1e12 rocks.  Positions are complex numbers:
    real = x (right positive), imag = y (up positive).
    """
    jet = itertools.cycle(inputs)
    occupied = set()              # cells of all rocks at rest
    highest_rock = -1             # y of the tallest occupied cell; floor is -1
    down_offset = complex(0, -1)
    origin_x = 2                  # rocks spawn 2 units from the left wall
    data = {}                     # rock index -> height gained by that rock
    previous = 0
    N = 1_000 * len(_ROCKS)
    for rock_i in range(1, N):
        # New rock spawns with its bottom edge 3 units above the tower top.
        origin = complex(origin_x, highest_rock + 4)
        new_rock = [p + origin for p in next(ROCKS)]
        while True:
            # Sideways: jet push, accepted only if walls/rocks allow it.
            if next(jet) == "<":
                offset = complex(-1, 0)
                new_pos = [p + offset for p in new_rock]
                if all(p.real >= 0 for p in new_pos) and all(
                    p not in occupied for p in new_pos
                ):
                    new_rock = new_pos
            else:
                offset = complex(1, 0)
                new_pos = [p + offset for p in new_rock]
                if all(p.real < WIDTH for p in new_pos) and all(
                    p not in occupied for p in new_pos
                ):
                    new_rock = new_pos
            # Downwards: fall one unit, or come to rest if blocked.
            new_pos = [p + down_offset for p in new_rock]
            if all(p.imag >= 0 for p in new_pos) and all(
                p not in occupied for p in new_pos
            ):
                new_rock = new_pos
            else:
                [occupied.add(p) for p in new_rock]
                highest_rock = max(highest_rock, *(p.imag for p in new_rock))
                break
        data[rock_i] = highest_rock - previous
        previous = highest_rock
    data = pd.Series(data)
    # Cycle detection: choose the series offset whose autocorrelation peaks
    # highest; the lag of that peak is taken as the period.
    max_offset = None
    period = None
    max_acf = None
    bad_data = {}                 # NOTE(review): collected but never used
    for offset in range(N // 3):
        acf_values = pd.Series(acf(data.iloc[offset:], nlags=N // 2))
        this_max = max(acf_values[1:])
        bad_data[offset] = this_max
        if max_acf is None or this_max > max_acf:
            max_acf = this_max
            period = acf_values.iloc[1:].argmax() + 1
            max_offset = offset
    # This is dirty, needs fixing!  Skip one extra period so the pre-cycle
    # settling phase is excluded from the periodic part.
    max_offset += period
    prediction_x = 1_000_000_000_000
    # Total height = settle-in part + whole cycles + partial cycle (+1 base).
    offset_contribution = data.iloc[:max_offset].sum()
    post_offset_x = prediction_x - max_offset
    n_full_cycles, remaining_x = divmod(post_offset_x, period)
    remaining_contribution = data.iloc[max_offset : max_offset + remaining_x].sum()
    single_period_contribution = data.iloc[max_offset : max_offset + period].sum()
    return (
        offset_contribution
        + single_period_contribution * n_full_cycles
        + remaining_contribution
        + 1
    )
| jimhendy/AoC | 2022/17/b.py | b.py | py | 3,105 | python | en | code | 0 | github-code | 90 |
29542827837 | # -*- coding: utf-8 -*-
# @Time : 2021/9/12 10:35
# @Author : 模拟卷
# @Github : https://github.com/monijuan
# @CSDN : https://blog.csdn.net/qq_34451909
# @File : 152. 乘积最大子数组.py
# @Software: PyCharm
# ===================================
"""给你一个整数数组 nums ,请你找出数组中乘积最大的连续子数组(该子数组中至少包含一个数字),并返回该子数组所对应的乘积。
示例 1:
输入: [2,3,-2,4]
输出: 6
解释: 子数组 [2,3] 有最大乘积 6。
示例 2:
输入: [-2,0,-1]
输出: 0
解释: 结果不能为 2, 因为 [-2,-1] 不是子数组。
来源:力扣(LeetCode)
链接:https://leetcode-cn.com/problems/maximum-product-subarray
著作权归领扣网络所有。商业转载请联系官方授权,非商业转载请注明出处。~
"""
from leetcode_python.utils import *
class Solution:
    """LeetCode 152: maximum product of a contiguous subarray."""

    def __init__(self):
        pass

    def maxProduct(self, nums: List[int]) -> int:
        """Return the largest product over all non-empty contiguous subarrays.

        Tracks both the largest and smallest product ending at each index,
        since a negative factor can turn the smallest product into the
        largest one step later.
        """
        max_save, min_save, max_all = nums[0], nums[0], nums[0]
        for num in nums[1:]:
            candidates = (num, max_save * num, min_save * num)
            max_save, min_save = max(candidates), min(candidates)
            # Bug fix: the running answer must be refreshed on every step;
            # the original only compared against the product ending at the
            # final index, e.g. returning 2 instead of 6 for [2, 3, 0].
            max_all = max(max_all, max_save)
        return max_all
def test(data_test):
    """Run Solution.maxProduct on one unpacked argument list."""
    solver = Solution()
    return solver.maxProduct(*data_test)
def test_obj(data_test):
    """Generic harness for design problems.

    data_test is ([method names], [argument lists]); the first pair
    constructs the object, each following name is invoked with its
    matching arguments.  Returns the collected results, with None as a
    placeholder for the constructor call.
    """
    result = [None]
    obj = Solution(*data_test[1][0])
    for fun, data in zip(data_test[0][1:], data_test[1][1:]):
        method = getattr(obj, fun)
        res = method(*data) if data else method()
        result.append(res)
    return result
if __name__ == '__main__':
    # Ad-hoc benchmark: run each case and report the answer plus wall time.
    test_cases = [
        [[2, 3, 0, -1, 5, -2, 4]],
        [[2, 3, 0, 5, -2, 4, 9]],
    ]
    for case in test_cases:
        started = time.time()
        print('-' * 50)
        print('input:', case)
        print('output:', test(case))
        print(f'use time:{time.time() - started}s')
16077029621 | import os
import joblib
import pandas as pd
import statsmodels.api as sm
class Modeler:
    """Trains and serves an OLS model predicting the satisfaction score."""

    def __init__(self):
        self.df = pd.read_csv('C:/Users/abdul/Desktop/FYP/FinalDataset.csv')
        # Reuse a previously trained model when one exists on disk.
        try:
            self.model = joblib.load('models/satisfaction.model')
        except Exception:
            # Narrowed from a bare except so KeyboardInterrupt/SystemExit
            # still propagate; any load failure means "not trained yet".
            self.model = None

    def fit(self):
        """Fit an OLS regression of Compound_Score on all other columns and persist it."""
        X = self.df.drop('Compound_Score', axis=1)
        Y = self.df['Compound_Score']
        X = sm.add_constant(X)
        self.model = sm.OLS(Y, X).fit()
        joblib.dump(self.model, 'models/satisfaction.model')

    def predict(self, measurement):
        """Return the model's prediction for one measurement row.

        Raises if no trained model has been persisted yet.
        """
        if not os.path.exists('models/satisfaction.model'):
            raise Exception('Model not trained yet. Fit the model first')
        prediction = self.model.predict(measurement)
        return prediction[0]
| Adeeb-Khoja/Pre-Launch-Forecaster---Data-Science-Project | Models Development/NLP - Sentiment Analysis - Satisfaction Model/deployment/modeler/Modeler.py | Modeler.py | py | 952 | python | en | code | 0 | github-code | 90 |
882682977 | import heapq
# Median maintenance: after each number arrives, record the running median.
# l_heap is a max-heap of the lower half (stored as (-x, x) tuples so that
# heapq's min-heap pops the largest value); u_heap is a min-heap of the
# upper half.  After every insertion the heaps are rebalanced so their
# sizes differ by at most one.
with open("Median.txt", "r") as file:
    data = [int(line) for line in file.readlines()]
print(data)
medians = []
l_heap = []
u_heap = []
for k, new in enumerate(data):
    l_max = l_heap[0][1] if l_heap else 0
    if new > l_max:
        heapq.heappush(u_heap, new)
    else:
        heapq.heappush(l_heap, (-new, new))
    # Rebalance if one half outgrew the other by more than one element.
    diff = len(l_heap) - len(u_heap)
    if diff > 1:
        moved = heapq.heappop(l_heap)[1]
        heapq.heappush(u_heap, moved)
    elif diff < -1:
        moved = heapq.heappop(u_heap)
        heapq.heappush(l_heap, (-moved, moved))
    # The median sits at the root of the larger heap (lower half on ties).
    median = None
    diff = len(l_heap) - len(u_heap)
    if (k + 1) % 2 == 0 and diff == 0:
        median = l_heap[0][1]
    elif (k + 1) % 2 == 1 and diff == 1:
        median = l_heap[0][1]
    elif (k + 1) % 2 == 1 and diff == -1:
        median = u_heap[0]
    else:
        print("Something bad happened", l_heap, u_heap)
    medians.append(median)
# Sum of the first 10000 medians, mod 10000.  Using `total` avoids the
# original's shadowing of the builtin sum().
total = sum(medians[:10000])
print(total % 10000)
| plancker/Algorithms | Course 2/median_maintenance.py | median_maintenance.py | py | 1,124 | python | en | code | 0 | github-code | 90 |
41608107292 | chaa,choo=map(int,input().split())
saaa=[]
for p in range(chaa+1,choo+1):
if p>1:
for f in range(2,p):
if(p%f==0):
break
else:
saa.append(f)
print(len(saa)+1)
| chokkuu1998/david | 5.py | 5.py | py | 189 | python | en | code | 0 | github-code | 90 |
def check():
    """Count pairs (a, b) with 1 <= a, b <= N and a % b >= K.

    Reads "N K" from stdin.  (This matches AtCoder ABC090 D "Remainder
    Reminder"; the per-divisor counting below follows that editorial --
    confirm against the original problem statement.)
    """
    N,K = map(int, input().split())
    total = 0
    # K == 0: every remainder qualifies, so all N*N pairs count.
    if K == 0: return N**2
    for b in range(1,N+1):
        # A divisor b <= K can never yield a remainder >= K.
        if b <= K:
            continue
        # amari = N % b, the length of the final partial remainder cycle.
        amari = N-b*(N//b)
        # Remainders K..b-1 appear once per full cycle ((b-K) values times
        # N//b cycles); the partial cycle adds max(amari - K + 1, 0) more.
        test = amari-K+1 if amari >= K else 0
        total += (b - K) * (N//b) + test
    return total
print(check())
11175739171 | from django.shortcuts import render
from django.http import HttpResponse
from django.http.response import HttpResponseRedirect
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.forms import AuthenticationForm
from django.contrib.auth import authenticate,login,logout
#for apis
from .serializers import medicalsummarySerializer,dignosticsresultSerializer,pasthistorySerializer
from rest_framework.views import APIView
from rest_framework.decorators import api_view
from rest_framework.response import Response
from .models import medicalsummary,dignosticsresult,pasthistory
# Create your views here.
def index(request):
    """Render the public landing page."""
    return render(request, 'index.html')
def register(request):
    """Show the signup form; create the user on a valid POST.

    An invalid POST re-renders the bound form so validation errors show.
    """
    if request.method == "POST":
        form = UserCreationForm(request.POST)
        if form.is_valid():
            form.save()
    else:
        form = UserCreationForm()
    return render(request, "register.html", {"form": form})
def signin(request):
    """Authenticate the user; on success log them in and redirect to /home."""
    if request.method == "POST":
        form = AuthenticationForm(request=request, data=request.POST)
        if form.is_valid():
            user = authenticate(username=form.cleaned_data['username'],
                                password=form.cleaned_data['password'])
            if user is not None:
                login(request, user)
                return HttpResponseRedirect('/home')
    else:
        form = AuthenticationForm()
    # GET, or an invalid POST: render the (possibly bound) form.
    return render(request, 'signin.html', {'form': form})
def home(request):
    """Render the authenticated home page, exposing the current user as 'name'."""
    return render(request, 'home.html', {'name': request.user})
def userLogout(request):
    """Log the current user out and redirect to the sign-in page."""
    logout(request)
    return HttpResponseRedirect('/signin')
# Medical summary api's
# Get all the records
@api_view(['GET'])
def getAllMedicalSummary(request):
    """Return every medical-summary record as JSON."""
    if request.method == 'GET':
        records = medicalsummary.objects.all()
        serialized = medicalsummarySerializer(records, many=True)
        return Response(serialized.data)
# Get only one record based on id
@api_view(['GET'])
def getOneMedicalSummary(request, pk):
    """Return the medical-summary record with primary key *pk*."""
    if request.method == 'GET':
        record = medicalsummary.objects.get(id=pk)
        serialized = medicalsummarySerializer(record, many=False)
        return Response(serialized.data)
# Add one record
@api_view(['POST'])
def addOneRecord(request):
    """Create a medical-summary record from the request body."""
    if request.method == 'POST':
        serialized = medicalsummarySerializer(data=request.data)
        if serialized.is_valid():
            serialized.save()
            return Response(serialized.data)
        # Invalid payload: report the validation errors.
        return Response(serialized.errors)
# Update a record based on id
@api_view(['POST'])
def updateRecord(request, pk):
    """Update the medical-summary record *pk* from the request body."""
    if request.method == 'POST':
        medicalRecord = medicalsummary.objects.get(id=pk)
        serialize = medicalsummarySerializer(instance=medicalRecord, data=request.data)
        if serialize.is_valid():
            serialize.save()
            return Response(serialize.data)
        # Bug fix: invalid payloads previously fell through and returned
        # None (server error); report the errors like addOneRecord does.
        return Response(serialize.errors)
# Delete a record
@api_view(['DELETE'])
def deleteRecord(request, pk):
    """Delete the medical-summary record with primary key *pk*."""
    if request.method == 'DELETE':
        medicalsummary.objects.get(id=pk).delete()
        return Response("Record deleted successfully")
# class medicalsummaryView(APIView):
# def get(self, request):
# player1 = medicalsummary.objects.all()
# serialize = medicalsummarySerializer(player1, many=True)
# return Response(serialize.data)
# def get(self, request, pk, format=None):
# player1 = medicalsummary.objects.get(id=pk)
# serialize = medicalsummarySerializer(player1, many=False)
# return Response(serialize.data)
# def post(self, request):
# serialize = medicalsummarySerializer(data=request.data)
# if(serialize.is_valid()):
# serialize.save()
# return Response(serialize.data)
# return Response(serialize.errors)
# diagnostics-result api
# view all diagnostics records
@api_view(['GET'])
def getAllDiagnosticResults(request):
    """Return every diagnostics-result record as JSON."""
    if request.method == 'GET':
        records = dignosticsresult.objects.all()
        serialized = dignosticsresultSerializer(records, many=True)
        return Response(serialized.data)
#view one diagnostic records
@api_view(['GET'])
def getOneDiagnosticResults(request, pk):
    """Return the diagnostics-result record with primary key *pk*."""
    if request.method == 'GET':
        record = dignosticsresult.objects.get(id=pk)
        serialized = dignosticsresultSerializer(record, many=False)
        return Response(serialized.data)
# Add one diagnostic-result
@api_view(['POST'])
def addOneDiagnosticRecord(request):
    """Create a diagnostics-result record from the request body."""
    if request.method == 'POST':
        serialized = dignosticsresultSerializer(data=request.data)
        if serialized.is_valid():
            serialized.save()
            return Response(serialized.data)
        # Invalid payload: report the validation errors.
        return Response(serialized.errors)
# Update Diagnostic result record
@api_view(['POST'])
def updateDiagnosticRecord(request, pk):
    """Update the diagnostics-result record *pk* from the request body."""
    if request.method == 'POST':
        diagnosticRecord = dignosticsresult.objects.get(id=pk)
        serialize = dignosticsresultSerializer(instance=diagnosticRecord, data=request.data)
        if serialize.is_valid():
            serialize.save()
            return Response(serialize.data)
        # Bug fix: invalid payloads previously fell through and returned
        # None (server error); report the errors like the add view does.
        return Response(serialize.errors)
# Delete a Diagnostic Result record
@api_view(['DELETE'])
def deleteDiagnosticRecord(request, pk):
    """Delete the diagnostics-result record with primary key *pk*."""
    if request.method == 'DELETE':
        dignosticsresult.objects.get(id=pk).delete()
        return Response(" Dignostics Result Record deleted successfully")
# Past_History_of_illness api
# View all Patient Past_history_illnesses
@api_view(['GET'])
def getAllPastHistoryIllnessResult(request):
    """Return every past-history-of-illness record as JSON."""
    if request.method == 'GET':
        records = pasthistory.objects.all()
        serialized = pasthistorySerializer(records, many=True)
        return Response(serialized.data)
#view one Patient Past_history_illnesses record
@api_view(['GET'])
def getOnePastHistoryResults(request, pk):
    """Return the past-history-of-illness record with primary key *pk*."""
    if request.method == 'GET':
        record = pasthistory.objects.get(id=pk)
        serialized = pasthistorySerializer(record, many=False)
        return Response(serialized.data)
# Add one Patient Past_history_illnesses record
@api_view(['POST'])
def addOneIllnessRecord(request):
    """Create a past-history-of-illness record from the request body."""
    if request.method == 'POST':
        serialized = pasthistorySerializer(data=request.data)
        if serialized.is_valid():
            serialized.save()
            return Response(serialized.data)
        # Invalid payload: report the validation errors.
        return Response(serialized.errors)
# Update Patient Past_history_illnesses record
@api_view(['POST'])
def updateIllnessRecord(request, pk):
    """Update the past-history-of-illness record *pk* from the request body."""
    if request.method == 'POST':
        pastRecord = pasthistory.objects.get(id=pk)
        serialize = pasthistorySerializer(instance=pastRecord, data=request.data)
        if serialize.is_valid():
            serialize.save()
            return Response(serialize.data)
        # Bug fix: invalid payloads previously fell through and returned
        # None (server error); report the errors like the add view does.
        return Response(serialize.errors)
# Delete a Patient Past_history_illnesses record
@api_view(['DELETE'])
def deleteIllnessRecord(request, pk):
    """Delete the past-history-of-illness record with primary key *pk*."""
    if request.method == 'DELETE':
        pasthistory.objects.get(id=pk).delete()
        return Response(" Past History Illness Record deleted successfully")
7572287877 | from langchain.prompts.prompt import PromptTemplate
# Prompt that rewrites a follow-up question, given the chat history, into a
# self-contained standalone question (Chinese-language equivalent of
# LangChain's default CONDENSE_QUESTION_PROMPT).
_template = """给定以下对话和后续问题,请将后续问题重新表述为一个独立的问题,使用中文回答.
对话历史:
{chat_history}
接下来的输入: {question}
独立问题:"""
CONDENSE_QUESTION_PROMPT_ZH = PromptTemplate.from_template(_template)

# Retrieval-QA prompt: answer only from the supplied context, admitting
# ignorance rather than fabricating (Chinese equivalent of the default
# "stuff" QA prompt).
prompt_template = """使用下面的上下文来回答最后的问题。如果你不知道答案,只需要说你不知道,不要试图编造一个答案.
{context}
问题: {question}
有帮助的回答:"""
QA_PROMPT_ZH = PromptTemplate(
    template=prompt_template, input_variables=["context", "question"]
)
| toby911/learngit | chains/condense_quest_prompt.py | condense_quest_prompt.py | py | 645 | python | zh | code | 0 | github-code | 90 |
4128436901 | import os
import sys
import logging
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
from collections import defaultdict
from datetime import datetime, timedelta
import pandas as pd
from pyhdf.HDF import HDF, HDF4Error
from pyhdf import VS
def key_from_fname(fname):
    """
    Return the ACE data key encoded in the file *fname*.

    The key is the first three underscore-separated components of the
    basename, with the first component upper-cased.
    """
    parts = os.path.basename(fname).split('_')
    return '_'.join([parts[0].upper()] + parts[1:3])
def parse_ace_data(hdf4_fname, N=1000):
    """
    Load ACE data *hdf4_fname* and return a pandas :class:`DataFrame`
    with the information. Process *N* lines of the HDF file at a time.

    The index is built from the year/day/hr/min/sec fields; those raw
    time fields (and ACEepoch) are dropped from the data columns.
    """
    key = key_from_fname(hdf4_fname)
    hdf = HDF(hdf4_fname)
    try:
        vs = hdf.vstart()
        vdata = vs.attach(key)
        fieldinfo = vdata.fieldinfo()
        loop_divmod = divmod(vdata.inquire()[0], N)
        fields = [x[0] for x in fieldinfo]
        data_map = defaultdict(list)
        # Read the vdata in chunks of N records; the final chunk holds the
        # remainder.  Reading past the end raises HDF4Error.
        for i in range(loop_divmod[0] + 1):
            try:
                data = vdata.read(N if i < loop_divmod[0] else loop_divmod[1])
            except HDF4Error:
                break
            for data_i in data:
                for data_ii, field in zip(data_i, fields):
                    data_map[field].append(data_ii)
    finally:
        vdata.detach()
        vs.vend()
        hdf.close()
    # convert to DataFrame
    remove_set = set(['year',
                      'fp_year',
                      'day',
                      'fp_doy',
                      'hr',
                      'min',
                      'sec',
                      'ACEepoch'])
    dt = []
    for year, day, hr, minute, sec in zip(*[data_map[x] for x in ['year',
                                                                  'day',
                                                                  'hr',
                                                                  'min',
                                                                  'sec']]):
        dt.append(datetime(year, 1, 1) + timedelta(days=day - 1,
                                                   hours=hr,
                                                   minutes=minute,
                                                   seconds=sec))
    # Py3 fix: dict.iteritems() no longer exists; items() works on 2 and 3.
    data = {k: v for k, v in data_map.items() if k not in remove_set}
    df = pd.DataFrame(index=dt,
                      data=data)
    return df
def hdf4to5(hdf5_fname, hdf4_fname, key='ace'):
    """
    Convert ACE HDF4 data record *hdf4_fname* to a pandas
    :class:`DataFrame` and store in the HDF5 record
    *hdf5_fname*. Associate data with *key*.  Returns *hdf5_fname*.
    """
    frame = parse_ace_data(hdf4_fname)
    frame.to_hdf(hdf5_fname, key)
    return hdf5_fname
def main(argv=None):
    """Command-line entry point: convert one ACE HDF4 file to HDF5."""
    argv = sys.argv if argv is None else argv
    parser = ArgumentParser('Convert ACE HDF4 file to pandas HDF5 record.',
                            formatter_class=ArgumentDefaultsHelpFormatter)
    parser.add_argument('hdf5_fname',
                        type=str,
                        help='output HDF5 file')
    parser.add_argument('hdf4_fname',
                        type=str,
                        help='input ACE HDF4 data record')
    args = parser.parse_args(argv[1:])
    hdf4to5(args.hdf5_fname,
            args.hdf4_fname)


if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    sys.exit(main())
| butala/pyrsss | pyrsss/l1/hdf4to5.py | hdf4to5.py | py | 3,445 | python | en | code | 6 | github-code | 90 |
73844656936 | import numpy as np
import computeCostMulti as costMultiModule
def gradientDescentMulti(X, y, theta, alpha, num_iters):
    """Performs gradient descent to learn theta
    theta = GRADIENTDESCENT(X, y, theta, alpha, num_iters) updates theta by
    taking num_iters gradient steps with learning rate alpha """
    m = y.size  # number of training examples
    J_history = np.zeros((num_iters, 1))

    # Bug fix: the loop previously ran over range(1, num_iters), taking only
    # num_iters - 1 steps and never setting J_history[0], contradicting the
    # docstring.  (`it` also avoids shadowing the builtin iter.)
    for it in range(num_iters):
        # Batch update: theta <- theta - (alpha/m) * X^T (X theta - y).
        prediction = np.dot(X, theta)
        errors = prediction - y
        delta = (1.0 / m) * np.dot(X.T, errors)
        theta = theta - alpha * delta
        # Record the cost after this update.
        J_history[it] = costMultiModule.computeCostMulti(X, y, theta)

    return J_history, theta
| hzitoun/machine_learning_from_scratch_matlab_python | algorithms_in_python/week_2/ex1/gradientDescentMulti.py | gradientDescentMulti.py | py | 861 | python | en | code | 30 | github-code | 90 |
def matches(a, b):
    """Count positions where the two strings hold the same character.

    Only overlapping positions are compared, so trailing characters of
    the longer string never match.
    """
    return sum(1 for x, y in zip(a, b) if x == y)
# Simple CLI driver: compare two strings typed by the user.
first = input("Enter 1st string :")
second = input("Enter 2nd string :")
print("Matches :", matches(first, second))
| 07python/python-programs | similar letters.py | similar letters.py | py | 295 | python | en | code | 0 | github-code | 90 |
28488218869 | import django
import os
import random
import decimal
import uuid
from datetime import datetime, timedelta
from django_seed import Seed
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "api.settings")
django.setup()
from api.transactions.models import Product, Transaction
# Fixed lookup data for the generated products.
cities = (
    'Makati City',
    'Pasig City',
    'Pasay City',
    'Marikina City'
)

product_names = (
    'Cheese Classic',
    'Hawaiian Overload',
    'Bacon Overload',
    'Veggies & Cheese Overload'
)

# Create one product per name, each assigned a random city.
products = []
for name in product_names:
    product = Product.objects.create(
        name=name,
        # random.choice replaces the randint-based manual indexing.
        city=random.choice(cities)
    )
    products.append(product)

seeder = Seed.seeder()

# Transactions are timestamped uniformly within the last 30 days.
start = datetime.now()
end = start - timedelta(days=30)

seeder.add_entity(Transaction, 75, {
    'amount': lambda x: decimal.Decimal(random.randrange(100, 400, 30)),
    'product': lambda x: random.choice(products),
    'date_time': lambda x: start + (end - start) * random.random(),
})
seeder.execute()

print('Seeding complete!')
| rrviloria/pizza-transaction | seed.py | seed.py | py | 1,055 | python | en | code | 0 | github-code | 90 |
class Solution:
    """LeetCode 45: minimum number of jumps to reach the last index."""

    def dp_solution(self, nums):
        """O(n^2) dynamic programme: dp[i] = fewest jumps to reach index i."""
        n = len(nums)
        dp = [0] * n
        for i in range(1, n):
            best = 1e4 + 1  # sentinel above any feasible jump count
            for j in range(i):
                if j + nums[j] >= i:
                    best = min(best, dp[j] + 1)
            dp[i] = best
        return dp[-1]

    def jump(self, nums: List[int]) -> int:
        """Greedy BFS over index windows: O(n) time, O(1) space."""
        jumps = 0
        left = right = 0
        while right < len(nums) - 1:
            # Farthest index reachable from the current window [left, right].
            farthest = 0
            for i in range(left, right + 1):
                farthest = max(farthest, i + nums[i])
            jumps += 1
            left, right = right + 1, farthest
        return jumps
| narendrasingodia1998/LeetCode | 0045-jump-game-ii/0045-jump-game-ii.py | 0045-jump-game-ii.py | py | 657 | python | en | code | 0 | github-code | 90 |
18469978299 | # https://atcoder.jp/contests/caddi2018/tasks/caddi2018_b
# Read n apple counts; the first player wins iff at least one count is odd.
n = int(input())
odd = 0
for _ in range(n):
    if int(input()) % 2 == 1:
        odd += 1
# (The original also accumulated an `even` counter that was never used.)
if odd:
    print('first')
else:
    print('second')
42263671057 | import os
import numpy as np
from datetime import datetime, timedelta
from netCDF4 import Dataset
from osgeo import gdal, osr
from gdalconst import *
from utils.s3_updnload import downloadBatch_s3
from utils.utils import mkfolder
import time as t
import glob
# =====Below adopted from remap.py
# Approximate kilometres per degree of latitude, used to size output grids.
KM_PER_DEGREE = 111.32
# GOES-R Extent (satellite projection) [llx, lly, urx, ury]
GOESr_fullEXTENT = [-5434894.885056, -5434894.885056, 5434894.885056, 5434894.885056]
GOESr_usEXTENT = [-2685383.084, 1523053.138, 2324660.158, 4529079.163]
# GOES-R geostationary Spatial Reference System.
# NOTE(review): the proj4 string uses lon_0=-89.5, while GOES-16's
# operational slot is -75.2 -- confirm the intended sub-satellite longitude.
sourcePrj = osr.SpatialReference()
sourcePrj.ImportFromProj4(
    '+proj=geos +h=35786023.0 +a=6378137.0 +b=6356752.31414 +f=0.00335281068119356027 +lat_0=0.0 +lon_0=-89.5 +sweep=x +no_defs')
# Lat/lon WSG84 Spatial Reference System (regridding target)
targetPrj = osr.SpatialReference()
targetPrj.ImportFromProj4('+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs')
def ZtoT2K(UTC, form=None):
    """Return seconds elapsed since 2000-01-01T12:00:00Z.

    *UTC* is a datetime, or a string parsed with strptime format *form*
    when *form* is given.
    """
    epoch = datetime(2000, 1, 1, 12)
    moment = datetime.strptime(UTC, form) if form else UTC
    return (moment - epoch).total_seconds()
def T2KtoZ(secs, form=None):
    """Inverse of ZtoT2K: seconds since 2000-01-01T12:00:00Z back to a
    datetime, or to a string when a strftime *form* is given.
    (ISO standard format would be '%Y-%m-%dT%H:%M:%SZ'.)
    """
    moment = datetime(2000, 1, 1, 12) + timedelta(seconds=secs)
    return moment.strftime(form) if form else moment
def exportImage(image, path):
    # Write *image* (a GDAL dataset) to *path* in netCDF format via CreateCopy.
    driver = gdal.GetDriverByName('netCDF')
    return driver.CreateCopy(path, image, 0)
def getGeoT(extent, nlines, ncols):
    """Build a GDAL geotransform for *extent* = [llx, lly, urx, ury]."""
    llx, lly, urx, ury = extent
    # Pixel sizes follow from the grid dimensions; north-up => negative y step.
    xres = (urx - llx) / ncols
    yres = (ury - lly) / nlines
    return [llx, xres, 0, ury, 0, -yres]
def getScaleOffset(path, vname):
    """Read (scale_factor, add_offset) for variable *vname* in netCDF *path*."""
    nc = Dataset(path, mode='r')
    try:
        scale = nc.variables[vname].scale_factor
        offset = nc.variables[vname].add_offset
    finally:
        # Close the file even if the variable or its attributes are missing.
        nc.close()
    return scale, offset
def flipLat(arr):
    """Reverse the first (row/latitude) axis of *arr*; returns a view."""
    return arr[::-1]
def rescale(arr, notVal, lower, upper, newRange):
    """Linearly map values in [lower, upper] onto *newRange*, in place.

    Values above *upper* are first masked to *notVal*; the result is
    clipped to newRange and returned as uint8.  Min/max diagnostics are
    printed before and after.
    """
    print(lower, upper, arr[arr != notVal].min(), arr.max())
    arr[arr > upper] = notVal
    mask = arr != notVal
    span = newRange[1] - newRange[0]
    arr[mask] = ((arr[mask] - lower) / (upper - lower)) * span + newRange[0]
    hi, lo = np.max(newRange), np.min(newRange)
    arr[arr > hi] = hi
    arr[arr < lo] = lo
    out = arr.astype(np.uint8)
    print(lower, upper, out.min(), out.max())
    return out
def inRaster(path, vname, disk='full', verb=False):
    """Open variable *vname* from GOES-R netCDF *path* as a GDAL dataset in
    the geostationary projection, with the row axis flipped.

    Returns (dataset, scale, offset); the dataset is a GTiff named
    'tmp.tif' holding the flipped array with NoData = 0.
    """
    # Open a dataset/variable in a NetCDF file (GOES-R data)
    connectionInfo = 'NETCDF:' + path + ':' + vname
    raw = gdal.Open(connectionInfo, gdal.GA_ReadOnly)
    # Setup projection and geo-transformation for GOES-16
    if (disk == 'full'):
        GOES_EXTENT = GOESr_fullEXTENT
    elif (disk == 'conus'):
        GOES_EXTENT = GOESr_usEXTENT
    raw.SetProjection(sourcePrj.ExportToWkt())
    raw.SetGeoTransform(getGeoT(GOES_EXTENT, raw.RasterYSize, raw.RasterXSize))
    # Flip the row axis before writing; presumably the netCDF rows run
    # opposite to the geotransform's north-up order -- TODO confirm.
    arr = raw.ReadAsArray()
    arr = flipLat(arr)
    ny, nx = arr.shape
    driver = gdal.GetDriverByName("GTiff")
    flipped = driver.Create('tmp.tif', nx, ny, 1, gdal.GDT_Float32)
    flipped.SetGeoTransform(raw.GetGeoTransform())  ## sets same geotransform as input
    flipped.SetProjection(raw.GetProjection())  ## sets same projection as input
    flipped.GetRasterBand(1).WriteArray(arr)
    flipped.GetRasterBand(1).SetNoDataValue(0)  ## if you want these values transparent
    # Dropping the references lets GDAL flush/close the source objects.
    raw = None
    arr = None
    # Read scale/offset from file
    scale, offset = getScaleOffset(path, vname)
    if (verb): print(' scale,offset:', scale, offset)
    return flipped, scale, offset
def outRaster(raw, scale, offset, setRange, extent, resolution, verb=False):
    """Reproject *raw* (geostationary) onto a lat/lon grid, apply
    scale/offset, and stretch the values into *setRange* as uint8.

    *extent* is [min lon, min lat, max lon, max lat]; *resolution* is the
    target pixel size in km.  Returns an in-memory GDAL dataset.
    """
    # Compute grid dimension
    sizex = int(((extent[2] - extent[0]) * KM_PER_DEGREE) / resolution)
    sizey = int(((extent[3] - extent[1]) * KM_PER_DEGREE) / resolution)
    # Create re-gridded grid [retangular (lat,lon)]
    memDriver = gdal.GetDriverByName('MEM')
    grid = memDriver.Create('grid', sizex, sizey, 1, gdal.GDT_Float32)
    # Setup projection and geo-transformation
    grid.SetProjection(targetPrj.ExportToWkt())
    grid.SetGeoTransform(getGeoT(extent, grid.RasterYSize, grid.RasterXSize))
    # Perform the projection/resampling/re-gridding
    gdal.ReprojectImage(raw, grid, sourcePrj.ExportToWkt(), targetPrj.ExportToWkt(),
                        gdal.GRA_NearestNeighbour, options=['NUM_THREADS=ALL_CPUS'])
    arr = grid.ReadAsArray()
    # Convert stored integers to physical values before stretching.
    arr = arr * scale + offset
    arr[arr < 0] = 0  # <--those are likely NOT in orig domain, set to NoDataValue
    # Stretch values in [8, 80] onto setRange (e.g. [255, 0]) as uint8.
    arr = rescale(arr, 0, 8, 80, setRange)
    grid.GetRasterBand(1).SetNoDataValue(0)
    grid.GetRasterBand(1).WriteArray(arr)
    return grid
##################################################################
# ---Run gdal to make geotiff (no optimizer)
def makeGeoTiff():
    """Download GOES ABI channel-13 netCDF granules from S3 and convert each
    to an 8-bit GeoTIFF named C13_<seconds-since-2000-01-01T12Z>.tif.

    Input/output locations come from environment variables
    (RAW_DATA_BUCKET, FLIGHT_DATE, ABI_INPUT_FLIGHT_PATH,
    ABI_OUTPUT_FLIGHT_PATH, ABI_S3_KEY).
    """
    s3bucket = os.getenv('RAW_DATA_BUCKET')
    fdate = os.getenv('FLIGHT_DATE')  # NOTE(review): fetched but unused below
    input_folder = os.getenv('ABI_INPUT_FLIGHT_PATH')
    output_folder = os.getenv('ABI_OUTPUT_FLIGHT_PATH')
    mkfolder(input_folder)
    mkfolder(output_folder)
    downloadBatch_s3(s3bucket, os.getenv('ABI_S3_KEY'), input_folder)
    # Choose the visualization extent (min lon, min lat, max lon, max lat)
    extent = [-140.6162904788845, 14.000163292174229, -49.179274701919105, 52.76771749693075]
    # Choose the image resolution (the higher the number the faster the processing is)
    resolution = 4.0
    flist = glob.glob(input_folder + "/*")
    for filenc in flist:
        # The scan start time is embedded in the filename after
        # 'M3C13_G16_s' as 13 characters: YYYYJJJHHMMSS.
        tstart = filenc.split('M3C13_G16_s')[-1][0:13]
        secs = ZtoT2K(tstart, '%Y%j%H%M%S')
        tstamp = T2KtoZ(secs, '%Y-%m-%dT%H:%M:%SZ')
        t_in_sec = str(int(secs))
        tifFile = output_folder + "/C13_" + t_in_sec + ".tif"
        if not os.path.isfile(tifFile):  # skip granules already converted
            print(tstamp, ' is timestamp for ', t_in_sec, tstart)
            print('Remapping', filenc.split('/')[-1])
            start = t.time()
            raw, scale, offset = inRaster(filenc, 'Rad', disk='conus', verb=False)
            grid = outRaster(raw, scale, offset, [255, 0], extent, resolution, verb=False)
            print('- finished! Time:', t.time() - start, 'seconds')
            dst_ds = gdal.Translate(tifFile, grid, outputType=gdal.GDT_Byte)
            dst_ds.GetRasterBand(1).SetNoDataValue(0)
            # Setting the GDAL objects to None flushes and closes them.
            raw = None
            grid = None
            dst_ds = None
        else:
            pass
            # print(f'file {tifFile} already exists')
| nasa/GHRC-FieldCampaign-eXplorer-core | mk_gdaltif.py | mk_gdaltif.py | py | 6,893 | python | en | code | 1 | github-code | 90 |
33557712089 | # just copying over most of "carml checkpypi" because it's a good
# example of "I want a stream over *this* circuit".
from __future__ import print_function
from twisted.internet.defer import inlineCallbacks
from twisted.internet.task import react
from twisted.internet.endpoints import TCP4ClientEndpoint
import txtorcon
from txtorcon.util import default_control_port
try:
import treq
except ImportError:
print("To use this example, please install 'treq':")
print("pip install treq")
raise SystemExit(1)
@react
@inlineCallbacks
def main(reactor):
    """Fetch https://www.torproject.org through Tor and print a snippet.

    Connects to the local Tor control port and issues the HTTP request via
    `tor.web_agent()`, which routes the stream over a Tor circuit.
    """
    ep = TCP4ClientEndpoint(reactor, '127.0.0.1', default_control_port())
    # ep = UNIXClientEndpoint(reactor, '/var/run/tor/control')
    tor = yield txtorcon.connect(reactor, ep)
    print("Connected:", tor)
    resp = yield treq.get(
        'https://www.torproject.org:443',
        agent=tor.web_agent(),
    )
    print("Retrieving {} bytes".format(resp.length))
    data = yield resp.text()
    # Show the first and last 120 characters of the page body.
    print("Got {} bytes:\n{}\n[...]{}".format(
        len(data),
        data[:120],
        data[-120:],
    ))
| meejah/txtorcon | examples/web_client_treq.py | web_client_treq.py | py | 1,085 | python | en | code | 245 | github-code | 90 |
8471033018 | from itertools import product
from restaurant_db import restaurants_given_state
NUM_ALTERNATIVES = 5
# Groups of interchangeable preference values: when relaxing a user's
# constraint, every other member of a group containing the current value
# counts as an acceptable alternative.
PRICE_ALTERNATIVES = [{"cheap", "moderate"},
                      {"moderate", "expensive"}]
LOCATION_ALTERNATIVES = [{"centre", "north", "west"},
                         {"centre", "north", "east"},
                         {"centre", "south", "west"},
                         {"centre", "south", "east"}]
FOOD_ALTERNATIVES = [{"thai", "chinese", "korean", "vietnamese", "asian oriental"},
                     {"mediterranean", "spanish", "portuguese", "italian", "romanian", "tuscan", "catalan"},
                     {"french", "european", "bistro", "swiss", "gastropub", "traditional"},
                     {"north american", "steakhouse", "british"},
                     {"lebanese", "turkish", "persian"},
                     {"international", "modern european", "fusion"}]
# Maps a preference field to its alternative groups; any other field falls
# back to the location groups, matching the original control flow.
_ALTERNATIVES_BY_TYPE = {
    "foodtype": FOOD_ALTERNATIVES,
    "pricerange": PRICE_ALTERNATIVES,
}
def get_alt_prefs_by_type(state, type):
    """Return alternative preference values for ``state[type]``.

    Collects the union of every group containing the current value and
    removes the value itself. Food alternatives are returned as a set,
    price/area alternatives as a list, exactly as callers expect.

    Changes from the original: the three near-identical branches are unified
    via a lookup table; the redundant ``dict.fromkeys`` wrapper (a no-op on
    the already-unique members of a set) is dropped; and ``discard`` replaces
    ``remove`` so an unknown value yields an empty result instead of raising
    KeyError.
    """
    groups = _ALTERNATIVES_BY_TYPE.get(type, LOCATION_ALTERNATIVES)
    alternatives = set()
    for group in groups:
        if state[type] in group:
            alternatives.update(group)
    alternatives.discard(state[type])
    if type == "foodtype":
        return alternatives
    return list(alternatives)
def new_state(state, type, pref):
    """Return a shallow copy of *state* with field *type* set to *pref*."""
    updated = state.copy()
    updated[type] = pref
    return updated
def types_to_change(state):
    """Return the preference fields that are set, specific (not "any") and
    confirmed by the user, in the fixed order foodtype, area, pricerange."""
    changeable = []
    for field in ("foodtype", "area", "pricerange"):
        value = state[field]
        if value is not None and value != "any" and state["confirmed_" + field]:
            changeable.append(field)
    return changeable
def find_alt_restaurants(state, limit):
    """Collect up to *limit* alternative restaurants by relaxing preferences.

    Strategy, in order, stopping as soon as *limit* matches are found:
      1. relax one confirmed preference at a time, trying the most recently
         confirmed one first;
      2. if all three preferences are confirmed, relax pairs of them;
      3. finally, drop leading additional requirements one by one.
    """
    alt_restaurants = []
    # First, we check for al types individually, the last confirmed one first
    types = types_to_change(state)
    if state["last-confirmed"] in types:
        types.insert(0, types.pop(types.index(state["last-confirmed"])))
    for type in types:
        alt_restaurants += get_alternatives_for_type(state, type, limit - len(alt_restaurants))
        if len(alt_restaurants) == limit:
            return alt_restaurants
    # If we do not find enough alternatives this way, we drop combinations
    if len(types) == 3:
        type_combinations = ((type1, type2) for type1 in types for type2 in types if type1 != type2)
        for type1, type2 in type_combinations:
            alt_restaurants += get_alternatives_for_types(state, type1, type2, limit - len(alt_restaurants))
            if len(alt_restaurants) == limit:
                return alt_restaurants
    # If we still have not found enough, we will relax the additional requirements
    add_reqs = state["add_reqs"]
    if add_reqs is not None and len(add_reqs) > 0:
        # NOTE(review): range starts at 1, so a single additional requirement
        # is never relaxed — confirm this is intended.
        for i in range(1, len(add_reqs)):
            alt_restaurants += get_alternatives_for_add_reqs(state, add_reqs[i:], limit - len(alt_restaurants))
            if len(alt_restaurants) == limit:
                return alt_restaurants
    return alt_restaurants
def get_alternatives_for_type(state, type, limit):
    """Query restaurants after substituting each alternative value for *type*,
    returning at most *limit* matches."""
    matches = []
    for alternative in get_alt_prefs_by_type(state, type):
        if len(matches) >= limit:
            break
        relaxed = state.copy()
        relaxed[type] = alternative
        matches += restaurants_given_state(relaxed)
    return matches[:limit]
def get_alternatives_for_types(state, type1, type2, limit):
    """Query restaurants after relaxing *type1* and *type2* simultaneously,
    trying every combination of their alternative values."""
    combos = product(get_alt_prefs_by_type(state, type1),
                     get_alt_prefs_by_type(state, type2))
    matches = []
    for first, second in combos:
        if len(matches) >= limit:
            break
        relaxed = state.copy()
        relaxed[type1] = first
        relaxed[type2] = second
        matches += restaurants_given_state(relaxed)
    return matches[:limit]
def get_alternatives_for_add_reqs(state, add_reqs, limit):
    """Query restaurants with a reduced list of additional requirements."""
    relaxed = state.copy()
    relaxed["add_reqs"] = add_reqs
    return restaurants_given_state(relaxed)[:limit]
| jokke150/Restaurant-Recommendation-System | alternative_rules.py | alternative_rules.py | py | 4,968 | python | en | code | 1 | github-code | 90 |
24083365304 | import pandas as pd
import text_analytic_tools.common.text_corpus as text_corpus
# Corpus location and archive/file glob patterns.
DATA_FOLDER = '../../data'
CORPUS_NAME_PATTERN = '*.txt.zip'
CORPUS_TEXT_FILES_PATTERN = '*.txt'
# UI filter widget specs; 'field' names refer to columns of the document
# index built by compile_documents_by_filename below.
DOCUMENT_FILTERS = [
    {
        'type': 'multiselect',
        'description': 'Pope',
        'field': 'pope'
    },
    {
        'type': 'multiselect',
        'description': 'Genre',
        'field': 'genre'
    },
    {
        'type': 'multiselect',
        'description': 'Year',
        'field': 'year',
        'query': 'year > 0'
    }
]
# Same filters without the year > 0 restriction, used for pope grouping.
POPE_GROUP_FILTERS = [
    {
        'type': 'multiselect',
        'description': 'Pope',
        'field': 'pope'
    },
    {
        'type': 'multiselect',
        'description': 'Genre',
        'field': 'genre'
    },
    {
        'type': 'multiselect',
        'description': 'Year',
        'field': 'year'
    }
]
# (label, group-by columns) choices for aggregation in the UI.
GROUP_BY_OPTIONS = [
    ('Year', ['year']),
    ('Pope', ['pope']),
    ('Pope, Year', ['pope', 'year']),
    ('Genre', ['genre']),
    ('Pope, genre', ['pope', 'genre']),
    ('Pope, year, genre', ['pope', 'year', 'genre'])
]
def compile_documents_by_filename(filenames):
    """Build a document-index DataFrame from underscore-delimited filenames.

    Each filename is expected to look like pope_lang_genre_year_subgenre;
    when the fourth field is not numeric, the year and sub-genre fields are
    assumed to be swapped.
    """
    records = []
    for filename in filenames:
        fields = filename.split('_')
        year_is_fourth = fields[3].isdigit()
        records.append({
            'filename': filename,
            'pope': fields[0],
            'lang': fields[1],
            'genre': fields[2],
            'year': int(fields[3] if year_is_fourth else fields[4]),
            'sub_genre': fields[4] if year_is_fourth else fields[3],
        })
    df = pd.DataFrame(records)
    df['document_id'] = df.index
    df['title'] = df.filename
    return df
def _compile_documents(corpus, corpus_index=None):
    """Build a document-index DataFrame from an already-parsed corpus.

    Returns None for an empty corpus.

    NOTE(review): the corpus_index branch is dead code — it hits
    ``assert False`` before ``filenames`` is ever populated; confirm whether
    it was meant to filter the index by the corpus's filenames.
    """
    if len(corpus) == 0:
        return None
    if corpus_index is not None:
        assert False, "bug: filenames not defined"
        filenames = []
        corpus_index = corpus_index[corpus_index.filename.isin(filenames)]
        return corpus_index
    # Each document is expected to expose metadata via `x._.meta`
    # (spaCy-style extension attribute) — TODO confirm.
    df = pd.DataFrame([ x._.meta for x in corpus ])
    return df
def compile_documents(corpus, index=None):
    """Build the document index for *corpus* from each document's filename
    metadata (*index* is accepted for interface compatibility but unused)."""
    names = [doc._.meta['filename'] for doc in corpus]
    return compile_documents_by_filename(names)
def get_document_stream(source, lang, **kwargs):
    """Yield (filename, row_id, text, metadata) for every document in *source*.

    *source* is either a path to a compressed archive (opened with
    CompressedFileReader) or an already-open reader object. *lang* and
    **kwargs are accepted for interface compatibility but unused here.
    """
    if isinstance(source, str):
        # FIXME Use "smart_open" or "open_sesame" library instead
        reader = text_corpus.CompressedFileReader(source)
    else:
        reader = source
    # Index the parsed filename metadata by filename for O(1) lookup below.
    lookup = compile_documents_by_filename(reader.filenames).set_index('filename')
    lookup['filename'] = lookup.index
    row_id = 0
    for filename, text in reader:
        metadata = lookup.loc[filename].to_dict()
        yield filename, row_id, text, metadata
        row_id += 1
# FIXME VARYING ASPECTs: What attributes to extend
def add_domain_attributes(df, document_index):
    """Join document metadata onto *df* by index and keep only the columns
    used for reporting."""
    joined = df.merge(document_index, left_index=True, right_index=True, how='inner')
    return joined[['filename', 'year', 'genre', 'keyterms']]
| humlab/text_analytic_tools | text_analytic_tools/domain/Vatican/domain_logic.py | domain_logic.py | py | 3,090 | python | en | code | 1 | github-code | 90 |
70279937897 | from flask_wtf import FlaskForm
from wtforms import IntegerField,SelectField,SubmitField
from wtforms.validators import DataRequired,NumberRange
# (value, label) choices for the Indian meteorological subdivisions the
# rainfall model supports; the value is the key sent to the backend.
states=[('kerala','Kerala'),('bihar','Bihar'),('tamil_nadu','Tamil nadu'),('assam_and_meghalaya','Assam & Meghalaya'),
        ('nagaland_manipur_mizoram_tripura','Nagaland,Manipur,Mizoram & Tripura'),('orissa','Orissa'),('jharkhand','Jharkhand'),
        ('east_UP','Eastern Uttar Pradesh'),('west_UP','Western Uttar Pradesh'),('uttarakhand','Uttarakhand'),('haryana_delhi_chandigarh','Haryana,Delhi & Chandigarh'),
        ('punjab','Punjab'),('himachal_pradesh','Himachal Pradesh'),('jammu_and_kashmir','Jammu & Kashmir'),('costal_karnataka','Costal Karnataka'),
        ('north_interior_karnataka','North Interior Karnataka'),('south_interior_karnataka','South Interior Karnataka'),('telangana','Telangana'),
        ('costal_andhra_pradesh','Costal Andhra Pradesh'),('Chhattisgarh','Chhattisgarh'),('goa_and_konkan','Goa & Konkan'),
        ('madhya_maharashtra','Madhya Maharashtra'),('gujrat','Gujrat'),('west_rajasthan','West Rajasthan'),('east_rajasthan','East Rajasthan'),
        ('west_madhya_pradesh','West Madhya Pradesh'),('east_madhya_pradesh','East Madhya Pradesh')]
class askForm(FlaskForm):
    """Input form asking for a year (1850-2050) and a subdivision."""
    year = IntegerField('Enter Year',validators=[DataRequired(),NumberRange(min=1850,max=2050)])
    state = SelectField('Select State', choices=states)
    submit = SubmitField('Submit')
# Competitive-programming solution; reads n and k from stdin.
n, k = map(int, input().split())
ans = 0
# For each candidate divisor i > k, every complete block of i consecutive
# values appears to contribute (i - k) remainders that are >= k.
for i in range(k + 1, n + 1):
    ans += (n // i) * (i - k)
    # m is the extra contribution of the final partial block; when k > 0 the
    # remainder k itself is excluded, hence the "- k + 1" adjustment.
    if k == 0:
        m = n % i
    else:
        m = n % i - k + 1
    # Only add the partial block when it contributes a positive amount.
    ans = max(ans, ans + m)
print(ans)
43869991178 | import xml.etree.ElementTree as ET
import matplotlib.pyplot as plt
import numpy as np
from sklearn.metrics import r2_score
# Parse the measurement XML (path hard-coded to one device's sweep file).
tree = ET.parse('HY202103_D08_(0,2)_LION1_DCM_LMZC.xml')
root = tree.getroot()
def snf(a):
    """Parse an XML element's comma-separated text into a list of floats."""
    return [float(token) for token in a.text.split(',')]
# Collect wavelength (L) and measured transmission (IL) sweeps from the XML.
wvlen = []
itst = []
for data in root.iter('L'):
    L = snf(data)
    wvlen.append(L)
for data in root.iter('IL'):
    IL = snf(data)
    itst.append(IL)
# DC bias label for each sweep (collected but not used in this plot).
lgds = []
for data in root.iter("WavelengthSweep"):
    lgds.append(data.get("DCBias"))
# Cubic polynomial fit of sweep index 6 — presumably the reference sweep
# (see the file name); TODO confirm.
dp1 = np.polyfit(wvlen[6], itst[6], 3)
f1 = np.poly1d(dp1)
plt.title("Transmission spectra-as measured")
plt.xlabel("Wavelength [nm]")
plt.ylabel("Measured transmission [dB]")
plt.plot(wvlen[6], itst[6],label="Raw")
plt.plot(wvlen[6], f1(wvlen[6]), 'r--', label='Fit')
plt.rc("legend", fontsize = 7)
plt.legend(loc = 'best' , ncol = 3)
plt.show()
# R^2 of the cubic fit against the raw data.
print(r2_score(itst[6], f1(wvlen[6])))
| ChiYoSeop/Gitprac | PE02_TW03/PE02_TW03_REF_Raw & fit.py | PE02_TW03_REF_Raw & fit.py | py | 933 | python | en | code | 0 | github-code | 90 |
def create_environment():
    """Prompt for a rectangular environment size and print its corners.

    Sets the module globals ``length`` and ``breadth`` and returns them as a
    tuple. ``x_axis``/``y_axis`` are declared global but never assigned here
    — TODO confirm whether they belong to another function.
    """
    global length,breadth,x_axis,y_axis
    print("Enter data for environment: ")
    length = int(input("Enter length: "))
    breadth = int(input("Enter breadth: "))
    print("Co-ordinate of rectangular area (4 - corners) is:")
    # Origin [0,0] followed by the other three corners, clockwise.
    print("[0,0] {} {} {} in clockwise direction".format
          ([0,breadth],[length,breadth],[length , 0]))
    print()
    return length,breadth
def set_obstacle():
    """Prompt for obstacles as ``x y length`` triples and return them.

    Relies on the module global ``length`` set by create_environment().
    NOTE(review): ``assert`` is stripped under ``python -O``; raising
    ValueError would make the size check unconditional.
    """
    obstacles = []
    obstacle_no = int(input("Enter number of obstacle "))
    print("for obstacle enter three parameter ")
    print("x_axis y_axix length ")
    print()
    while obstacle_no > 0:
        obs = list(map(int,input().split()))
        assert obs[2] < length - 10, "Length of obstacle will be less than length of environment"
        obstacles.append(obs)
        obstacle_no -= 1
    return obstacles
| suraj7337/Robot-Assembling | Environment.py | Environment.py | py | 874 | python | en | code | 0 | github-code | 90 |
class Solution:
    """Word-ladder solver (LeetCode 127) using level-by-level BFS."""
    def ladderLength2(self, beginWords, endWord, wordList):
        """Return the number of additional transformation steps needed to
        reach *endWord* from any word in *beginWords* using only words from
        *wordList*, or 0 when it cannot be reached.

        :type beginWords: List[str]
        :type endWord: str
        :type wordList: List[str]
        :rtype: int
        """
        if not wordList or endWord not in wordList:
            return 0
        # Next BFS frontier: every pool word one letter away from the
        # current level (deduplicated via a set).
        next_beginWords = set()
        for word in beginWords:
            for word_in_list in wordList:
                if self.compare(word, word_in_list):
                    if word_in_list == endWord:
                        return 1
                    next_beginWords.add(word_in_list)
        if not next_beginWords:
            return 0
        # Bug fix: remove this level's frontier from the pool instead of
        # re-appending every word once per begin word. The original grew the
        # word list each level and recursed forever when endWord was
        # unreachable but two pool words differed by one letter.
        next_wordList = [w for w in wordList if w not in next_beginWords]
        result = self.ladderLength2(list(next_beginWords), endWord, next_wordList)
        if result:
            return 1 + result
        else:
            return 0
    def ladderLength(self, beginWord, endWord, wordList):
        """Length of the shortest transformation sequence, counting both
        endpoints, or 0 when no sequence exists."""
        if not wordList or endWord not in wordList:
            return 0
        result = self.ladderLength2([beginWord], endWord, wordList)
        if result:
            return 1 + result
        else:
            return 0
    def compare(self, a_word, b_word):
        """True when the two equal-length words differ in exactly one letter."""
        diff = 0
        for i in range(len(a_word)):
            if not a_word[i] == b_word[i]:
                diff += 1
                if diff > 1:
                    return False
        return diff == 1
# Ad-hoc smoke test: "hot" -> "dog" differs in two letters and there is no
# intermediate word available, so no ladder exists and 0 is printed.
u = Solution()
print(u.ladderLength("hot","dog",["hot","dog"]))
#
#"hit","cog", ["hot","dot","dog","lot","log","cog"]))
7145796448 | from rest_framework import serializers
from rest_framework_simplejwt.serializers import TokenObtainPairSerializer as BaseTokenObtainSerializer
from rest_framework_simplejwt.settings import api_settings
from rest_framework_simplejwt.tokens import RefreshToken
from Apps.Authentication.models.login import DeviceLogin
class TokenObtainPairSerializer(BaseTokenObtainSerializer):
    """Obtain-pair serializer that stamps the refresh token with the id of
    the DeviceLogin record for the requesting device."""
    @classmethod
    def get_token(cls, user):
        # Plain refresh token; the device-id claim is attached in validate().
        return RefreshToken.for_user(user)
    def validate(self, attrs):
        data = super().validate(attrs)
        refresh = self.get_token(self.user)
        request_data = self.context.get("request")
        # Register (or reuse) the login device for this request and embed its
        # id as a custom claim so the device is identifiable on refresh.
        refresh["device_id"] = DeviceLogin.objects.get_or_create(self.user, request_data).id
        data['refresh'] = str(refresh)
        data['access'] = str(refresh.access_token)
        return data
class TokenRefreshSerializer(serializers.Serializer):
    """Exchange a refresh token for a new access token (and, when rotation is
    enabled, a rotated refresh token)."""
    refresh = serializers.CharField()
    access = serializers.ReadOnlyField()
    def validate(self, attrs):
        # NOTE(review): `request` is never used below; this lookup only
        # raises KeyError when the context lacks a request — confirm whether
        # that implicit guard is intentional.
        request = self.context["request"]
        refresh = RefreshToken(attrs['refresh'])
        data = {'access': str(refresh.access_token)}
        if api_settings.ROTATE_REFRESH_TOKENS:
            if api_settings.BLACKLIST_AFTER_ROTATION:
                try:
                    # Attempt to blacklist the given refresh token
                    refresh.blacklist()
                except AttributeError:
                    # If blacklist app not installed, `blacklist` method will
                    # not be present
                    pass
            # Rotate: new token id and expiry for the returned refresh token.
            refresh.set_jti()
            refresh.set_exp()
            data['refresh'] = str(refresh)
        return data
| khan-asfi-reza/RaydBlog | backend/Apps/Authentication/serializers/jwt.py | jwt.py | py | 1,688 | python | en | code | 0 | github-code | 90 |
12926100654 | import requests
import numpy as np
import matplotlib.pyplot as plt
import datetime
import os
def get_github_snk_data(username):
    """Fetch recent public push events for *username* and bucket commit
    counts by day of year.

    Returns a list of 366 daily commit counts (index 0 = Jan 1; slot 365 is
    only used by Dec 31 of leap years), or [] when the request fails.

    NOTE: the GitHub events API only returns the most recent events (a
    single page here), so most days will remain zero.
    """
    url = f"https://api.github.com/users/{username}/events"
    response = requests.get(url)
    if response.status_code != 200:
        print(f"Failed to fetch contributions for {username}")
        return []
    # 366 slots: the original used 365, which raised IndexError for events
    # dated Dec 31 of a leap year (tm_yday == 366).
    contributions = [0] * 366
    for event in response.json():
        if event["type"] == "PushEvent":
            date_str = event["created_at"][:10]
            date = datetime.datetime.strptime(date_str, "%Y-%m-%d").date()
            day_of_year = date.timetuple().tm_yday - 1  # tm_yday is 1-indexed
            contributions[day_of_year] += len(event["payload"]["commits"])
    return contributions
def plot_snk_chart(data):
    """Render contribution data as a 7x53 "snake" heat map.

    NOTE(review): expects *data* items to be dicts with 'x', 'y', 'count'
    and 'level' keys, which does not match the flat list produced by
    get_github_snk_data — confirm the intended input format.
    """
    # 7x53 matrix: one row per weekday, one column per week of the year.
    snk_matrix = np.zeros((7, 53), dtype=int)
    # Fill the contribution values into the matrix.
    for item in data:
        x = item['x']
        y = item['y']
        count = item['count']
        level = item['level']
        snk_matrix[y, x] = count if count > 0 else level
    # Colour map for the heat levels.
    cmap = plt.get_cmap('viridis')
    # Draw the heat map.
    plt.imshow(snk_matrix, cmap=cmap, aspect='auto', interpolation='none')
    # Hide the x- and y-axis tick labels.
    plt.xticks([])
    plt.yticks([])
    # Add a title and a colour bar.
    plt.title("GitHub Contributions - Snake Chart")
    plt.colorbar(label="Contributions")
    # Show the figure.
    plt.show()
def main():
    """Fetch contribution data for a fixed GitHub user and print it."""
    username = "WangYingJay"
    snk_data = get_github_snk_data(username)
    if snk_data:
        print(snk_data)
        # Here you could post-process the fetched data — save it to a local
        # file, render it as an image, etc. This example only prints it.
        # plot_snk_chart(snk_data)
if __name__ == "__main__":
    main()
| WangYingJay/WangYingJay | service/request_create_snake_api.py | request_create_snake_api.py | py | 1,989 | python | en | code | 0 | github-code | 90 |
27011018664 | from camera_algorithms.camera1_model import PeopleDetector
#Loading model
# Load the people-detection model once at import time so every frame reuses it.
net = PeopleDetector()
net.load_network()
def queue (img) :
    """Run people detection on one frame.

    Returns (frame, n_total, n_mal): the annotated frame, the total number
    of persons detected in the queue, and the number of persons flagged as
    at risk.
    """
    # get frame from the video
    frame=img
    # Get the predictions from the model
    outs = net.predict(frame)
    # Use model predictions to get the total number of persons in the queue
    # and the number of persons in danger.
    _,n_total,n_mal=net.process_preds(frame, outs)
    # Reset the detector's stored predictions before the next frame.
    net.clear_preds()
    return frame,n_total,n_mal
# Competitive-programming solution. Appears to read, for each of n people,
# their 10 binary answers (f) and a payoff row P indexed by match count.
n=int(input())
f=[]
P=[]
for i in range(n):
    a=list(map(int,input().split()))
    f.append(a)
for i in range(n):
    a=list(map(int, input().split()))
    P.append(a)
from itertools import product
ans=-(10**10)
# Brute-force every non-empty 0/1 selection over the 10 items (2^10 - 1).
for p in product([0, 1], repeat = 10):
    if sum(p)!=0:
        ret = 0
        for i in range(n):
            cnt = 0
            for j in range(10):
                # Count items selected by both the candidate and person i.
                if p[j] == f[i][j] and p[j] == 1:
                    cnt += 1
            ret += P[i][cnt]
        ans = max(ans, ret)
print(ans)
19976050115 | from collections import deque
from math import floor, ceil
class Pair:
    """A mutable ordered pair — the building block of a snailfish number.

    Elements are reachable both as attributes (.a/.b) and by index (0/1),
    and either element may itself be a Pair, forming a binary tree.
    """
    def __init__(self, a, b):
        self.a = a
        self.b = b
    def copy(self):
        """Deep-copy nested Pairs; leaf values are kept as-is."""
        left = self.a.copy() if type(self.a) is Pair else self.a
        right = self.b.copy() if type(self.b) is Pair else self.b
        return Pair(left, right)
    def __getitem__(self, index):
        if index == 0:
            return self.a
        if index == 1:
            return self.b
        raise Exception("Index provided for Pair was not 0 or 1")
    def __setitem__(self, index, val):
        if index == 0:
            self.a = val
        elif index == 1:
            self.b = val
        else:
            raise Exception("Index provided for Pair was not 0 or 1")
    def __str__(self):
        return f"({self.a}, {self.b})"
##############################################################################
# EXPLODING
##############################################################################
def get_leftmost_explode(pair, level, path):
    """Find the leftmost pair nested four levels deep (the next to explode).

    *path* is mutated in place: on success it holds the 0/1 (left/right)
    steps from the root down to the returned pair; on failure any steps
    appended during the search are popped again. Returns the pair to
    explode, or False when none exists.
    """
    lhs = pair[0]
    rhs = pair[1]
    if type(lhs) is Pair:
        path.append(0)
        if level >= 3:
            # Three pairs already enclose us, so this child is nested 4 deep.
            return lhs
        left_result = get_leftmost_explode(lhs, level+1, path)
        if left_result:
            return left_result
        else:
            path.pop()
    if type(rhs) is Pair:
        path.append(1)
        if level >= 3:
            return rhs
        right_result = get_leftmost_explode(rhs, level+1, path)
        if right_result:
            return right_result
        else:
            path.pop()
    return False
def update_closest_left(num, pair, path):
    """Add the exploding pair's left value to the nearest regular number on
    its left, if one exists.

    Walks *path* back up to the last right turn, steps left once, then
    descends right-children until hitting a regular number.
    """
    # follow path up to the last time you went right
    last_right_index = None
    for i in reversed(range(len(path))):
        if path[i] == 1:
            last_right_index = i
            break
    if last_right_index == None:
        # The exploding pair sits on the far-left edge: nothing to its left.
        return
    current = num
    for step in path[:last_right_index]:
        current = current[step]
    # go left once, and then right until you hit a regular
    if not type(current[0]) is Pair:
        current[0] += pair[0]
        return
    current = current[0]
    while type(current[1]) is Pair:
        current = current[1]
    current[1] = current[1] + pair[0]
def update_closest_right(num, pair, path):
    """Add the exploding pair's right value to the nearest regular number on
    its right, if one exists (mirror image of update_closest_left).
    """
    # follow path up to the last time you went left
    last_left_index = None
    for i in reversed(range(len(path))):
        if path[i] == 0:
            last_left_index = i
            break
    if last_left_index == None:
        # The exploding pair sits on the far-right edge: nothing to its right.
        return
    current = num
    for step in path[:last_left_index]:
        current = current[step]
    # go right once, and then left until you hit a regular
    if not type(current[1]) is Pair:
        current[1] += pair[1]
        return
    current = current[1]
    while type(current[0]) is Pair:
        current = current[0]
    current[0] = current[0] + pair[1]
def set_explode_zero(num, pair, path):
    """Replace the node addressed by *path* in *num* with the regular
    number 0 (*pair* is accepted for interface symmetry but unused)."""
    *parents, last = path
    node = num
    for step in parents:
        node = node[step]
    node[last] = 0
def explode(num):
    """Perform the leftmost explode action in place; return whether one
    happened."""
    path = []
    target = get_leftmost_explode(num, 0, path)
    if not target:
        return False
    update_closest_left(num, target, path)
    update_closest_right(num, target, path)
    set_explode_zero(num, target, path)
    return True
##############################################################################
# SPLITTING
##############################################################################
def get_leftmost_split(pair, path):
    """Find the leftmost regular number >= 10 (the next one to split).

    *path* is mutated in place to hold the 0/1 steps from the root to that
    number; steps appended during a failed descent are popped again.
    Returns True when such a number exists, else False.
    """
    lhs = pair[0]
    rhs = pair[1]
    if type(lhs) is Pair:
        path.append(0)
        left_result = get_leftmost_split(lhs, path)
        if left_result:
            return left_result
        else:
            path.pop()
    elif lhs >= 10:
        path.append(0)
        return True
    if type(rhs) is Pair:
        path.append(1)
        right_result = get_leftmost_split(rhs, path)
        if right_result:
            return right_result
        else:
            path.pop()
    elif rhs >= 10:
        path.append(1)
        return True
    return False
def split(num):
    """Split the leftmost regular number >= 10 into a Pair of its rounded
    halves, in place; return whether a split happened."""
    path = []
    if not get_leftmost_split(num, path):
        return False
    node = num
    for step in path[:-1]:
        node = node[step]
    value = node[path[-1]]
    node[path[-1]] = Pair(floor(value / 2), ceil(value / 2))
    return True
##############################################################################
def magnitude(num):
    """Recursive snailfish magnitude: 3x the left element plus 2x the right;
    a plain number is its own magnitude."""
    if type(num) is Pair:
        return 3 * magnitude(num[0]) + 2 * magnitude(num[1])
    return num
def reduce(num):
    """Repeatedly apply explode (preferred) then split until neither fires,
    then return the now-reduced number. (Shadows the functools builtin name;
    kept for interface compatibility.)"""
    while True:
        if explode(num):
            continue
        if not split(num):
            return num
def snailfish_add(a, b):
    """Snailfish addition: wrap the operands in a new Pair and reduce it."""
    combined = Pair(a, b)
    return reduce(combined)
def part_one(numbers):
    """Sum all numbers left-to-right and print the final magnitude."""
    total = numbers[0]
    for number in numbers[1:]:
        total = snailfish_add(total, number)
    print(magnitude(total))
def part_two(numbers):
    """Print the largest magnitude of any ordered pairwise sum.

    Operands are copied before each addition because reduce() mutates its
    input in place.
    """
    best = -1
    for i, left in enumerate(numbers):
        for j, right in enumerate(numbers):
            if i == j:
                continue
            total = snailfish_add(left.copy(), right.copy())
            best = max(best, magnitude(total))
    print(best)
def parse_line(line):
    """Parse one snailfish-number line (e.g. "[[1,2],3]") into nested Pairs.

    Stack-based parse: '[' pushes a placeholder, ',' moves the finished
    left-hand element onto `lhs`, and ']' combines the saved left-hand
    element with the right-hand one into a Pair (discarding the placeholder).

    NOTE(review): `int(c)` parses one character at a time, so only
    single-digit regular numbers are handled — fine for the puzzle input.
    """
    lhs = deque()
    stack = deque()
    for c in line:
        if c == '[':
            stack.append(())
        elif c == ',':
            lhs.append(stack.pop())
        elif c == ']':
            rhs = stack.pop()
            # Pop the '[' placeholder pushed for this pair.
            discard = stack.pop()
            stack.append(Pair(lhs.pop(), rhs))
        else:
            stack.append(int(c))
    return stack.pop()
if __name__ == "__main__":
    # Read one snailfish number per line until the first blank line / EOF.
    numbers = []
    with open("input.txt", "r") as f:
        while 1:
            line = f.readline().strip()
            if not line:
                break
            numbers.append(parse_line(line))
    # part_one(numbers)
    part_two(numbers)
| bitwitch/advent-of-code | aoc2021/18-snailfish/snailfish.py | snailfish.py | py | 6,139 | python | en | code | 1 | github-code | 90 |
30071786820 | import random
from datetime import timedelta, datetime
from copy import deepcopy
import pytest
from faker import Faker
from django.utils import timezone
from auditor.bolean_auditor.process_protocol import TodayExternalIP, PortRank, \
ProtocolIPRank, IPSource, Processor, IPQueueProcess, PreProcess, AttackIPRank
from utils.unified_redis import cache, IPDuplicate
from statistic.tasks import AttackIPStatisticTask
fake = Faker()
fake1 = fake.ipv4()
fake2 = fake.ipv4()
fake3 = fake.ipv4()
fake4 = fake.ipv4()
fake5 = fake.ipv4()
fake6 = fake.ipv4()
@pytest.fixture(scope='function')
def data():
    """Synthetic protocol records for 2020-12-24: five IP/port pairs with
    descending hit counts (10/8/6/4/2), one single-hit record, twenty
    all-None records, and one IPv6 record."""
    data_7 = [
        {'src_ip': None, 'dst_ip': None, 'src_port': None, 'dst_port': None,
         'occurred_at': '2020-12-24T06:10:00'}
        for _ in range(20)]
    data_1 = [{'src_ip': fake1, 'dst_ip': '192.168.1.1', 'src_port': 22,
               'dst_port': 4443, 'occurred_at': '2020-12-24T06:10:00'}
              for _ in range(10)]
    data_2 = [{'src_ip': '10.0.1.1', 'dst_ip': fake2, 'src_port': 32,
               'dst_port': 6665, 'occurred_at': '2020-12-24T06:10:00'}
              for _ in range(8)]
    data_3 = [{'src_ip': fake3, 'dst_ip': '172.16.15.1', 'src_port': 11,
               'dst_port': 443, 'occurred_at': '2020-12-24T06:10:00'}
              for _ in range(6)]
    data_4 = [{'src_ip': '10.0.2.2', 'dst_ip': fake4, 'src_port': 2222,
               'dst_port': 2222, 'occurred_at': '2020-12-24T06:10:00'}
              for _ in range(4)]
    data_5 = [{'src_ip': fake5, 'dst_ip': '172.16.16.1', 'src_port': 42,
               'dst_port': 3333, 'occurred_at': '2020-12-24T06:10:00'}
              for _ in range(2)]
    data_6 = [
        {'src_ip': fake6, 'dst_ip': '10.4.4.4', 'src_port': 12, 'dst_port': 12,
         'occurred_at': '2020-12-24T06:10:00'}
        for _ in range(1)]
    data_8 = [
        {'src_ip': '2401:ba00:8:1::1', 'dst_ip': '2401:ba00:8:1::2',
         'src_port': None, 'dst_port': None,
         'occurred_at': '2020-12-24T06:10:00'}
    ]
    return data_1 + data_2 + data_3 + data_4 + data_5 + data_6 + data_7 + data_8
class TestTodayExternalIP:
    """Top-N ranking of today's external IPs by hit count."""
    def test_external_ip(self, data):
        # Reset stored counters so the ranking is deterministic.
        TodayExternalIP.clean()
        process = TodayExternalIP(timezone.now())
        for d in data:
            process.process(d)
        process.save()
        # Only the five busiest external IPs are returned, in descending
        # order; the single-hit fake6 record falls outside the top 5.
        assert process.get_top_n() == [
            {'ip': fake1, 'count': 10},
            {'ip': fake2, 'count': 8},
            {'ip': fake3, 'count': 6},
            {'ip': fake4, 'count': 4},
            {'ip': fake5, 'count': 2},
        ]
class TestPortRank:
    """Top-N ranking of source and destination ports; everything outside
    the top five is grouped under '其他' ("other")."""
    def test_port_rank(self, data):
        PortRank.clean()
        process = PortRank(timezone.now())
        for d in data:
            process.process(d)
        process.save()
        src = [
            {'port': '22', 'count': 10},
            {'port': '32', 'count': 8},
            {'port': '11', 'count': 6},
            {'port': '2222', 'count': 4},
            {'port': '42', 'count': 2},
            {'port': '其他', 'count': 1}
        ]
        dst = [
            {'port': '4443', 'count': 10},
            {'port': '6665', 'count': 8},
            {'port': '443', 'count': 6},
            {'port': '2222', 'count': 4},
            {'port': '3333', 'count': 2},
            {'port': '其他', 'count': 1}
        ]
        assert process.get_top_n_src_port() == src
        assert process.get_top_n_dst_port() == dst
        assert process.get_top_n() == {
            'src_port': src,
            'dst_port': dst,
        }
@pytest.mark.django_db
class TestProtocolIPRank:
    """
    Test the IP traffic ranking, split into source IPs and destination IPs.
    """
    def test_process(self, data):
        ProtocolIPRank.clean()
        process = ProtocolIPRank(timezone.now())
        for d in data:
            process.process(d)
        process.save()
        # Percentages are relative to the busiest IP (10 hits == 100%).
        src = [
            {'ip': fake1, 'count': 10, 'percent': 100.0},
            {'ip': '10.0.1.1', 'count': 8, 'percent': 80.0},
            {'ip': fake3, 'count': 6, 'percent': 60.0},
            {'ip': '10.0.2.2', 'count': 4, 'percent': 40.0},
            {'ip': fake5, 'count': 2, 'percent': 20.0},
        ]
        assert process.get_top_n_src_ip() == src
        dst = [
            {'ip': '192.168.1.1', 'count': 10, 'percent': 100.0},
            {'ip': fake2, 'count': 8, 'percent': 80.0},
            {'ip': '172.16.15.1', 'count': 6, 'percent': 60.0},
            {'ip': fake4, 'count': 4, 'percent': 40.0},
            {'ip': '172.16.16.1', 'count': 2, 'percent': 20.0},
        ]
        assert process.get_top_n_dst_ip() == dst
        assert process.get_top_n() == {
            'src_ip': src, 'dst_ip': dst
        }
@pytest.mark.django_db
class TestProcess:
    """Running the full processor chain populates every redis key family."""
    def test_process_list(self, data):
        processor = Processor.process_list(timezone.now())
        # Walk the chain and clear each processor's stored state first.
        processor_ = processor
        while processor_:
            processor_.clean()
            processor_ = processor_._next_processor
        for d in data:
            processor.process(d)
        processor.save()
        assert cache.keys(TodayExternalIP.key_pattern + '*') != []
        assert cache.keys(PortRank.src_total_key_pattern + '*') != []
        assert cache.keys(ProtocolIPRank.src_ip_pattern + '*') != []
        assert cache.keys(IPSource.city_key_pattern + '*') != []
        assert cache.keys(IPQueueProcess.foreign_key + '*') != []
        assert cache.keys(AttackIPRank.src_ip_pattern + '*') != []
# Records whose source IP geolocates outside China (country names are data
# the assertions compare against, so they stay in Chinese).
foreign_data = [
    {'src_ip': '67.220.91.30', 'dst_ip': '192.168.2.2', 'country': '美国',
     'occurred_at': '2020-12-24T06:10:00'},
    {'src_ip': '133.242.187.207', 'dst_ip': '175.45.20.138', 'country': '日本',
     'occurred_at': '2020-12-24T06:10:00'},  # Japan
    {'src_ip': '212.219.142.207', 'dst_ip': '192.168.1.1', 'country': '英国',
     'occurred_at': '2020-12-24T06:10:00'},  # United Kingdom
    {'src_ip': '176.192.102.130', 'dst_ip': '192.168.1.1', 'country': '俄罗斯',
     'occurred_at': '2020-12-24T06:10:00'},  # Russia
    {'src_ip': '92.103.174.236', 'dst_ip': '192.168.1.1', 'country': '法国',
     'occurred_at': '2020-12-24T06:10:00'},  # France
]
# Records flowing between Chinese cities, plus edge cases with None IPs.
chinese_data = [
    {'src_ip': '175.45.20.138', 'dst_ip': '202.207.251.20',
     'occurred_at': '2020-12-24T06:10:00'},  # Hong Kong -> Taiyuan
    {'src_ip': '122.100.160.253', 'dst_ip': '123.138.162.112',
     'occurred_at': '2020-12-24T06:10:00'},  # Macau -> Xi'an
    {'src_ip': '123.193.51.187', 'dst_ip': '192.168.1.1',
     'occurred_at': '2020-12-24T06:10:00'},  # Taipei -> Beijing
    {'src_ip': '192.168.1.1', 'dst_ip': '123.193.51.187',
     'occurred_at': '2020-12-24T06:10:00'},  # Beijing -> Taipei
    {'src_ip': '192.168.1.1', 'dst_ip': '10.0.1.1',
     'occurred_at': '2020-12-24T06:10:00'},  # internal-to-internal traffic is not recorded
    {'src_ip': None, 'dst_ip': '192.168.1.1',
     'occurred_at': '2020-12-24T06:10:00'},
    {'src_ip': '123.222.222.222', 'dst_ip': None,
     'occurred_at': '2020-12-24T06:10:00'},
    {'src_ip': None, 'dst_ip': None, 'occurred_at': '2020-12-24T06:10:00'},
]
# IPv6 records (one private pair, one public-to-link-local pair).
ipv6_data = [
    {'src_ip': '2401:ba00:8:1::1', 'dst_ip': '2401:ba00:8:1::1',
     'occurred_at': '2020-12-24T06:10:00'},
    {'src_ip': '2001:200:1c0:3601::80:1', 'dst_ip': 'fe80::50ee:cfff:fe4b:783a',
     'occurred_at': '2020-12-24T06:10:00'}
]
@pytest.mark.django_db
class TestIPSource:
"""
IPSource模块做了很多工作,比如统计IP地图,威胁源地区,攻击次数,攻击源IP,境外访问个数
"""
def create_processor(self):
current = timezone.now()
processor = PreProcess(current)
processor.set_next(IPSource(current))
return processor
def test_ip_map_foreign(self):
"""
国外IP流向
"""
IPSource.clean()
source = self.create_processor()
data = foreign_data + ipv6_data
for d in data:
for _ in range(2):
source.process(d)
source.save()
source = IPSource(timezone.now())
result = source.get_city_data()
for i, r in enumerate(result):
assert r.src_c in ['美国', '日本', '英国', '俄罗斯', '法国']
assert r.dst_c == '中国'
assert r.count == 2
source = IPSource(timezone.now())
for d in foreign_data:
source.process(d)
source.save()
result = source.get_city_data()
for i, r in enumerate(result):
assert r.src_c in ['美国', '日本', '英国', '俄罗斯', '法国']
assert r.dst_c == '中国'
assert r.count == 3
def test_ip_map_chinese(self):
"""
国内IP流向
"""
IPSource.clean()
source = self.create_processor()
for d in chinese_data[:3]:
for _ in range(5):
source.process(d)
source.save()
source = IPSource(timezone.now())
result = source.get_city_data()
for i, r in enumerate(result):
assert r.city in ['香港->太原', '澳门->西安', '台北->北京', '北京->台北']
assert r.count == 5
def test_external_ip(self):
"""
外网IP数据量, 源或目的IP是外网的都统计,不去重
:return:
"""
IPDuplicate.create_external_ip(timezone.now()).force_clean()
IPSource.clean()
source = self.create_processor()
data = foreign_data + chinese_data + ipv6_data
for d in data:
source.process(d)
source.save()
source = IPSource(timezone.now())
assert source.get_attack_data()['external_ip'] == 16
def test_attack_count(self):
"""
攻击次数统计,源IP是外网IP的统计,不去重
"""
IPDuplicate.create_external_ip(timezone.now()).force_clean()
IPSource.clean()
source = self.create_processor()
data = foreign_data + chinese_data + ipv6_data
for d in data:
source.process(d)
source.save()
source = IPSource(timezone.now())
assert source.get_attack_data()['count'] == 11
def test_attack_src_ip(self):
"""
今日攻击源IP个数,IP需要对今日已出现的IP去重
"""
IPSource.clean()
source = self.create_processor()
data = foreign_data + foreign_data + chinese_data + chinese_data + ipv6_data
for d in data:
source.process(d)
source.save()
source = IPSource(timezone.now())
assert source.get_attack_data()['src_ip'] == 11
def test_attack_foreign(self):
"""
今日境外访问IP个数,需要对今日已出现的ip去重
"""
IPSource.clean()
source = self.create_processor()
data = foreign_data + foreign_data + chinese_data + chinese_data + ipv6_data
for d in data:
source.process(d)
source.save()
source = IPSource(timezone.now())
assert source.get_attack_data()['foreign'] == 6
def test_attack_history_src_ip(self):
"""
累计的攻击源IP个数,需要对历史的IP去重
"""
duplicate = IPDuplicate.create_duplicate_ip(timezone.now())
duplicate.force_clean()
for i in ['67.220.91.30', '133.242.187.207', '123.193.51.187']:
duplicate.is_duplicate_ip(i)
IPSource.clean()
source = IPSource(timezone.now())
data = foreign_data + foreign_data + chinese_data + chinese_data + ipv6_data
for d in data:
source.process(d)
source.save()
assert source.get_attack_data()['history_src_ip'] == 8
def test_attack_history_foreign(self):
"""
累计的境外IP个数,需要对历史的IP去重
"""
duplicate = IPDuplicate.create_duplicate_ip(timezone.now())
duplicate.force_clean()
for i in ['67.220.91.30', '133.242.187.207', '123.193.51.187']:
duplicate.is_duplicate_ip(i)
IPSource.clean()
source = IPSource(timezone.now())
data = foreign_data + foreign_data + chinese_data + chinese_data + ipv6_data
for d in data:
source.process(d)
source.save()
assert source.get_attack_data()['history_foreign'] == 4
def test_attack_ip_statistic_total(self):
duplicate = IPDuplicate.create_duplicate_ip(timezone.now())
duplicate.force_clean()
for i in ['67.220.91.30', '133.242.187.207', '123.193.51.187']:
duplicate.is_duplicate_ip(i)
data = foreign_data + foreign_data + chinese_data + chinese_data + ipv6_data
IPSource.clean()
source = IPSource(timezone.now() - timedelta(days=1))
for d in data:
source.process(d)
source.save()
statistic = AttackIPStatisticTask.run(timezone.now())
assert statistic.count == 5 + 5 + 4 + 4 + 2
assert statistic.src_ip == 3 + 3 + 2
assert statistic.foreign == 3 + 1
assert statistic.external_ip == 6 + 6 + 7 + 7 + 3
    def test_get_country_top(self):
        """
        Fetch the top-5 countries by attack count.
        """
        data = foreign_data + foreign_data + chinese_data + ipv6_data
        IPSource.clean()
        source = IPSource(timezone.now())
        for d in data:
            source.process(d)
        source.save()
        country = source.get_country_top_n()
        # Ordering among equal counts is not asserted, hence sorted() on both sides.
        assert sorted([i.country for i in country]) == \
            sorted(['中国', '美国', '日本', '英国', '俄罗斯'])
@pytest.mark.django_db
class TestIPQueueProcess:
    """Tests for the rolling queue that keeps the most recent external/foreign IPs."""
    def create_proccessor(self):
        # Chain the pre-processing step with the IP-queue step, pinned to "now".
        current = timezone.now()
        processor = PreProcess(current)
        processor.set_next(IPQueueProcess(current))
        return processor
    def test_external_ip(self):
        """
        Each run keeps only the 5 most recent external-IP records.
        """
        chinese_data_copy = deepcopy(ipv6_data) + deepcopy(chinese_data)
        # Give every record a distinct, strictly decreasing timestamp.
        for i, c in enumerate(chinese_data_copy):
            c['occurred_at'] = datetime(2020, 12, 24, 6, 40 - i).isoformat()
        IPQueueProcess.clean()
        process = self.create_proccessor()
        for c in chinese_data_copy:
            process.process(c)
        process.save()
        process = IPQueueProcess(timezone.now())
        result = process.get_external_ip()
        assert [r['ip'] for r in result] == [
            '2401:ba00:8:1::1', '2001:200:1c0:3601::80:1', '175.45.20.138',
            '122.100.160.253', '123.193.51.187']
        # A second batch an hour later must displace the older entries.
        data_copy = deepcopy(chinese_data) + deepcopy(ipv6_data)
        for i, c in enumerate(data_copy):
            c['occurred_at'] = datetime(2020, 12, 24, 7, 40 - i).isoformat()
        process = self.create_proccessor()
        for c in data_copy:
            process.process(c)
        process.save()
        process = IPQueueProcess(timezone.now())
        result = process.get_external_ip()
        assert [r['ip'] for r in result] == [
            '175.45.20.138', '122.100.160.253', '123.193.51.187',
            '123.222.222.222', '2401:ba00:8:1::1',
        ]
    def test_foreign_ip(self):
        """
        Each run keeps only the 5 most recent foreign-IP records.
        """
        data_copy = deepcopy(ipv6_data) + deepcopy(chinese_data) + deepcopy(
            foreign_data)
        # Distinct decreasing timestamps so recency ordering is unambiguous.
        for i, c in enumerate(data_copy):
            c['occurred_at'] = datetime(2020, 12, 24, 7, 50 - i).isoformat()
        IPQueueProcess.clean()
        process = self.create_proccessor()
        for c in data_copy:
            process.process(c)
        process.save()
        process = IPQueueProcess(timezone.now())
        result = process.get_foreign_ip()
        assert [r['ip'] for r in result] == [
            '123.222.222.222', '67.220.91.30', '133.242.187.207',
            '212.219.142.207', '176.192.102.130',
        ]
        # A later batch should push the newest foreign IPs to the front.
        data_copy = deepcopy(foreign_data) + deepcopy(ipv6_data) + deepcopy(
            chinese_data)
        for i, c in enumerate(data_copy):
            c['occurred_at'] = datetime(2020, 12, 24, 8, 50 - i).isoformat()
        process = self.create_proccessor()
        for c in data_copy:
            process.process(c)
        process.save()
        process = IPQueueProcess(timezone.now())
        result = process.get_foreign_ip()
        assert [r['ip'] for r in result] == [
            '67.220.91.30', '133.242.187.207',
            '212.219.142.207', '176.192.102.130', '92.103.174.236'
        ]
@pytest.mark.django_db
class TestAttackIPRank:
    """Tests for the top-N source/destination IP ranking."""
    def create_processor(self):
        # Chain pre-processing with the ranking step, pinned to "now".
        current = timezone.now()
        processor = PreProcess(current)
        processor.set_next(AttackIPRank(current))
        return processor
    def test_external_ip_rank(self, data):
        """Ranking returns counts plus percentages relative to the top entry."""
        AttackIPRank.clean()
        processor = self.create_processor()
        for d in data:
            processor.process(d)
        processor.save()
        attack = AttackIPRank(timezone.now())
        # Expected source ranking (fake1/fake3/... come from the `data` fixture).
        src = [
            {'ip': fake1, 'count': 10, 'percent': 100.0},
            {'ip': fake3, 'count': 6, 'percent': 60.0},
            {'ip': fake5, 'count': 2, 'percent': 20.0},
            {'ip': fake6, 'count': 1, 'percent': 10.0}
        ]
        assert attack.get_top_n_src_ip() == src
        dst = [
            {'ip': '192.168.1.1', 'count': 10, 'percent': 100.0},
            {'ip': '172.16.15.1', 'count': 6, 'percent': 60.0},
            {'ip': '172.16.16.1', 'count': 2, 'percent': 20.0},
            {'ip': '10.4.4.4', 'count': 1, 'percent': 10.0},
        ]
        assert attack.get_top_n_dst_ip() == dst
        # Combined view bundles both rankings.
        assert attack.get_top_n() == {
            'src_ip': src, 'dst_ip': dst
        }
| liushiwen555/unified_management_platform_backend | auditor/tests/test_protocol_synchronize.py | test_protocol_synchronize.py | py | 18,117 | python | en | code | 0 | github-code | 90 |
21639260894 | from django.shortcuts import render, HttpResponseRedirect, reverse
# Create your views here.
from App_post.models import PartnerRequestModel, JobPostModel, PartnerApplicationModel
def home(request):
    """Render the landing page."""
    return render(request, 'App_post/home.html')
def partner_request(request):
    """Show the partner-request form; on POST, create a PartnerRequestModel.

    NOTE(review): form values are read straight from request.POST without
    validation — a missing or non-numeric 'total_participants' raises here.
    """
    if request.method == 'POST':
        title = request.POST.get('title')
        participants = int(request.POST.get('total_participants'))
        location = request.POST.get('location')
        activity_type = request.POST.get('type')
        duration = request.POST.get('project_duration')
        skills = request.POST.get('required_skills')
        deadline = request.POST.get('application_deadline')
        file = request.FILES.get('related_file')
        description = request.POST.get('description')
        # The request author is the logged-in user making the POST.
        request_model = PartnerRequestModel(title=title, type=activity_type, total_participants=participants,
                                            project_duration=duration, location=location,
                                            author=request.user, required_skills=skills,
                                            application_deadline=deadline, related_file=file, description=description)
        request_model.save()
        return HttpResponseRedirect(reverse('App_post:home'))
    return render(request, 'App_post/partner_request.html')
def job_post(request):
    """Show the job-post form; on POST, create a JobPostModel from the fields.

    NOTE(review): like partner_request, fields are unvalidated — a missing or
    non-numeric 'total_vacancies' raises here.
    """
    if request.method == 'POST':
        job_title = request.POST.get('job_title')
        company_name = request.POST.get('company_name')
        position = request.POST.get('position')
        work_type = request.POST.get('type')
        vacancies = int(request.POST.get('total_vacancies'))
        category = request.POST.get('category')
        location = request.POST.get('location')
        gender = request.POST.get('gender')
        skills = request.POST.get('required_skills')
        responsibilities = request.POST.get('job_responsibilities')
        description = request.POST.get('job_description')
        deadline = request.POST.get('application_deadline')
        experience = request.POST.get('experience')
        job_model = JobPostModel(author=request.user, job_title=job_title, company_name=company_name, position=position, category=category, work_type=work_type, total_vacancies=vacancies, location=location, required_skills=skills, job_description=description, job_responsibilities=responsibilities, experience=experience, gender_specification=gender, application_deadline=deadline)
        job_model.save()
        return HttpResponseRedirect(reverse('App_post:home'))
    return render(request, 'App_post/job_post.html')
def display_partner_requests(request):
    """List every partner request for browsing."""
    # Fetch all posts and hand them straight to the template.
    context = {'requests': PartnerRequestModel.objects.all()}
    return render(request, 'App_post/display_requests.html', context=context)
def apply_for_participation(request, pk):
    """Let the logged-in user apply to the partner request with primary key pk."""
    # NOTE(review): .get() raises DoesNotExist for an unknown pk (no 404 handling).
    project = PartnerRequestModel.objects.get(id=pk)
    if request.method == 'POST':
        reason = request.POST.get('reason')
        apply_model = PartnerApplicationModel(activity=project, participant=request.user, reason_of_participation=reason)
        apply_model.save()
        return HttpResponseRedirect(reverse('App_post:home'))
    return render(request, 'App_post/apply_for_participation.html')
| evana27perveen/WePoka | App_post/views.py | views.py | py | 3,366 | python | en | code | 0 | github-code | 90 |
42291859717 | import numpy as np
import crocoddyl
class CostModelDoublePendulum(crocoddyl.CostModelAbstract):
    """State cost for a double pendulum.

    The residual is r = [sin(q1), sin(q2), 1 - cos(q1), 1 - cos(q2), v1, v2],
    penalizing deviation of the joint angles from zero and nonzero joint
    velocities, with x = [q1, q2, v1, v2].
    """
    def __init__(self, state, activation, nu):
        # Default to a quadratic activation when none is supplied.
        activation = (
            activation
            if activation is not None
            else crocoddyl.ActivationModelQuad(state.ndx)
        )
        crocoddyl.CostModelAbstract.__init__(self, state, activation, nu=nu)
    def calc(self, data, x, u):
        # Build the trigonometric residual and evaluate the activation on it.
        c1, c2 = np.cos(x[0]), np.cos(x[1])
        s1, s2 = np.sin(x[0]), np.sin(x[1])
        data.residual.r[:] = np.array([s1, s2, 1 - c1, 1 - c2, x[2], x[3]])
        self.activation.calc(data.activation, data.residual.r)
        data.cost = data.activation.a_value
    def calcDiff(self, data, x, u):
        # Analytical Jacobian of the residual w.r.t. the state.
        c1, c2 = np.cos(x[0]), np.cos(x[1])
        s1, s2 = np.sin(x[0]), np.sin(x[1])
        self.activation.calcDiff(data.activation, data.residual.r)
        data.residual.Rx[:2, :2] = np.diag([c1, c2])  # d(sin q)/dq
        data.residual.Rx[2:4, :2] = np.diag([s1, s2])  # d(1 - cos q)/dq
        data.residual.Rx[4:6, 2:4] = np.diag([1, 1])  # d(v)/dv
        data.Lx[:] = np.dot(data.residual.Rx.T, data.activation.Ar)
        # Second-order terms stored in the custom Rxx buffer; combined with the
        # activation's Arr to assemble the Hessian approximation below.
        data.Rxx[:2, :2] = np.diag([c1**2 - s1**2, c2**2 - s2**2])
        data.Rxx[2:4, :2] = np.diag([s1**2 + (1 - c1) * c1, s2**2 + (1 - c2) * c2])
        data.Rxx[4:6, 2:4] = np.diag([1, 1])
        data.Lxx[:, :] = np.diag(np.dot(data.Rxx.T, np.diag(data.activation.Arr)))
    def createData(self, collector):
        # Use the custom data class so the extra Rxx buffer exists.
        data = CostDataDoublePendulum(self, collector)
        return data
class CostDataDoublePendulum(crocoddyl.CostDataAbstract):
    """Cost data extended with the Rxx buffer used by CostModelDoublePendulum."""
    def __init__(self, model, collector):
        crocoddyl.CostDataAbstract.__init__(self, model, collector)
        # Extra storage for residual second derivatives (6 residuals x 4 states).
        self.Rxx = np.zeros((6, 4))
class ActuationModelDoublePendulum(crocoddyl.ActuationModelAbstract):
    """Under-actuated double pendulum: a single scalar control drives one joint.

    ``actLink`` selects the actuated joint (1 -> first joint, otherwise the
    second); the other joint remains passive.
    """
    def __init__(self, state, actLink):
        crocoddyl.ActuationModelAbstract.__init__(self, state, 1)  # nu = 1
        self.nv = state.nv
        self.actLink = actLink
    def calc(self, data, x, u):
        # Map the scalar control onto the joint torques; dtau_du is the constant
        # selection vector pre-filled in the data object.
        data.tau[:] = data.dtau_du * u
    def calcDiff(self, data, x, u):
        # dtau/du is constant and already stored in data; nothing to compute.
        pass
    def commands(self, data, x, tau):
        # Recover the scalar control from the full torque vector.
        if self.actLink == 1:
            data.u[:] = tau[0]
        else:
            data.u[:] = tau[1]
    def torqueTransform(self, data, x, tau):
        # Mtau is constant and already stored in data; nothing to compute.
        pass
    def createData(self):
        data = ActuationDataDoublePendulum(self)
        return data
class ActuationDataDoublePendulum(crocoddyl.ActuationDataAbstract):
    """Pre-fills the constant actuation maps for the selected joint."""
    def __init__(self, model):
        crocoddyl.ActuationDataAbstract.__init__(self, model)
        # Select which joint receives the scalar control u.
        if model.actLink == 1:
            self.dtau_du[0] = 1.0
            self.tau_set = [True, False]
            self.Mtau[0] = 1.0
        else:
            self.dtau_du[1] = 1.0
            self.tau_set = [False, True]
            self.Mtau[1] = 1.0
| loco-3d/crocoddyl | bindings/python/crocoddyl/utils/pendulum.py | pendulum.py | py | 2,778 | python | en | code | 584 | github-code | 90 |
72483618858 | import wave
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns; sns.set()
import random
# --- Load the WAV file and normalise the samples to [-1, 1] ---
wf = wave.open('../input/flute.wav')
ch = wf.getnchannels()
fn = wf.getnframes()
# Full-scale amplitude for this sample width (e.g. 32768 for 16-bit audio).
amp = (2**8) ** wf.getsampwidth() / 2
data = wf.readframes(fn)
data = np.frombuffer(data, 'int16')
data = data / amp  # amplitude normalisation
data = data[::ch]  # keep one channel (every ch-th sample)

st = 10000  # start position of the analysis frame (in samples)
size = 1024  # FFT frame length
# (Removed dead code: unused `start`/`end` bounds and the unused random index
#  `n = random.randint(start, end)` — nothing below ever read them.)

hammingWindow = np.hamming(size)
fs = wf.getframerate()
d = 1.0 / fs  # sample period, used to build the frequency axis
freqList = np.fft.fftfreq(size, d)

# Window the frame, take its FFT, and normalise the spectrum to peak 1.0.
windowedData = hammingWindow * data[st:st+size]
data = np.fft.fft(windowedData)
data = data / max(abs(data))

plt.plot(freqList, abs(data))
plt.axis([0, fs/16, 0, 1])  # show only the low-frequency part of the spectrum
plt.title('flute')
plt.xlabel('Frequency[Hz]')
plt.ylabel('amplitude spectrum')
plt.show() | ymt117/Mute_speaker | src/flute_freq_spectrum2.py | flute_freq_spectrum2.py | py | 934 | python | en | code | 0 | github-code | 90 |
1859653235 | # Converts a hexadecimal number (input) to binary number
def hex_to_binary(hex_num):
    """Convert a (possibly negative) hexadecimal string to a binary string.

    Each hex digit maps to exactly four bits, e.g. '2F' -> '00101111'.
    Accepts upper- or lower-case digits; a leading '-' is preserved.
    Raises ValueError on non-hexadecimal characters.
    """
    is_neg = hex_num.startswith('-')
    digits = hex_num[1:] if is_neg else hex_num  # drop the sign for conversion
    # format(value, '04b') zero-pads every digit to its 4-bit representation,
    # replacing the manual divide-by-2 loop and the A-F if/elif ladder.
    bits = ''.join(format(int(digit, 16), '04b') for digit in digits)
    return '-' + bits if is_neg else bits


if __name__ == '__main__':
    hex_num = input('Enter a hexadecimal number to convert to binary: ')
    # Fixed message: the original printed "in decimal is" although the result
    # being shown is the binary representation.
    print(hex_num, 'in binary is', hex_to_binary(hex_num))
| rubengr16/OSSU | ComputerScience/2_MIT6.00.1x_Introduction_to_Computer_Science/3_Simple_Algorithms/hexadecimal_to_binary_float.py | hexadecimal_to_binary_float.py | py | 1,182 | python | en | code | 0 | github-code | 90 |
4039836372 | import threading
import numpy as np
import dotsandboxes.gui.global_var
from Arena import Arena
#from dotsandboxes.gui.main import GUI
import time
from dotsandboxes.gui import global_var
class RandomPlayer:
    """Player that picks a uniformly random legal move."""
    def __init__(self, game):
        self.game = game
    def play(self, board):
        """Sample actions at random until one is valid for player 1."""
        n_actions = self.game.getActionSize()
        valids = self.game.getValidMoves(board, 1)
        # Rejection-sample: keep drawing until the move is marked valid.
        action = np.random.randint(n_actions)
        while valids[action] != 1:
            action = np.random.randint(n_actions)
        return action
# Will play at random, unless there's a chance to score a square
class GreedyRandomPlayer:
    """Plays randomly, except it grabs any move that immediately scores a square."""
    def __init__(self, game):
        self.game = game
    def play(self, board):
        """Return the first scoring move if one exists, else a random valid move."""
        valids = self.game.getValidMoves(board, 1)
        score_before = board[0, -1]
        # Probe every legal move; take the first that raises our score.
        for candidate in np.nonzero(valids)[0]:
            next_board, _ = self.game.getNextState(board, 1, candidate)
            if next_board[0, -1] > score_before:
                return candidate
        # No scoring move available: fall back to rejection-sampled random play.
        choice = np.random.randint(self.game.getActionSize())
        while valids[choice] != 1:
            choice = np.random.randint(self.game.getActionSize())
        return choice
#
# class HumanDotsAndBoxesPlayer:
# def __init__(self, game):
# self.game = game
#
# def play(self, board):
# if board[2][-1] == 1:
# # We have to pass
# return self.game.getActionSize() - 1
# valids = self.game.getValidMoves(board, 1)
# while True:
# print("Valid moves: {}".format(np.where(valids == True)[0]))
# #a = int(input())
# try:
# while not GUI.edge_number: # Wait until GUI.edge_number is updated
# time.sleep(0.1) # Wait for 100 milliseconds
# a = GUI.edge_number[-1]
# print("a:"+str(a))
# if valids[a]:
# return a
# except ValueError:
# print("Invalid input. Please enter an integer.")
class HumanDotsAndBoxesPlayer:
    """Human player whose move is supplied by the GUI via global_var.edge_number."""
    def __init__(self, game):
        self.game = game
    def play(self, board):
        # board[2][-1] == 1 flags a forced pass; the pass move is the last action id.
        if board[2][-1] == 1:
            # We have to pass
            return self.game.getActionSize() - 1
        valids = self.game.getValidMoves(board, 1)
        # event = threading.Event() # Create a new event
        # t = threading.Thread(target=GUI.create_board, args=(Arena.action_copy[-1], event))
        # t.start()
        # event.wait() # Wait for the event to be set
        while True:
            #print("Valid moves: {}".format(np.where(valids == True)[0]))
            try:
                # Busy-wait (100 ms polls) until the GUI records a clicked edge.
                while not global_var.edge_number: # Wait until GUI.edge_number is updated
                    time.sleep(0.1) # Wait for 100 milliseconds
                a = global_var.edge_number[-1]
                #print("a:"+str(a))
                # Only accept the click if it maps to a legal move.
                if valids[a]:
                    return a
            except ValueError:
                pass
                #print("Invalid input. Please enter an integer.")
| cuijiayu20/alphazero_dotsandboxes | alpha-zero-general 1.2_now/dotsandboxes/DotsAndBoxesPlayers.py | DotsAndBoxesPlayers.py | py | 3,118 | python | en | code | 0 | github-code | 90 |
29108212606 |
import socket
import time
import os
import datetime
from threading import Thread
#from scapy.all import ARP, Ether, srp
#-----------------------------------------------------------------------------#
# Locating Nodes Part #
#-----------------------------------------------------------------------------#
# 1st Method to find nodes in the network using socket
print(f"\nNode IP address {socket.gethostbyname(socket.gethostname())}")
nodes = [] # list of all the active nodes
FORMAT = 'utf-8' # format to encode/decode byte to string and vice versa
PORT = 65434 # shared port in all the nodes
# Purpose: Broadcast to every node in the subnet desired with the message of sender's own IP address
def broadcast_LAN():
    """Broadcast this host's IP address to every node on the 192.168.0.0/24 subnet."""
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) # using a UDP socket
    s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) # reused same address
    s.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1) # tells the network stack that the socket will be used as a broadcast
    s.sendto(socket.gethostbyname(socket.gethostname()).encode(FORMAT), ("192.168.0.255", PORT)) # sends IP address to a broadcast address to be received from everyone
# Purpose: Listens in for any broadcast message in the network to build up their nodes list. Its agrument is the stop function to kill the thread.
def listen_broadcast(stop):
    """Collect peer IPs from broadcast datagrams until stop() returns True.

    NOTE(review): the stop flag is only tested after recvfrom() returns, so the
    thread will not exit until at least one more datagram arrives.
    """
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    s.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
    s.bind(("", PORT)) # binds to all interfaces if left "" for the address argument
    # Keeps looping to receive message from every node in the network
    while True:
        data, addr = s.recvfrom(1024) # returns back a tuple of "(socket connection, src address)"
        node = data.decode(FORMAT); # decodes data received to obtain a string type
        # kill the thread from looping forever
        if stop():
            break
        # checks if we already have the node in the list of nodes and if that node is not ourselves
        if node not in nodes and node != socket.gethostbyname(socket.gethostname()):
            nodes.append(node) # adds the node to our list
# Node discovery: run two rounds of listen+broadcast, then signal the
# listener threads to stop via the shared `stop` flag.
stop = False # function for killing a thread
# Loops twice to fully obtain all the nodes in the network and creates two threads (one for broadcasting message and the other for listening for that broadcasted message)
for x in range(2):
    time.sleep(1)
    Thread(target = listen_broadcast, args = (lambda: stop,)).start() # creates a thread and starts it to listen for broadcasted messages and passes a stop argument to kill the thread
    time.sleep(1)
    Thread(target = broadcast_LAN).start() # creates a thread and starts it to broadcast a message to each node in the network
stop = True
# 2nd Method to find nodes in the network using scapy
##print(f"\nNode socket address = {socket.gethostbyname(socket.gethostname())}:PORT")
##
##time.sleep(1)
### Using ARP request broadcast to find nodes in the network. This way who ever response back are active nodes
##result = srp(Ether(dst="ff:ff:ff:ff:ff:ff")/ARP(pdst="192.168.0.0/24"), timeout=3, verbose=0)[0]
### Filter the result to only the src address of the responses
##nodes = [received.psrc for sent, received in result[1:]]
#-----------------------------------------------------------------------------#
# Printing Network Nodes and Files (before sync) #
#-----------------------------------------------------------------------------#
PATH = "/files"
print("\nAvailable devices in the network:")
print(nodes)
print("\nLists of all the files before sync:")
print(os.listdir(PATH))
#-----------------------------------------------------------------------------#
# File Synchronization Part #
#-----------------------------------------------------------------------------#
SLASH = "/"
# Purpose: To keep receiving files from the node's server and creating a new file in a specific directory if we don't have the file or the content/timestamps are different. Its argument is the socket connection to the node.
def server_handler(s):
    """Receive files from a peer over socket `s` and store the new/changed ones.

    Wire protocol per file (all headers are fixed-width ASCII binary strings):
    16-byte name length, the name, 32-byte content size, 42-byte mtime, then —
    after we reply NEED_FILE — the raw content. HAVE_FILE skips the transfer.

    NOTE(review): each s.recv(n) is assumed to return exactly n bytes; TCP does
    not guarantee that (short reads) — confirm this holds for the deployment.
    """
    #files = os.listdir(PATH)
    #print(f"\nClient: {files}")
    # infinite loop to keep obtaining files
    while True:
        filename_size = s.recv(16).decode(FORMAT) # returns either the file's name size or an empty message indicating there is no more files to receive
        # checks if we received the file's name size or an empty byte
        if not filename_size:
            break
        #print(f"Client: {filename_size}")
        filename_size = int(filename_size, 2) # converts the fixed binary file's name size to an integer
        #print(f"Client: {filename_size}")
        filename = s.recv(filename_size).decode(FORMAT) # passes the file's name size integer to the socket.recv function to obtain the full file's name
        #print(f"Client: (unknown)")
        filesize = s.recv(32).decode(FORMAT) # obtains the file's size and decodes the byte to a string type
        #print(f"Client: {filesize}")
        filesize = int(filesize, 2) # converts the fixed binary file's size to an integer
        #print(f"Client: {filesize}")
        mtime = s.recv(42).decode(FORMAT) # gets the file's modification date and decodes the byte to a string type
        #print(f"Client: {mtime}")
        mtime = int(mtime, 2) # converts the fixed binary file's modification date to an integer
        #print(f"Client: {mtime}")
        same = True # tells whether there is a file with the same name but different details found
        found = False # tells whether the file is already in the directory
        for file in os.listdir(PATH):
            if file == filename:
                if os.path.getsize(PATH+SLASH+file) != filesize or os.stat(PATH+SLASH+file).st_mtime != mtime:
                    same = False
                found = True
        # checks the node we already have the file in the directory. If we do then it tells the node we already have it, else we tell the node we need that file.
        if same and found:
            #print("THIS RAN")
            s.send(b'HAVE_FILE')
            continue
        else:
            #print("HELLO WORLD")
            s.send(b'NEED_FILE')
        file = open(PATH+SLASH+filename, 'wb') # opens the file to start writing bytes into it
        CHUNKSIZE = 4096 # default bytes received from the node
        # loops to obtain all the file's data until the file's size is met
        while filesize > 0:
            # checks if the default byte is too large than the file's size
            if filesize < CHUNKSIZE:
                CHUNKSIZE = filesize
            data = s.recv(CHUNKSIZE) # receives the data from the node
            file.write(data) # writes that data into the file
            filesize -= len(data) # decrease the file's size since we add that byte data into the file
        file.close() # closes the file
        os.utime(PATH+SLASH+filename,(mtime,mtime)) # changes the new file's modification and access date to the original file so it can resemble the same file
        #print("Finished at Client side")
    s.close() # closes the socket connection
# Purpose: Creates a new socket each time to connect to every node in the network and each connect is handled by a thread
def start_client():
    """Connect to every known node in turn and receive its files via server_handler."""
    for peer in nodes:
        # One TCP connection per peer; the handler thread owns the socket.
        conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        conn.connect((peer, PORT))
        worker = Thread(target=server_handler, args=(conn,))
        worker.start()
        # Process peers sequentially: wait for this transfer before the next.
        worker.join()
# Purpose: To loop through each file in a specific directory and send its details to each node in the network. Its agrument is the socket connection of the node connected.
def client_handler(conn):
    """Send every file in PATH to a connected peer, skipping ones it already has.

    Mirrors server_handler's wire protocol: fixed-width binary headers
    (16-byte name length, name, 32-byte size, 42-byte mtime), then the raw
    content only if the peer answered NEED_FILE.
    """
    #files = os.listdir(PATH)
    #print(f"Server: {files}")
    # loop through each file
    for file in os.listdir(PATH):
        filename = file # the name of the file
        #print(f"Server: (unknown)")
        filename_size = len(filename) # the length of the file's name
        #print(f"Server: {filename_size}")
        filename_size = bin(filename_size)[2:].zfill(16) # creates a fixed binary length to send to the node so it can receive the file's name completely without any byte loss
        #print(f"Server: {filename_size}")
        conn.send(filename_size.encode(FORMAT)) # sends the file's name size to the node
        conn.send(filename.encode(FORMAT)) # sends the file's name to the node
        filesize = os.path.getsize(PATH+SLASH+filename) # obtains the file's content size
        #print(f"Server: {filesize}")
        filesize = bin(filesize)[2:].zfill(32) # creates a fixed binary length of the file's content size to send to the node
        #print(f"Server: {filesize}")
        conn.send(filesize.encode(FORMAT)) # sends the file's data size
        mtime = int(os.stat(PATH+SLASH+filename).st_mtime) # gets the file's modification date (as an integer)
        #print(f"Server: {mtime}")
        mtime = bin(mtime)[2:].zfill(42) # creates a fixed binary length of the file's modification date to send to the node
        #print(f"Server: {mtime}")
        #print(datetime.datetime.fromtimestamp(os.stat(PATH+SLASH+filename).st_mtime))
        conn.send(mtime.encode(FORMAT)) # sends the file's modification date
        # checks if the node already has the file, else it proceeds to sending the data of the file
        if conn.recv(9) == b'HAVE_FILE':
            continue
        _file = open(PATH+SLASH+filename, 'rb') # opens the file to read its data in binary
        data = _file.read() # reads the whole content
        conn.sendall(data) # sends all the data to the node
        _file.close() # closes the file
        #print("File Sent")
    #print("Done sending")
    conn.close() # closes the socket connection
# Purpose: Creates a socket to listen for nodes that wants to connect. Keeps accepting nodes and creates a thread to handle sending file section for that node.
def start_server():
    """Accept incoming peers on PORT and serve each with client_handler.

    NOTE(review): t1.join() after each accept makes peers be served one at a
    time, so the per-connection thread adds no real concurrency here.
    """
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.bind(("", PORT))
    s.listen() # listens for any node wanting to connect, if left empty then there is no limit to accepting nodes
    # infinite loop to keep accepting nodes
    while True:
        conn, addr = s.accept() # Accepts a node and returns back a tuple of (socket connection, src address). This accept function also blocks everything below until it connects to a node.
        #print("\nGot connection from", addr)
        t1 = Thread(target = client_handler, args = (conn, )) # creates a thread to control sending of files to the connected node
        t1.start() # starts the thread
        t1.join() # waits for the thread to finishing its task
    s.close() # closes the socket
# Bring the node up: start the server first so peers can connect, then run the
# client side to pull files; the sleeps give peers time to come online.
time.sleep(2)
t1 = Thread(target = start_server) # creates a thread to first start up the server part so it can start listening for connections
t1.daemon = True # sets the server thread's daemon to True since the server is in a infinite loop and in a accept blocking state. This way once the process to over, the thread is killed after.
t1.start() # starts the server thread
time.sleep(1)
Thread(target = start_client).start() # creates and starts a client thread to receive new files from other nodes
time.sleep(8)
#-----------------------------------------------------------------------------#
# Printing Network Nodes and Files (after sync) #
#-----------------------------------------------------------------------------#
print("\nAvailable devices in the network:")
print(nodes)
print("\nLists of all the files after sync:")
print(os.listdir(PATH))
| CristofearSantillan/Peer-to-Peer-Network | main.py | main.py | py | 12,489 | python | en | code | 0 | github-code | 90 |
70943698857 | import scrapy
from fed_scraper.items import FedScraperItem, serialize_url
from fed_scraper.parse_pdf import parse_pdf_from_url
import re
MEETING_DATES_FILE_PATH = "../meeting_dates.csv"
class BeigeBookArchiveSpider(scrapy.Spider):
    """Scrapes historical Beige Books from the Federal Reserve archive pages."""
    name = "beige_book_archive"
    allowed_domains = ["www.federalreserve.gov"]
    start_urls = [
        "https://www.federalreserve.gov/monetarypolicy/beige-book-archive.htm"
    ]
    # Pipeline chain (ascending priority): dedupe URLs, clean text, attach
    # meeting dates, drop incomplete items, write CSV, sort, dedupe rows,
    # then split the CSV.
    custom_settings = {
        "ITEM_PIPELINES": {
            "fed_scraper.pipelines.DuplicateUrlPipeline": 50,
            "fed_scraper.pipelines.TextPipeline": 100,
            "fed_scraper.pipelines.MeetingDatesPipeline": 150,
            "fed_scraper.pipelines.RemoveMissingPipeline": 175,
            "fed_scraper.pipelines.CsvPipeline": 200,
            "fed_scraper.pipelines.SortByMeetingDatePipeline": 250,
            "fed_scraper.pipelines.DuplicatesPipeline": 300,
            "fed_scraper.pipelines.SplitCsvPipeline": 400,
        }
    }
    def parse(self, response):
        """Follow each per-year archive link listed on the landing page."""
        anchors = response.css(".panel-body a")
        for anchor in anchors:
            year = anchor.css("::text").get()
            year_page_url = anchor.css("::attr(href)").get()
            yield response.follow(
                year_page_url,
                callback=self.parse_year_page,
                cb_kwargs={"year": year},
            )
    def parse_year_page(self, response, year):
        """Yield one beige-book item per table row, preferring the PDF link."""
        rows = response.css("tbody tr")
        for row in rows:
            # The table cell holds month/day; append the page's year.
            date_str = " ".join([row.css("td *::text").get(), year])
            beige_book = FedScraperItem(
                document_kind="beige_book",
                release_date=date_str,
            )
            anchors = row.css("a")
            # Prefer the PDF link (break on first match); otherwise remember
            # the last HTML link seen.
            for anchor in anchors:
                if bool(re.search(r"PDF", anchor.css("::text").get(), re.I)):
                    beige_book["url"] = anchor.css("::attr(href)").get()
                    break
                elif bool(re.search(r"HTML", anchor.css("::text").get(), re.I)):
                    beige_book["url"] = anchor.css("::attr(href)").get()
            # NOTE(review): the dot in r".pdf"/r".htm" is an unescaped regex
            # wildcard, so these match any char before "pdf"/"htm" — confirm
            # intended.
            if bool(re.search(r".pdf", beige_book.get("url"))):
                beige_book["text"] = parse_pdf_from_url(
                    serialize_url(beige_book.get("url"))
                )
                yield beige_book
            elif bool(re.search(r".htm", beige_book.get("url"))):
                yield response.follow(
                    beige_book["url"],
                    callback=self.parse_html_beige_book,
                    cb_kwargs={"beige_book": beige_book},
                )
    def parse_html_beige_book(self, response, beige_book):
        """Extract article text from an HTML beige-book page (fallback: all <p>)."""
        beige_book["text"] = response.css("#article *::text").getall()
        if beige_book["text"] == []:
            beige_book["text"] = response.css("p *::text").getall()
        yield beige_book
| rw19842/Fed-Scraper | fed_scraper/fed_scraper/spiders/beige_book_archive.py | beige_book_archive.py | py | 2,849 | python | en | code | 1 | github-code | 90 |
12350292707 | from typing import List
class Solution:
    def orangesRotting(self, grid: List[List[int]]) -> int:
        """Return the minutes until no fresh orange remains, or -1 if impossible.

        Grid cells: 0 = empty, 1 = fresh, 2 = rotten. Each minute every rotten
        orange rots its 4-directionally adjacent fresh neighbours (multi-source
        BFS, one minute per level). The grid is modified in place.

        BUG FIX: the original body was an unfinished stub that always
        returned 0.
        """
        rows, cols = len(grid), len(grid[0])
        fresh = 0
        frontier = []  # all rotten oranges at the current minute
        for r in range(rows):
            for c in range(cols):
                if grid[r][c] == 2:
                    frontier.append((r, c))
                elif grid[r][c] == 1:
                    fresh += 1
        minutes = 0
        # Breadth-first spread; stop early once nothing fresh remains.
        while fresh and frontier:
            next_frontier = []
            for r, c in frontier:
                for nr, nc in ((r + 1, c), (r - 1, c), (r, c + 1), (r, c - 1)):
                    if 0 <= nr < rows and 0 <= nc < cols and grid[nr][nc] == 1:
                        grid[nr][nc] = 2
                        fresh -= 1
                        next_frontier.append((nr, nc))
            if next_frontier:
                minutes += 1
            frontier = next_frontier
        # Unreachable fresh oranges mean the rot can never finish.
        return minutes if fresh == 0 else -1
a = Solution()
# Sample grids; each assignment overwrites the previous one, so only the last
# grid ([[0,2]]) is actually passed to orangesRotting below.
b = [[2,1,1],[1,1,0],[0,1,1]]
b = [[2,1,1],[0,1,1],[1,0,1]]
b = [[0,2]]
print(a.orangesRotting(b)) | Panamera-Turbo/MyPython | leetcode/994-orange.py | 994-orange.py | py | 345 | python | en | code | 0 | github-code | 90 |
9594035472 | import os
from patient import Patient
import datetime
import matplotlib.pyplot as plt
import discord
from dotenv import load_dotenv
import asyncio
from discord.ext import commands, tasks
load_dotenv()
TOKEN = os.getenv('DISCORD_TOKEN')
GUILD = os.getenv('DISCORD_GUILD')
bot = commands.Bot(command_prefix='!')
@bot.event
async def on_ready():
    """Log that the bot has connected and is ready to process commands."""
    print('Bot is ready.')
patientlist = {}
@bot.command(name='new-prescription')
async def new_prescription(ctx, patient: discord.Member):
    """Interactively collect a prescription (drug, amount, HH:MM time) and DM the patient.

    NOTE(review): check=None accepts the next message from *anyone in any
    channel*, not just the command author — confirm that is acceptable.
    """
    await ctx.send(f"What medication is to be prescribed?")
    medication = await bot.wait_for("message", check=None)
    await ctx.send(f"What is the amount of {medication.content} that is to be taken?")
    amount = await bot.wait_for("message", check=None)
    await ctx.send("Enter the time to set the patient's reminder.")
    time_entry = await bot.wait_for("message", check=None)
    # Expects "HH:MM"; a malformed reply raises ValueError here.
    hour, minute = map(int, time_entry.content.split(':'))
    reminder = datetime.time(hour, minute)
    await ctx.send(f"Prescription set for {patient.name}!")
    # Keyed by Discord member id; a new prescription replaces any existing one.
    patientlist[patient.id] = Patient(patient, medication.content, amount.content, reminder)
    embed=discord.Embed(title="A new prescription reminder has been set for you!", description="Prescription Number", color=0x539cea)
    embed.set_thumbnail(url="https://image.flaticon.com/icons/png/512/2397/2397639.png")
    embed.add_field(name="Medication", value=medication.content, inline=True)
    embed.add_field(name="Amount", value=amount.content, inline=True)
    embed.add_field(name="Time", value=reminder.strftime("%I:%M %p"), inline=True)
    await patient.send(embed=embed)
@bot.command(name='info')
async def prescription_info(ctx, patient: discord.Member):
    """Render the patient's prescription as a table image and post it.

    NOTE(review): raises KeyError if the patient has no stored prescription.
    """
    #define figure and axes
    fig, ax = plt.subplots()
    #create values for table
    table_data=[
        ["Patient Name", patient.name],
        ["Prescription Number", None],
        ["Medication", patientlist[patient.id].medication],
        ["Amount", patientlist[patient.id].amount],
        ["Time", patientlist[patient.id].reminder.strftime("%I:%M %p")]
    ]
    #create table
    table = ax.table(cellText=table_data, loc='center')
    #modify table
    table.set_fontsize(9)
    table.scale(1,4)
    ax.axis('off')
    #display table
    plt.savefig('images/patientinfo.png', bbox_inches='tight', dpi=150)
    # Re-upload the saved PNG as the embed image.
    file = discord.File("images/patientinfo.png", filename="patientinfo.png")
    embed = discord.Embed()
    embed.set_image(url="attachment://patientinfo.png")
    await ctx.send(file=file, embed=embed)
@tasks.loop(seconds=60.0)
async def remindertask():
    """Every minute, DM each patient whose reminder time matches the current minute.

    The patient then has 15 seconds to reply "Done" in DMs; otherwise a
    missed-reminder notice is posted to the staff channel.
    """
    for key in patientlist:
        now = datetime.datetime.now()
        # Compare at minute precision so the reminder fires in the scheduled minute.
        if now.strftime("%H:%M") == patientlist[key].reminder.strftime("%H:%M"):
            embed = discord.Embed(title="It is time to take your medication!", description="Prescription Number", color=0x539cea)
            embed.add_field(name="Medication", value=patientlist[key].medication, inline=True)
            embed.add_field(name="Amount", value=patientlist[key].amount, inline=True)
            embed.set_footer(text="Respond by typing \"Done\" confirm that you have taken your medication!")
            dm = await patientlist[key].member.send(embed=embed)
            def check(m):
                # Accept only a DM reply whose content is exactly "Done".
                return isinstance(m.channel, discord.DMChannel) and m.content == "Done"
            try:
                confirmation = await bot.wait_for("message", timeout=15.0, check=check)
                if confirmation:
                    await patientlist[key].member.send("Confirmed!")
            except asyncio.TimeoutError:
                # No confirmation in time: alert the (hard-coded) staff channel.
                channel = bot.get_channel(805167613272915989)
                await channel.send(f"{patientlist[key].member.name} missed their reminder.")
        else:
            # BUG FIX: the original used the bare expression `next` (a no-op
            # reference to the builtin); `continue` states the intent.
            continue
@bot.command(name='start-reminders')
async def start_reminder(ctx):
    """Kick off the background reminder loop (invoke once via !start-reminders)."""
    remindertask.start()
bot.run(TOKEN) | benjshao/MediBot | bot.py | bot.py | py | 4,011 | python | en | code | 0 | github-code | 90 |
2401327859 |
from camera import Camera
import open3d as o3d
import cv2
import os
if __name__ == "__main__":
    # Grab one RGB-D frame, build a cropped point cloud, and save it as a
    # reference .pcd; then keep streaming frames and visualizing them live.
    cam = Camera([])
    pcd = o3d.geometry.PointCloud()
    vis = o3d.visualization.Visualizer()
    vis.create_window("Point Clouds", width=848, height=480)
    added = True
    rgb_img, depth_img = cam.stream(colored_depth=False)
    xyz = cam.generate(depth_img)
    xyz = cam.cropPoints()
    pcd.points = o3d.utility.Vector3dVector(xyz)
    ref_path = os.getcwd()
    # NOTE(review): assumes ./pcd_data exists; write fails otherwise.
    o3d.io.write_point_cloud(ref_path+"/pcd_data/test.pcd", pcd)
    while 1:
        rgb_img, depth_img = cam.stream(colored_depth=False)
        xyz = cam.generate(depth_img)
        # cam.detectCharuco()
        xyz = cam.cropPoints()
        pcd.points = o3d.utility.Vector3dVector(xyz)
        ## visualize rgb and depth image
        cv2.imshow("rgb", rgb_img)
        cv2.imshow("depth", depth_img)
        cv2.waitKey(1)
        ## visualize point cloud caculated from the depth image
        # The geometry is added to the visualizer once; later iterations
        # only update it in place.
        if added == True:
            vis.add_geometry(pcd)
            added = False
        vis.update_geometry(pcd)
        vis.poll_events()
vis.update_renderer() | juyong0000/pose_estimation | utilities/HowToCapturePcd/capture_pcd/save_pcd.py | save_pcd.py | py | 1,143 | python | en | code | 0 | github-code | 90 |
33990635386 | TC=int(input())
for i in range(TC):
    # N intervals per test case, each line giving "left right".
    N = int(input())
    ind = [list(map(int, input().split())) for _ in range(N)]
    res = 0
    # Count unordered pairs where one interval strictly contains the other.
    for j in range(N - 1):
        for k in range(j + 1, N):
            l1, r1 = ind[j]
            l2, r2 = ind[k]
            if ((l1 > l2 and r1 < r2) or (l1 < l2 and r1 > r2)):
                res+=1
print(f'#{i+1} {res}') | eunjakim98/Algorithm_Python | SWEA/D3/10580. 전봇대/전봇대.py | 전봇대.py | py | 364 | python | en | code | 0 | github-code | 90 |
27705974855 | import os
import dgl
import torch
import random
import numpy as np
import pandas as pd
import scipy.sparse as sp
from scipy.spatial import distance_matrix
from sklearn.metrics import roc_auc_score, f1_score, accuracy_score
import torch.nn.functional as F
from torch.nn.modules.loss import _Loss
import torch.optim as optim
def encode_onehot(labels):
    """One-hot encode *labels*.

    Each distinct label value is assigned one column (column order follows
    set iteration order over the distinct labels, as in the original).

    Args:
        labels: iterable of hashable label values.

    Returns:
        np.ndarray of shape (len(labels), num_classes), dtype int32, with
        exactly one 1 per row.
    """
    unique_labels = set(labels)
    identity = np.identity(len(unique_labels))
    onehot_for = {label: identity[pos, :] for pos, label in enumerate(unique_labels)}
    return np.array([onehot_for[label] for label in labels], dtype=np.int32)
def build_relationship(x, thresh=0.25):
    """Build a directed edge list from pairwise feature similarity.

    For each sample, links it to every sample whose inverse-distance
    similarity exceeds ``thresh`` times its second-largest similarity
    (the largest being the self-similarity of 1).

    Args:
        x: pandas DataFrame of features (rows = samples) — this is how
           load_bail calls it; verify for other callers.
        thresh: fraction of the per-row best neighbor similarity a pair
            must exceed to become an edge.

    Returns:
        np.ndarray of shape (num_edges, 2) of [src, dst] index pairs.
    """
    # x.T.T is just x; similarity = 1 / (1 + Euclidean distance).
    # NOTE(review): row/column labels are taken from x.T.columns, i.e.
    # the row index of x — presumably sample ids; confirm.
    df_euclid = pd.DataFrame(1 / (1 + distance_matrix(x.T.T, x.T.T)), columns=x.T.columns, index=x.T.columns)
    df_euclid = df_euclid.to_numpy()
    idx_map = []
    for ind in range(df_euclid.shape[0]):
        # Second-largest entry: the largest is the diagonal self-similarity.
        max_sim = np.sort(df_euclid[ind, :])[-2]
        neig_id = np.where(df_euclid[ind, :] > thresh*max_sim)[0]
        import random
        # Fixed seed so the (shuffled) edge order is reproducible per row.
        random.seed(912)
        random.shuffle(neig_id)
        for neig in neig_id:
            if neig != ind:  # skip self-loops
                idx_map.append([ind, neig])
    # print('building edge relationship complete')
    idx_map = np.array(idx_map)
    return idx_map
def load_pokec(dataset, sens_attr, predict_attr, path="./datasets/pokec/", label_number=1000, sens_number=500,
               seed=19, test_idx=False):
    """Load a Pokec social-network dataset and build train/val/test splits.

    Reads ``<path>/<dataset>.csv`` (node features + labels) and
    ``<path>/<dataset>_relationship.txt`` (edge list of user_id pairs),
    builds a symmetric self-looped adjacency matrix, and splits labeled
    nodes 50/25/25 into train/val/test (train capped at ``label_number``).

    Args:
        dataset: base filename of the csv/edge files.
        sens_attr: column holding the sensitive attribute.
        predict_attr: column holding the prediction target (removed from
            the feature set; ``user_id`` is removed as well).
        path: directory containing the data files.
        label_number: maximum number of training nodes.
        sens_number: number of nodes used for sensitive-attribute training.
        seed: shuffle seed for the splits.
        test_idx: if True, the test set is all labeled nodes past
            ``label_number`` and the validation set is aliased to it.

    Returns:
        (adj, features, labels, idx_train, idx_val, idx_test, sens,
        idx_sens_train) — adj is a scipy sparse matrix, the rest torch
        tensors (indices as LongTensor, sens as FloatTensor).
    """
    # print('Loading {} dataset from {}'.format(dataset, path))
    idx_features_labels = pd.read_csv(os.path.join(path, "{}.csv".format(dataset)))
    header = list(idx_features_labels.columns)
    header.remove("user_id")
    # header.remove(sens_attr)
    header.remove(predict_attr)
    features = sp.csr_matrix(idx_features_labels[header], dtype=np.float32)
    labels = idx_features_labels[predict_attr].values
    # build graph: map raw user_ids to contiguous node indices
    idx = np.array(idx_features_labels["user_id"], dtype=int)
    idx_map = {j: i for i, j in enumerate(idx)}
    edges_unordered = np.genfromtxt(os.path.join(path, "{}_relationship.txt".format(dataset)), dtype=int)
    edges = np.array(list(map(idx_map.get, edges_unordered.flatten())),
                     dtype=int).reshape(edges_unordered.shape)
    adj = sp.coo_matrix((np.ones(edges.shape[0]), (edges[:, 0], edges[:, 1])),
                        shape=(labels.shape[0], labels.shape[0]),
                        dtype=np.float32)
    # build symmetric adjacency matrix
    adj = adj + adj.T.multiply(adj.T > adj) - adj.multiply(adj.T > adj)
    # features = normalize(features)
    adj = adj + sp.eye(adj.shape[0])  # add self-loops
    features = torch.FloatTensor(np.array(features.todense()))
    labels = torch.LongTensor(labels)
    import random
    random.seed(seed)
    # Only nodes with a non-negative label are considered labeled.
    label_idx = np.where(labels >= 0)[0]
    random.shuffle(label_idx)
    idx_train = label_idx[:min(int(0.5 * len(label_idx)), label_number)]
    idx_val = label_idx[int(0.5 * len(label_idx)):int(0.75 * len(label_idx))]
    if test_idx:
        idx_test = label_idx[label_number:]
        idx_val = idx_test
    else:
        idx_test = label_idx[int(0.75 * len(label_idx)):]
    sens = idx_features_labels[sens_attr].values
    # Restrict the test set to nodes with a known sensitive attribute.
    sens_idx = set(np.where(sens >= 0)[0])
    idx_test = np.asarray(list(sens_idx & set(idx_test)))
    sens = torch.FloatTensor(sens)
    idx_sens_train = list(sens_idx - set(idx_val) - set(idx_test))
    random.seed(seed)
    random.shuffle(idx_sens_train)
    idx_sens_train = torch.LongTensor(idx_sens_train[:sens_number])
    idx_train = torch.LongTensor(idx_train)
    idx_val = torch.LongTensor(idx_val)
    idx_test = torch.LongTensor(idx_test)
    return adj, features, labels, idx_train, idx_val, idx_test, sens, idx_sens_train
def load_bail(dataset, sens_attr="WHITE", predict_attr="RECID", path="./dataset/bail/", label_number=1000):
    """Load the bail (recidivism) dataset and build stratified splits.

    Reads ``<path>/<dataset>.csv``; edges come from
    ``<path>/<dataset>_edges.txt`` if present, otherwise they are built
    from feature similarity via ``build_relationship`` and cached there.
    Labeled nodes are split 50/25/25 per class (train capped at
    ``label_number``, balanced across the two classes).

    Args:
        dataset: base filename of the csv/edge files.
        sens_attr: sensitive-attribute column (kept in the features).
        predict_attr: target column (removed from the features).
        path: directory containing the data files.
        label_number: maximum total number of training nodes.

    Returns:
        (adj, features, labels, idx_train, idx_val, idx_test, sens) —
        adj is a scipy sparse matrix, the rest torch tensors.
    """
    # print('Loading {} dataset from {}'.format(dataset, path))
    idx_features_labels = pd.read_csv(os.path.join(path,"{}.csv".format(dataset)))
    header = list(idx_features_labels.columns)
    header.remove(predict_attr)
    # header.remove(sens_attr)
    # # Normalize School
    # idx_features_labels['SCHOOL'] = 2*(idx_features_labels['SCHOOL']-idx_features_labels['SCHOOL'].min()).div(idx_features_labels['SCHOOL'].max() - idx_features_labels['SCHOOL'].min()) - 1
    # # Normalize RULE
    # idx_features_labels['RULE'] = 2*(idx_features_labels['RULE']-idx_features_labels['RULE'].min()).div(idx_features_labels['RULE'].max() - idx_features_labels['RULE'].min()) - 1
    # # Normalize AGE
    # idx_features_labels['AGE'] = 2*(idx_features_labels['AGE']-idx_features_labels['AGE'].min()).div(idx_features_labels['AGE'].max() - idx_features_labels['AGE'].min()) - 1
    # # Normalize TSERVD
    # idx_features_labels['TSERVD'] = 2*(idx_features_labels['TSERVD']-idx_features_labels['TSERVD'].min()).div(idx_features_labels['TSERVD'].max() - idx_features_labels['TSERVD'].min()) - 1
    # # Normalize FOLLOW
    # idx_features_labels['FOLLOW'] = 2*(idx_features_labels['FOLLOW']-idx_features_labels['FOLLOW'].min()).div(idx_features_labels['FOLLOW'].max() - idx_features_labels['FOLLOW'].min()) - 1
    # # Normalize TIME
    # idx_features_labels['TIME'] = 2*(idx_features_labels['TIME']-idx_features_labels['TIME'].min()).div(idx_features_labels['TIME'].max() - idx_features_labels['TIME'].min()) - 1
    # build relationship: reuse cached edge list when available
    if os.path.exists(f'{path}/{dataset}_edges.txt'):
        edges_unordered = np.genfromtxt(f'{path}/{dataset}_edges.txt').astype('int')
    else:
        edges_unordered = build_relationship(idx_features_labels[header], thresh=0.6)
        np.savetxt(f'{path}/{dataset}_edges.txt', edges_unordered)
    features = sp.csr_matrix(idx_features_labels[header], dtype=np.float32)
    labels = idx_features_labels[predict_attr].values
    idx = np.arange(features.shape[0])
    idx_map = {j: i for i, j in enumerate(idx)}
    edges = np.array(list(map(idx_map.get, edges_unordered.flatten())),
                     dtype=int).reshape(edges_unordered.shape)
    adj = sp.coo_matrix((np.ones(edges.shape[0]), (edges[:, 0], edges[:, 1])),
                        shape=(labels.shape[0], labels.shape[0]),
                        dtype=np.float32)
    # build symmetric adjacency matrix
    adj = adj + adj.T.multiply(adj.T > adj) - adj.multiply(adj.T > adj)
    # features = normalize(features)
    adj = adj + sp.eye(adj.shape[0])  # add self-loops
    features = torch.FloatTensor(np.array(features.todense()))
    labels = torch.LongTensor(labels)
    import random
    random.seed(20)  # fixed split seed for reproducibility
    label_idx_0 = np.where(labels==0)[0]
    label_idx_1 = np.where(labels==1)[0]
    random.shuffle(label_idx_0)
    random.shuffle(label_idx_1)
    # Class-balanced 50/25/25 split; train takes at most label_number//2 per class.
    idx_train = np.append(label_idx_0[:min(int(0.5 * len(label_idx_0)), label_number//2)], label_idx_1[:min(int(0.5 * len(label_idx_1)), label_number//2)])
    idx_val = np.append(label_idx_0[int(0.5 * len(label_idx_0)):int(0.75 * len(label_idx_0))], label_idx_1[int(0.5 * len(label_idx_1)):int(0.75 * len(label_idx_1))])
    idx_test = np.append(label_idx_0[int(0.75 * len(label_idx_0)):], label_idx_1[int(0.75 * len(label_idx_1)):])
    sens = idx_features_labels[sens_attr].values.astype(int)
    sens = torch.FloatTensor(sens)
    idx_train = torch.LongTensor(idx_train)
    idx_val = torch.LongTensor(idx_val)
    idx_test = torch.LongTensor(idx_test)
    return adj, features, labels, idx_train, idx_val, idx_test, sens
def normalize(mx):
    """Row-normalize a sparse matrix so every nonzero row sums to 1.

    Rows whose sum is zero are left all-zero (their inverse row sum is
    replaced by 0 instead of inf).

    Args:
        mx: scipy sparse matrix.

    Returns:
        The row-normalized sparse matrix.
    """
    row_sums = np.array(mx.sum(1)).flatten()
    inv_sums = np.power(row_sums, -1)
    inv_sums[np.isinf(inv_sums)] = 0.
    return sp.diags(inv_sums).dot(mx)
def feature_norm(features):
    """Linearly rescale each column of a torch tensor into [-1, 1].

    Column minima map to -1 and column maxima to +1; constant columns
    produce NaN (division by a zero range), as in the original.
    """
    col_min, _ = features.min(dim=0)
    col_max, _ = features.max(dim=0)
    span = col_max - col_min
    return (features - col_min).div(span).mul(2.0).sub(1.0)
def train_vanilla(model, optimizer, criterion, epochs, data, save_name):
    """Full-batch training loop that checkpoints the best validation loss.

    Args:
        model: module returning (hidden, logits) for (features, edge_index).
        optimizer: torch optimizer over the model's parameters.
        criterion: loss over logits vs. float targets (labels are unsqueezed
            to shape (N, 1) and cast to float before the call).
        epochs: number of training epochs.
        data: object exposing features, edge_index, labels, idx_train, idx_val.
        save_name: path where the best state_dict is written.
    """
    best_loss = 100  # NOTE(review): assumes the validation loss starts below 100
    for epoch in range(epochs):
        model.train()
        optimizer.zero_grad()
        h, output = model(data.features, data.edge_index)
        loss_train = criterion(output[data.idx_train], data.labels[data.idx_train].unsqueeze(1).float())
        loss_train.backward()
        optimizer.step()
        model.eval()
        # NOTE(review): validation runs without torch.no_grad() here.
        h, output = model(data.features, data.edge_index)
        loss_val = criterion(output[data.idx_val], data.labels[data.idx_val].unsqueeze(1).float())
        if loss_val.item() < best_loss:
            best_loss = loss_val.item()
            torch.save(model.state_dict(), save_name)
def train_student(model, optimizer, criterion_bce, criterion_kd, args, data, save_name, soft_target):
    """Train a student with BCE supervision plus knowledge distillation.

    The two training losses are combined with adaptive weights produced
    by AdaWeight (initialized from the first epoch's losses). Model
    selection is by validation loss, except for the (pokec_z, gin)
    configuration, which trades validation accuracy against the fairness
    gaps (parity + equality).

    Args:
        model: module returning (hidden, logits) for (features, edge_index).
        optimizer: torch optimizer over the student's parameters.
        criterion_bce: supervised loss on logits vs. (N, 1) float labels.
        criterion_kd: distillation loss between hidden states and soft_target.
        args: namespace with epochs, lr_w, gamma, dataset, model.
        data: object exposing features, edge_index, labels, sens and the
            idx_train / idx_val index tensors.
        save_name: path where the selected state_dict is written.
        soft_target: teacher representations aligned with the node order.
    """
    best_loss = 100   # NOTE(review): assumes the validation loss starts below 100
    best_result = 0
    for epoch in range(args.epochs):
        model.train()
        optimizer.zero_grad()
        h, output = model(data.features, data.edge_index)
        loss_bce_train = criterion_bce(output[data.idx_train], data.labels[data.idx_train].unsqueeze(1).float())
        loss_kd_train = criterion_kd(h[data.idx_train], soft_target[data.idx_train])
        if epoch == 0:
            # Anchor the adaptive weighting to the first-epoch loss scales.
            weight_compute = AdaWeight(loss_bce_train, loss_kd_train, lr=args.lr_w, gamma=args.gamma)
        lad1, lad2 = weight_compute.compute(loss_bce_train.item(), loss_kd_train.item())
        loss_train = lad1 * loss_bce_train + lad2 * loss_kd_train
        loss_train.backward()
        optimizer.step()
        model.eval()
        with torch.no_grad():
            h, output = model(data.features, data.edge_index)
            loss_bce_val = criterion_bce(output[data.idx_val], data.labels[data.idx_val].unsqueeze(1).float())
            loss_kd_val = criterion_kd(h[data.idx_val], soft_target[data.idx_val])
            loss_val = loss_bce_val + loss_kd_val
            output_preds = (output.squeeze() > 0).type_as(data.labels)  # threshold logits at 0
            acc = accuracy_score(data.labels[data.idx_val].cpu().numpy(), output_preds[data.idx_val].cpu().numpy())
            parity, equality = fair_metric(output_preds[data.idx_val].cpu().numpy(),
                                           data.labels[data.idx_val].cpu().numpy(),
                                           data.sens[data.idx_val].numpy())
        if args.dataset == 'pokec_z' and args.model == 'gin':
            # Special-cased selection: maximize accuracy minus 3x the fairness gaps.
            if acc - 3*(parity + equality) > best_result:
                best_result = acc - 3*(parity + equality)
                torch.save(model.state_dict(), save_name)
        else:
            if loss_val.item() < best_loss:
                best_loss = loss_val.item()
                torch.save(model.state_dict(), save_name)
        # print(f"[Train] Epoch {epoch}:bce_loss: {loss_bce_train.item():.4f} | kd_loss: {loss_kd_train.item():.4f} "
        #       f"| total_loss: {loss_train.item():.4f} | lad1: {lad1:.4f}, lad2: {lad2:.4f}")
def evaluation(model, weight_path, data):
    """Load a checkpoint and score the model on the test split.

    Args:
        model: module returning (hidden, logits) for (features, edge_index).
        weight_path: path of a state_dict saved by the training loops.
        data: object exposing features, edge_index, labels, sens, idx_test.

    Returns:
        (auc, f1, accuracy, statistical parity, equal opportunity) on
        data.idx_test; AUC is computed from raw logits, the other metrics
        from predictions thresholded at logit 0.
    """
    model.load_state_dict(torch.load(weight_path))
    model.eval()
    with torch.no_grad():
        h, output = model(data.features, data.edge_index)
    output_preds = (output.squeeze() > 0).type_as(data.labels)  # threshold logits at 0
    auc_test = roc_auc_score(data.labels.cpu().numpy()[data.idx_test.cpu()],
                             output.detach().cpu().numpy()[data.idx_test.cpu()])
    f1_test = f1_score(data.labels[data.idx_test].cpu().numpy(), output_preds[data.idx_test].cpu().numpy())
    acc_test = accuracy_score(data.labels[data.idx_test].cpu().numpy(), output_preds[data.idx_test].cpu().numpy())
    dp_test, eo_test = fair_metric(output_preds[data.idx_test].cpu().numpy(), data.labels[data.idx_test].cpu().numpy(),
                                   data.sens[data.idx_test].numpy())
    return auc_test, f1_test, acc_test, dp_test, eo_test
def fair_metric(pred, labels, sens):
    """Compute group-fairness gaps for binary predictions.

    Args:
        pred: 1-D numpy array of binary predictions.
        labels: 1-D numpy array of binary ground-truth labels.
        sens: 1-D numpy array of binary sensitive attributes (0/1).

    Returns:
        (parity, equality) as Python floats:
        parity  = |P(pred=1 | s=0) - P(pred=1 | s=1)|
        equality = the same difference restricted to samples with label 1.
    """
    def positive_rate(mask):
        # Fraction of predicted positives within the masked group.
        return pred[mask].sum() / mask.sum()

    group0 = sens == 0
    group1 = sens == 1
    parity = np.abs(positive_rate(group0) - positive_rate(group1))
    equality = np.abs(positive_rate(group0 & (labels == 1)) -
                      positive_rate(group1 & (labels == 1)))
    return parity.item(), equality.item()
class AdaWeight:
    """Adaptive two-loss weighting with exponential smoothing.

    Weights are derived from each loss's progress relative to its initial
    value, sharpened by ``gamma``, and smoothed toward the previous weight
    with rate ``lr``. The two weights always sum to 1.
    """

    def __init__(self, loss1_init, loss2_init, weight_loss1=0.5, lr=0.025, gamma=0.25):
        # loss1_init / loss2_init are the first-epoch loss tensors
        # (``.item()`` is called on them in compute()).
        self.loss1_init = loss1_init
        self.loss2_init = loss2_init
        self.weight_loss1 = weight_loss1  # current weight of the first loss
        self.lr = lr          # smoothing rate toward the new target weight
        self.gamma = gamma    # sharpening exponent on the relative losses

    def compute(self, loss1, loss2):
        """Update and return (weight_loss1, weight_loss2) for scalar losses."""
        progress1 = (loss1 / self.loss1_init.item()) ** self.gamma
        progress2 = (loss2 / self.loss2_init.item()) ** self.gamma
        target1 = progress1 / (progress1 + progress2)
        self.weight_loss1 = self.lr * target1 + (1 - self.lr) * self.weight_loss1
        self.weight_loss2 = 1 - self.weight_loss1
        return self.weight_loss1, self.weight_loss2
class ContLoss(_Loss):
    """Symmetric InfoNCE-style contrastive loss between two embedding views.

    Row i of ``h1`` and row i of ``h2`` are treated as a positive pair;
    all other inter-view pairs and all intra-view pairs (excluding the
    self-similarity) are negatives.

    Args:
        reduction: 'mean' averages the per-sample losses; anything else sums.
        tem: softmax temperature applied to the cosine similarities.
    """

    def __init__(self, reduction='mean', tem: float = 0.5):
        super(ContLoss, self).__init__()
        self.reduction = reduction
        self.tem: float = tem

    def sim(self, h1: torch.Tensor, h2: torch.Tensor):
        """Pairwise cosine-similarity matrix between rows of h1 and h2."""
        h1 = F.normalize(h1)
        h2 = F.normalize(h2)
        return torch.mm(h1, h2.t())

    def loss(self, h1: torch.Tensor, h2: torch.Tensor):
        """Per-sample NCE loss with h1 rows as anchors and h2 rows as positives."""
        f = lambda x: torch.exp(x / self.tem)
        intra_sim = f(self.sim(h1, h1))
        inter_sim = f(self.sim(h1, h2))
        # Denominator: all inter-view pairs plus the intra-view pairs,
        # with the intra-view self-similarity (diagonal) removed.
        return -torch.log(inter_sim.diag() / (inter_sim.sum(1) + intra_sim.sum(1) - intra_sim.diag()))

    def forward(self, h1: torch.Tensor, h2: torch.Tensor):
        l1 = self.loss(h1, h2)
        l2 = self.loss(h2, h1)
        # BUG FIX: was `(l1 + l2) / 0.5`, which doubles the symmetric loss;
        # the two directions should be averaged (`* 0.5`).
        ret = (l1 + l2) * 0.5
        ret = ret.mean() if self.reduction == 'mean' else ret.sum()
        return ret
| ZzoomD/FairGKD | utils.py | utils.py | py | 13,623 | python | en | code | 0 | github-code | 90 |
34980334805 | #!/usr/bin/env python
import sys
from functools import reduce
from collections import Counter
from collections import defaultdict
def reducer():
    """Aggregate mapper output and print the top-20 hashtags.

    Input: stdin lines of the form "hashtag<TAB>count".
    Output: up to 20 lines "hashtag<TAB>total", most frequent first.
    Malformed lines (no tab, non-integer count) are skipped.
    """
    top_hashtags = defaultdict(int)
    for line in sys.stdin:
        line = line.strip()
        if not line:
            continue
        try:
            # BUG FIX: the mapper's field separator is a tab ('\t');
            # the old code split on the literal two-character string
            # '/t', so counts were never parsed and nothing aggregated.
            hashtag, count = line.rsplit('\t', 1)
            top_hashtags[hashtag] += int(count)
        except ValueError:
            continue
    for hash_t, num in Counter(top_hashtags).most_common(20):
        print("{}\t{}".format(hash_t, num))
if __name__ == "__main__":
reducer()
| dannypaz/class | dsci-6007/4.3 - MapReduce Intro/lab-4.3-top-ten-hashtags-Jonathan-Jaime.py | lab-4.3-top-ten-hashtags-Jonathan-Jaime.py | py | 912 | python | en | code | 3 | github-code | 90 |
class User:
    """A bank-account holder with a running balance."""

    # Shared across all accounts.
    bank_name = "International Bank"

    def __init__(self, name):
        self.name = name
        self.account_balance = 0  # every new account starts empty

    def make_deposit(self, deposited_amount):
        """Add funds to this account."""
        self.account_balance = self.account_balance + deposited_amount

    def make_withdrawal(self, withdrew_amount):
        """Remove funds from this account (no overdraft check)."""
        self.account_balance = self.account_balance - withdrew_amount

    def display_user_balance(self):
        """Print the holder's name and current balance."""
        print(f"User: {self.name}, Account Balance: {self.account_balance}")

    def transfer_money(self, account_balance, user):
        """Move funds to another user, then print both balances."""
        self.make_withdrawal(account_balance)
        user.make_deposit(account_balance)
        self.display_user_balance()
        user.display_user_balance()
#first user instances
amani = User("Amani Mkamba")
amani.make_deposit(500)
amani.make_deposit(600)
amani.make_withdrawal(100)
amani.display_user_balance()
#second user instances
michael = User("Michael Baruti")
michael.make_deposit(1000)
michael.make_deposit(500)
michael.make_withdrawal(200)
michael.make_withdrawal(100)
michael.display_user_balance()
#third user instances
angel = User("Angel Gabriel")
angel.make_deposit(100)
angel.make_withdrawal(50)
angel.make_withdrawal(80)
angel.make_withdrawal(30)
angel.display_user_balance()
amani.transfer_money(100, angel)
| AmanielyMkamba/python_algo | user_assignment.py | user_assignment.py | py | 1,337 | python | en | code | 0 | github-code | 90 |
69966761898 | import tensorflow as tf
import numpy as np
from pathlib import Path
import collections
from tensorflow.contrib import rnn
import pickle
import os
import datetime
import unicodedata
from tensorflow.python.client import device_lib
import matplotlib.pyplot as plt
import re
# from sklearn.utils import shuffle as shuffle
import random
import string
from FileManager import FileManager, TextType
from LSTM import LSTM
from model_configuration import *
import sys
def build_dictionaries(words):
    """Build forward/inverse vocabulary maps ordered by frequency.

    Args:
        words: iterable of symbols (here: characters).

    Returns:
        (symbol -> index, index -> symbol) dicts where index 0 is the
        most frequent symbol, 1 the next, and so on.
    """
    ranked = collections.Counter(words).most_common()
    symbol_to_index = {symbol: index for index, (symbol, _) in enumerate(ranked)}
    index_to_symbol = {index: symbol for symbol, index in symbol_to_index.items()}
    return symbol_to_index, index_to_symbol
print('Tensorflow Version: ' + tf.__version__)
load_model_flag = sys.argv[1]
print ("Load train model " , load_model_flag)
file_manager = FileManager()
training_data = file_manager.get_cleaned_text(text_type=TextType.TRAIN)
testing_data = file_manager.get_cleaned_text(text_type=TextType.TEST)
print('Number of characters for training: {}'.format(len(training_data)))
print('Number of characters for testing: {}'.format(len(testing_data)))
char_list = list(training_data)
char_to_number_dict, number_to_char_dict = build_dictionaries(char_list)
num_of_unique_chars = len(number_to_char_dict)
print('Number of unique characters: {}'.format(num_of_unique_chars))
text_as_numbers = []
for x in char_list:
text_as_numbers.append(char_to_number_dict[x])
text_as_numbers = np.array(text_as_numbers)
print('Configuration : epocs %s layers %s hidden %s dropout %s' % (num_of_epochs , num_layers, n_hidden, use_droupout))
lstm_model = LSTM(n_hidden, num_layers, batch_size, learning_rate,
sequence_length, num_of_unique_chars)
with tf.variable_scope(tf.get_variable_scope(), reuse=True):
test_lstm_model = LSTM(n_hidden, num_layers, batch_size, learning_rate,
sequence_length, num_of_unique_chars, is_test_mode=True)
num_batches = int(len(text_as_numbers)/(batch_size * sequence_length)) + 1
# Split up text indices into subarrays, of equal size
batches = np.array_split(text_as_numbers, num_batches)
# Reshape each split into [batch_size, training_seq_len]
batches = [np.resize(x, [batch_size, sequence_length]) for x in batches]
skip_training = False
saver = tf.train.Saver(tf.global_variables())
# Initialize all variables
init = tf.global_variables_initializer()
train_loss = []
with tf.Session() as sess:
if not load_model_flag:
sess.run(init)
pretrain_time = datetime.datetime.now()
print('current time: ' + str(pretrain_time) + ' start training:')
else:
saver.restore(sess, file_manager.trained_model_path)
skip_training = True
print('trained model was loaded, skipped training')
if not skip_training:
for epoch in range(num_of_epochs):
random.shuffle(batches)
targets = [np.roll(x, -1, axis=1) for x in batches]
print('Epoch {}/{}:'.format(epoch + 1, num_of_epochs))
state = sess.run(lstm_model.initial_state)
for j, batch in enumerate(batches):
training_dict = {lstm_model.x_data: batch, lstm_model.y_output: targets[j]}
for i, (c, h) in enumerate(lstm_model.initial_state):
training_dict[c] = state[i].c
training_dict[h] = state[i].h
temp_loss, state, _ = sess.run([lstm_model.cost, lstm_model.final_state, lstm_model.train_op],
feed_dict=training_dict)
train_loss.append(temp_loss)
if j % 20 == 0:
print('Epoch: {}, Batch: {}/{}, Loss: {:.2f}'.format(epoch + 1, j+1, num_batches + 1, temp_loss))
if not load_model_flag:
save_path = saver.save(sess, file_manager.trained_model_path)
print('Done! training took: ' + str(datetime.datetime.now() - pretrain_time))
pretrain_time = datetime.datetime.now()
print('current time: ' + str(pretrain_time)+' start Testing:')
accuracy, cross_entropy = test_lstm_model.test_model(sess, testing_data, number_to_char_dict, char_to_number_dict)
print('accuracy: {}, cross entropy: {}'.format(accuracy, cross_entropy))
print('Done! testing took: ' + str(datetime.datetime.now() - pretrain_time))
pretrain_time = datetime.datetime.now()
print('current time: ' + str(pretrain_time)+' start generating data:')
# play with the size of training data you use
generated_text = test_lstm_model.generate_text(sess, testing_data[:50], number_to_char_dict, char_to_number_dict,
text_length_to_generate)
#print(generated_text)
print('Done! generating took: ' + str(datetime.datetime.now() - pretrain_time))
file_manager.save_results(accuracy, cross_entropy, generated_text)
plt.plot(train_loss, 'k-')
plt.xlabel('Iteration')
plt.ylabel('Loss')
plt.show() | imosafi/LSTM | Code/main.py | main.py | py | 5,116 | python | en | code | 0 | github-code | 90 |
42853975565 | import sys, string, math
# Quasi-palindrome check: a number string is accepted if it is a
# palindrome outright, or becomes one after prepending one leading
# zero for each of its trailing zeros.
m = input()
if m == m[::-1] :
    print('yes')
    sys.exit()
# Count the trailing zeros of m.
n = 0
for i in m[::-1] :
    if i == '0' :
        n += 1
    else :
        break
# Pad with the same number of leading zeros and test again.
s1 = '0'*n + m
if s1 == s1[::-1] :
    print('yes')
else :
    print('no')
| Shamabanu/python | quasi palindromic.py | quasi palindromic.py | py | 248 | python | en | code | 2 | github-code | 90 |
def create_table(connection, tbl_name, col_name, col_type, row_list) :
    """Create an SQLite table and populate it with row_list.

    Args:
        connection: open sqlite3 database connection.
        tbl_name: table name; its primary key column is <tbl_name>_id.
        col_name: list of user column names (must not contain the primary
            key name; a column named <tbl_name>_name is made UNIQUE).
        col_type: list of SQLite types aligned with col_name.
        row_list: list of rows, each a list aligned with col_name. The
            primary key value (the row index) is prepended automatically.

    Side effects:
        Executes CREATE TABLE and one INSERT per row, then commits.
        Asserts if a real-typed cell contains NaN.
    """
    import math
    import dismod_at
    import copy
    primary_key = tbl_name + '_id'
    name_column = tbl_name + '_name'
    #
    # Assemble the CREATE TABLE statement.
    cmd = 'create table ' + tbl_name + '('
    n_col = len( col_name )
    cmd += '\n\t' + tbl_name + '_id integer primary key'
    for j in range(n_col) :
        # BUG FIX: previously `assert col_name != primary_key` compared
        # the whole list to a string (vacuously true); the intent is to
        # forbid a user column that collides with the primary key name.
        assert col_name[j] != primary_key
        cmd += ',\n\t' + col_name[j] + ' ' + col_type[j]
        if col_name[j] == name_column :
            cmd += ' unique'
    cmd += '\n\t);'
    #
    cursor = connection.cursor()
    cursor.execute(cmd)
    #
    # Insert the rows, rejecting NaN values in real-typed columns.
    quote_text = True
    for i in range( len(row_list) ) :
        row_cpy = copy.copy(row_list[i])
        for j in range( len(row_cpy) ) :
            if col_type[j] == 'real' :
                if row_cpy[j] is not None :
                    if math.isnan( float(row_cpy[j]) ) :
                        name = col_name[j]
                        msg = f'create_table: row_list[{i}][{j}] is nan and the '
                        msg += f'column type is real (the column name is {name} ).'
                        assert False, msg
        row_cpy.insert(0, i)  # primary key value = row index
        value_tuple = dismod_at.unicode_tuple(row_cpy, quote_text)
        cmd = 'insert into ' + tbl_name + ' values ' + value_tuple
        cursor.execute(cmd)
    connection.commit()
| bradbell/dismod_at | python/dismod_at/create_table.py | create_table.py | py | 1,304 | python | en | code | 6 | github-code | 90 |
72677807657 | import unittest
import numpy as np
import torch
from pero_ocr.decoding.decoders import BLANK_SYMBOL
from pero_ocr.decoding.decoders import find_new_prefixes
from pero_ocr.decoding.decoders import GreedyDecoder
from pero_ocr.decoding.decoders import CTCPrefixLogRawNumpyDecoder
from pero_ocr.decoding.decoders import get_old_prefixes_positions, get_new_prefixes_positions
from pero_ocr.decoding.decoders import update_lm_things
from pero_ocr.decoding.lm_wrapper import HiddenState
from .test_lm_wrapper import DummyLm
class CTCPrefixDecodersBeam1Tests:
def test_single_frame(self):
logits = np.asarray([
[0, -80.0, -80.0, -80.0],
])
boh = self.decoder(logits)
hyp = boh.best_hyp()
self.assertEqual(hyp, 'a')
def test_single_blank_score(self):
logits = np.asarray([
[-80.0, -80.0, -80.0, -5.0],
])
boh = self.decoder(logits, max_unnormalization=np.inf)
hyp = boh.best_hyp()
self.assertEqual(hyp, '')
self.assertEqual(boh._hyps[0].vis_sc, -5.0)
def test_trivial(self):
logits = np.asarray([
[0, -80.0, -80.0, -80.0],
[0, -80.0, -80.0, -80.0],
])
boh = self.decoder(logits)
hyp = boh.best_hyp()
self.assertEqual(hyp, 'a')
def test_double_symbol(self):
logits = np.asarray([
[0, -80.0, -80.0, -80.0],
[-80.0, -80.0, -80.0, 0.0],
[0, -80.0, -80.0, -80.0],
])
boh = self.decoder(logits)
hyp = boh.best_hyp()
self.assertEqual(hyp, 'aa')
def test_two_symbols_immediate(self):
logits = np.asarray([
[0, -80.0, -80.0, -80.0],
[-80.0, 0.0, -80.0, -80.0],
])
boh = self.decoder(logits)
hyp = boh.best_hyp()
self.assertEqual(hyp, 'ab')
def test_continued_symbol(self):
logits = np.asarray([
[0, -80.0, -80.0, -80.0],
[0, -80.0, -80.0, -80.0],
[-80.0, -80.0, -80.0, 0.0],
])
boh = self.decoder(logits)
hyp = boh.best_hyp()
self.assertEqual(hyp, 'a')
def test_continued_symbol_regression(self):
logits = np.asarray([
[-7e-2, -80.0, -80.0, -2.0],
[-4e-4, -80.0, -80.0, -7.0],
[-9e-1, -80.0, -80.0, -5e-1],
[-80.0, -80.0, -80.0, 0.0],
])
boh = self.decoder(logits, max_unnormalization=np.inf)
hyp = boh.best_hyp()
self.assertEqual(hyp, 'a')
def test_require_log_probs(self):
logits = np.asarray([
[-10.0, -80.0, -80.0, -10.0],
])
self.assertRaises(ValueError, self.decoder, logits)
class CTCPrefixDecoderWiderBeamTests:
def test_prefix_joining_regression(self):
logits = np.asarray([
[-2, -10, -80.0, -2.0],
[-4e-4, -80.0, -80.0, -7.0],
[-9e-1, -80.0, -80.0, -5e-1],
[-80.0, -80.0, -80.0, 0.0],
])
boh = self.decoder(logits, max_unnormalization=np.inf)
all_transcripts = list(hyp.transcript for hyp in boh)
hyp = boh.best_hyp()
self.assertEqual(hyp, 'a')
self.assertEqual(len(set(all_transcripts)), 2)
self.assertEqual(set(all_transcripts), {'a', ''})
class GreedyDecoderTests(CTCPrefixDecodersBeam1Tests, unittest.TestCase):
def setUp(self):
letters = ['a', 'b', 'c']
self.decoder = GreedyDecoder(letters+[BLANK_SYMBOL])
class CTCPrefixLogRawNumpyDecoderBeam2Tests(CTCPrefixDecodersBeam1Tests, CTCPrefixDecoderWiderBeamTests, unittest.TestCase):
def setUp(self):
letters = ['a', 'b', 'c']
self.decoder = CTCPrefixLogRawNumpyDecoder(letters+[BLANK_SYMBOL], k=2)
class BlankCheckTests(unittest.TestCase):
def test_greedy_decoder_uniqueness(self):
self.assertRaises(ValueError, GreedyDecoder, ['a', BLANK_SYMBOL, 'b'] + [BLANK_SYMBOL])
def test_greedy_decoder_blank_at_end(self):
self.assertRaises(ValueError, GreedyDecoder, ['a', BLANK_SYMBOL, 'b'])
def test_greedy_decoder_missing_blank(self):
self.assertRaises(ValueError, GreedyDecoder, ['a', 'b'])
def test_rawlog_decoder_uniqueness(self):
self.assertRaises(ValueError, CTCPrefixLogRawNumpyDecoder, ['a', BLANK_SYMBOL, 'b'] + [BLANK_SYMBOL], k=2)
def test_rawlog_decoder_blank_at_end(self):
self.assertRaises(ValueError, CTCPrefixLogRawNumpyDecoder, ['a', BLANK_SYMBOL, 'b'], k=2)
def test_rawlog_decoder_missing_blank(self):
self.assertRaises(ValueError, CTCPrefixLogRawNumpyDecoder, ['a', 'b'], k=2)
class CTCPrefixLogRawNumpyDecoderBeam1Tests(CTCPrefixDecodersBeam1Tests, unittest.TestCase):
def setUp(self):
letters = ['a', 'b', 'c']
self.decoder = CTCPrefixLogRawNumpyDecoder(letters+[BLANK_SYMBOL], k=1)
def test_beam_not_int(self):
letters = ['a', 'b', 'c']
self.assertRaises(TypeError, CTCPrefixLogRawNumpyDecoder, letters+[BLANK_SYMBOL], k=None)
def test_beam_not_positive(self):
letters = ['a', 'b', 'c']
self.assertRaises(ValueError, CTCPrefixLogRawNumpyDecoder, letters+[BLANK_SYMBOL], k=0)
class CTCDecodingWithLMTests:
def get_lm(self, a=-10.0, b=-10.0, c=-10.0):
lm = DummyLm()
lm.decoder._model_o.weight[1, 0] = 0.0
lm.decoder._model_o.weight[2, 0] = 0.0
lm.decoder._model_o.weight[3, 0] = 0.0
lm.decoder._model_o.bias[1] = a
lm.decoder._model_o.bias[2] = b
lm.decoder._model_o.bias[3] = c
return lm
def test_single_selection_a(self):
lm = self.get_lm(a=-1)
decoder = self._decoder_constructor(
self._decoder_symbols,
k=1,
lm=lm
)
logits = np.asarray([
[-1, -1, -80.0, -80.0],
])
boh = decoder(logits, max_unnormalization=np.inf)
hyp = boh.best_hyp()
self.assertEqual(hyp, 'a')
for h in boh:
self.assertEqual(h.lm_sc, lm.single_sentence_nll(list(h.transcript), '</s>'))
def test_single_selection_b(self):
lm = self.get_lm(b=-1)
decoder = self._decoder_constructor(
self._decoder_symbols,
k=1,
lm=lm
)
logits = np.asarray([
[-1, -1, -80.0, -80.0],
])
boh = decoder(logits, max_unnormalization=np.inf)
hyp = boh.best_hyp()
self.assertEqual(hyp, 'b')
for h in boh:
self.assertEqual(h.lm_sc, lm.single_sentence_nll(list(h.transcript), '</s>'))
def test_insertion_bonus(self):
lm = self.get_lm(a=-1, b=-1, c=-1)
insertion_bonus = 0.5
decoder = self._decoder_constructor(
self._decoder_symbols,
k=1,
lm=lm,
insertion_bonus=insertion_bonus,
)
logits = np.asarray([
[-80.0, -0.1, -80.0, -0.7],
])
boh = decoder(logits, max_unnormalization=np.inf)
hyp = boh.best_hyp()
self.assertEqual(hyp, 'b')
for h in boh:
true_lm_nll = lm.single_sentence_nll(list(h.transcript), '</s>')
self.assertEqual(h.lm_sc, true_lm_nll + insertion_bonus*len(h.transcript))
def test_single_selection_repeated_b(self):
lm = self.get_lm(b=-1)
decoder = self._decoder_constructor(
self._decoder_symbols,
k=1,
lm=lm
)
logits = np.asarray([
[-1, -1, -80.0, -80.0],
[-1, -1, -80.0, -80.0],
])
boh = decoder(logits, max_unnormalization=np.inf)
hyp = boh.best_hyp()
self.assertEqual(hyp, 'b')
for h in boh:
self.assertEqual(h.lm_sc, lm.single_sentence_nll(list(h.transcript), '</s>'))
def get_bying_lm(self):
lm = DummyLm()
lm.model._model_r.weight[0, 0] = 2
lm.model._model_r.bias[0] = 0
lm.model._model_i.weight[0, 0] = 0
lm.decoder._model_o.weight[1, 0] = -0.0
lm.decoder._model_o.weight[2, 0] = -1.0
lm.decoder._model_o.weight[3, 0] = -2.0
lm.decoder._model_o.bias[1] = -10
lm.decoder._model_o.bias[2] = 0
lm.decoder._model_o.bias[3] = 30
return lm
def test_switching_lm_b(self):
lm = self.get_bying_lm()
decoder = self._decoder_constructor(
self._decoder_symbols,
k=1,
lm=lm
)
logits = np.asarray([
[-1, -80.0, -80.0, -80.0],
[-80.0, -1.0, -1.0, -80.0],
])
boh = decoder(logits, max_unnormalization=np.inf)
hyp = boh.best_hyp()
self.assertEqual(hyp, 'ab')
for h in boh:
self.assertEqual(h.lm_sc, lm.single_sentence_nll(list(h.transcript), '</s>'))
def get_cying_lm(self):
lm = DummyLm()
lm.model._model_r.weight[0, 0] = 2
lm.model._model_r.bias[0] = 0
lm.model._model_i.weight[0, 0] = 0
lm.decoder._model_o.weight[1, 0] = -0.0
lm.decoder._model_o.weight[2, 0] = -2.0
lm.decoder._model_o.weight[3, 0] = -1.0
lm.decoder._model_o.bias[1] = -10
lm.decoder._model_o.bias[2] = 30
lm.decoder._model_o.bias[3] = 0
return lm
def test_switching_lm_c(self):
lm = self.get_cying_lm()
decoder = self._decoder_constructor(
self._decoder_symbols,
k=1,
lm=lm
)
logits = np.asarray([
[-1, -80.0, -80.0, -80.0],
[-80.0, -1.0, -1.0, -80.0],
])
boh = decoder(logits, max_unnormalization=np.inf)
hyp = boh.best_hyp()
self.assertEqual(hyp, 'ac')
for h in boh:
self.assertEqual(h.lm_sc, lm.single_sentence_nll(list(h.transcript), '</s>'))
def get_eosing_lm(self):
lm = DummyLm()
lm.model._model_r.weight[0, 0] = 2
lm.model._model_r.bias[0] = 0
lm.model._model_i.weight[0, 0] = 0
lm.model._model_i.weight[1, 0] = 1
lm.model._model_i.weight[0, 0] = 0
lm.decoder._model_o.weight[0, 0] = 1.0
lm.decoder._model_o.weight[1, 0] = 0.0
lm.decoder._model_o.weight[2, 0] = 0.0
lm.decoder._model_o.weight[3, 0] = 0.0
lm.decoder._model_o.bias[0] = -2
lm.decoder._model_o.bias[1] = -1
lm.decoder._model_o.bias[2] = -1
lm.decoder._model_o.bias[3] = -1
return lm
def test_respecting_eos(self):
lm = self.get_eosing_lm()
decoder = self._decoder_constructor(
self._decoder_symbols,
k=2,
lm=lm
)
logits = np.asarray([
[-80.0, -2.0, -80.0, -1.0],
])
boh = decoder(logits, model_eos=True, max_unnormalization=np.inf)
hyp = boh.best_hyp()
self.assertEqual(hyp, 'b')
for h in boh:
self.assertEqual(h.lm_sc, lm.single_sentence_nll(list(h.transcript) + ['</s>'], '</s>'))
def test_archiving_lm_scale(self):
lm = self.get_eosing_lm()
decoder = self._decoder_constructor(
self._decoder_symbols,
k=2,
lm=lm,
lm_scale=0.1
)
logits = np.asarray([
[-80.0, -2.0, -80.0, -1.0],
])
boh = decoder(logits, model_eos=True, max_unnormalization=np.inf)
self.assertEqual(boh.lm_weight, 0.1)
def test_beam_2(self):
lm = self.get_cying_lm()
decoder = self._decoder_constructor(
self._decoder_symbols,
k=2,
lm=lm
)
logits = np.asarray([
[-1, -80.0, -80.0, -80.0],
[-80.0, -1.0, -1.0, -80.0],
])
boh = decoder(logits, max_unnormalization=np.inf)
hyp = boh.best_hyp()
self.assertEqual(len(boh), 2)
self.assertEqual(hyp, 'ac')
for h in boh:
self.assertEqual(h.lm_sc, lm.single_sentence_nll(list(h.transcript), '</s>'))
def test_decoder_returns_hidden_state_of_best_hyp(self):
lm = self.get_cying_lm()
decoder = self._decoder_constructor(
self._decoder_symbols,
k=2,
lm=lm
)
logits = np.asarray([
[-1, -80.0, -80.0, -80.0],
[-80.0, -1.0, -1.0, -80.0],
])
boh, last_h = decoder(logits, max_unnormalization=np.inf, return_h=True)
hyp = boh.best_hyp()
self.assertEqual(len(boh), 2)
self.assertEqual(hyp, 'ac')
self.assertEqual(last_h._h, torch.tensor([85.0]))
def test_decoder_accepts_hidden_state(self):
lm = self.get_cying_lm()
decoder = self._decoder_constructor(
self._decoder_symbols,
k=2,
lm=lm
)
logits = np.asarray([
[-1, -80.0, -80.0, -80.0],
[-80.0, -2.0, -1.0, -80.0],
])
init_h = HiddenState(torch.tensor([[[1.0]]]))
boh = decoder(logits, max_unnormalization=np.inf, init_h=init_h)
hyp = boh.best_hyp()
self.assertEqual(len(boh), 2)
self.assertEqual(hyp, 'ab')
def test_decoder_hidden_state_propagates(self):
lm = self.get_cying_lm()
decoder = self._decoder_constructor(
self._decoder_symbols,
k=2,
lm=lm
)
logits_1 = np.asarray([
[-1, -80.0, -80.0, -80.0],
])
logits_2 = np.asarray([
[-80.0, -0.8, -1.0, -80.0],
])
_, last_h = decoder(logits_1, max_unnormalization=np.inf, return_h=True)
boh = decoder(logits_2, max_unnormalization=np.inf, init_h=last_h)
hyp = boh.best_hyp()
self.assertEqual(len(boh), 2)
self.assertEqual(hyp, 'c')
def test_wide_beam_regression(self):
decoder = self._decoder_constructor(
self._decoder_symbols,
k=2,
)
logits = np.asarray([
[-0.1, -8.0, -80.0, -2.0],
[-0.0, -5.0, -80.0, -80.0],
])
boh = decoder(logits, max_unnormalization=np.inf)
a_hyp = [hyp for hyp in boh if hyp.transcript == 'a']
assert(len(a_hyp) == 1) # this is really a plain assert. There SHALL NOT be multiple hypotheses of the same text
a_hyp = a_hyp[0]
self.assertEqual(a_hyp.vis_sc, np.logaddexp(-0.1, -2.0))
class CTCPrefixLogRawNumpyDecoderLMTests(CTCDecodingWithLMTests, unittest.TestCase):
def setUp(self):
self._decoder_symbols = ['a', 'b', 'c', BLANK_SYMBOL]
self._decoder_constructor = CTCPrefixLogRawNumpyDecoder
class FindNewPrefixesTests(unittest.TestCase):
    """Unit tests for find_new_prefixes(), which extends beam prefixes.

    best_inds is a pair of index arrays: (source beam index, symbol index)
    for each surviving beam; the blank symbol keeps a prefix unchanged.
    """
    def setUp(self):
        self.letters = ['a', 'b', 'c', BLANK_SYMBOL]
        self.blank_ind = 3
    def test_old_carry_over(self):
        # Extending every beam with the blank keeps all prefixes unchanged.
        A_prev = ['aaa', 'aab', 'aac']
        l_last = np.asarray([0, 1, 2])
        best_inds = (np.asarray([0, 1, 2]), np.asarray([3, 3, 3]))
        A_new, l_last_new = find_new_prefixes(l_last, best_inds, A_prev, self.letters, self.blank_ind)
        self.assertEqual(A_new, A_prev)
        self.assertEqual(set(l_last_new.tolist()), set(l_last.tolist()))
    def test_all_new(self):
        # Extending every beam with 'b' appends it to every prefix.
        A_prev = ['aaa', 'aab', 'aac']
        l_last = np.asarray([0, 1, 2])
        best_inds = (np.asarray([0, 1, 2]), np.asarray([1, 1, 1]))
        A_exp = ['aaab', 'aabb', 'aacb']
        l_last_exp = np.asarray([1, 1, 1])
        A_new, l_last_new = find_new_prefixes(l_last, best_inds, A_prev, self.letters, self.blank_ind)
        self.assertEqual(A_new, A_exp)
        self.assertEqual(set(l_last_new.tolist()), set(l_last_exp.tolist()))
    def test_all_mixed(self):
        # A mixture of blank (keep) and non-blank (extend) transitions.
        A_prev = ['aaa', 'aab', 'aac']
        l_last = np.asarray([0, 1, 2])
        best_inds = (np.asarray([0, 1, 2]), np.asarray([1, 3, 0]))
        A_exp = ['aaab', 'aab', 'aaca']
        l_last_exp = np.asarray([1, 1, 0])
        A_new, l_last_new = find_new_prefixes(l_last, best_inds, A_prev, self.letters, self.blank_ind)
        self.assertEqual(set(A_new), set(A_exp))
        self.assertEqual(set(l_last_new.tolist()), set(l_last_exp.tolist()))
    def test_regression1(self):
        # Regression: the same source beam ('a') is both kept via the blank
        # and extended with 'b' — both results must survive.
        A_prev = ['b', 'a']
        l_last = np.asarray([1, 0])
        best_inds = (np.asarray([1, 1]), np.asarray([3, 1]))
        A_exp = ['ab', 'a']
        l_last_exp = np.asarray([1, 0])
        A_new, l_last_new = find_new_prefixes(l_last, best_inds, A_prev, self.letters, self.blank_ind)
        self.assertEqual(set(A_new), set(A_exp))
        self.assertEqual(set(l_last_new.tolist()), set(l_last_exp.tolist()))
class UpdateLMThingsTests(unittest.TestCase):
    """Unit tests for update_lm_things(), which reorders/advances LM state."""
    def test_shuffling_partial_update(self):
        h_prev = np.asarray([11, 12])
        lm_preds = np.asarray([[1, 2], [3, 4]])
        best_inds = np.asarray([1, 0]), np.asarray([1, 2])
        class Object:
            pass
        # Stub LM: advancing doubles the hidden state; log_probs is fixed.
        lm = Object()
        lm.advance_h0 = lambda _, h: 2*h
        lm.log_probs = lambda _: np.asarray([5, 6])
        # New beam 0 comes from old beam 1 with a non-blank symbol, so its
        # state is advanced (2*12=24) and re-predicted ([5, 6]); new beam 1
        # comes from old beam 0 via the blank (index 2), so its state (11)
        # and prediction ([1, 2]) carry over unchanged.
        expected_h = np.asarray([24, 11])
        expected_preds = np.asarray([[5, 6], [1, 2]])
        h_new, lm_pred_new = update_lm_things(lm, h_prev, lm_preds, best_inds, blank_ind=2)
        self.assertTrue(np.array_equal(h_new, expected_h))
        self.assertTrue(np.array_equal(lm_pred_new, expected_preds))
class HelpersTests(unittest.TestCase):
    """Tests for the get_old/new_prefixes_positions() index helpers."""
    def test_picking_old(self):
        # Positions whose extension symbol equals the blank index (3).
        best_inds = np.asarray([0, 1, 2]), np.asarray([3, 2, 3])
        picks = get_old_prefixes_positions(best_inds, 3)
        self.assertEqual(picks, [0, 2])
    def test_picking_new(self):
        # Positions extended with a non-blank symbol.
        best_inds = np.asarray([0, 1, 2]), np.asarray([3, 2, 3])
        picks = get_new_prefixes_positions(best_inds, 3)
        self.assertEqual(picks, [1])
| DCGM/pero-ocr | test/test_decoding/test_decoders.py | test_decoders.py | py | 17,774 | python | en | code | 38 | github-code | 90 |
25251422569 |
class Solution:
    """Evaluate an arithmetic expression in Reverse Polish Notation.

    (LeetCode "Evaluate Reverse Polish Notation".)  Division truncates
    toward zero, as the problem requires.
    """

    # The four supported binary operator symbols.
    _OPERATORS = frozenset(("+", "-", "*", "/"))

    # @param tokens, a list of string
    # @return an integer
    def ceil(self, x):
        """Return the smallest integer >= x (math.ceil semantics).

        Fixed: the previous version returned int(x)+1 for every
        non-integer, which is wrong for negatives (ceil(-2.5) is -2,
        not -1), because int() truncates toward zero.
        """
        i = int(x)
        # int() truncates toward zero, so only bump for positive fractions.
        return i if x == i or x < 0 else i + 1

    def isOperSym(self, token):
        """Return True if `token` is one of the four operator symbols."""
        return token in self._OPERATORS

    def evalRPN(self, tokens):
        """Evaluate `tokens` and return the integer result.

        Operands are pushed on a stack; each operator pops two operands.
        Returns None on division by zero (preserved behaviour).
        """
        operNum = []
        for t in tokens:
            if not self.isOperSym(t):
                operNum.append(t)
                continue
            num2 = int(operNum.pop())
            num1 = int(operNum.pop())
            if t == "+":
                operNum.append(num1 + num2)
            elif t == "-":
                operNum.append(num1 - num2)
            elif t == "*":
                operNum.append(num1 * num2)
            else:  # t == "/"
                if num2 == 0:
                    return None
                # int() truncates toward zero — unlike //, which floors.
                operNum.append(int(num1 / num2))
        return int(operNum.pop())
# Smoke test: evaluate a few RPN expressions and print the results.
# The expected outputs are listed in the comments at the end of the file.
tokens = [
    ["18"],
    ["2", "1", "+"],
    # ["2", "0", "/"],
    ["2", "1", "+", "3", "*"],
    ["2", "1", "3", "*", "+"],
    ["10","6","9","3","+","-11","*","/","*","17","+","5","+"],
    ["4","-2","/","2","-3","-","-"],
    ["6","9","3","+","-11","*","/"]
]
solu = Solution()
for t in tokens:
    print((solu.evalRPN(t)))
# 18
# 3
# 9
# 5
# 22
# -7
| sevenseablue/leetcode | src/leet/Evaluate Reverse Polish Notation.py | Evaluate Reverse Polish Notation.py | py | 1,587 | python | en | code | 0 | github-code | 90 |
35390242387 | import keras
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Conv2D, MaxPool2D
from keras.preprocessing.image import ImageDataGenerator
from keract import get_activations, display_activations
# Load the Covid19 chest X-ray dataset. Labels are inferred from the
# directory structure (3 classes — see the final softmax layer below);
# images are resized to 64x64.
datagen = ImageDataGenerator()
train_data = datagen.flow_from_directory("Covid19-dataset/train/",target_size=(64,64))
test_data = datagen.flow_from_directory("Covid19-dataset/test/",target_size=(64,64))
# Small CNN: three conv + max-pool stages, then two dense layers and a
# 3-way softmax classifier.
model = Sequential()
model.add(Conv2D(filters=48,kernel_size=3,activation="relu",input_shape=[64,64,3]))
model.add(MaxPool2D(pool_size=2,strides=2))
model.add(Conv2D(filters=48,kernel_size=3,activation="relu"))
model.add(MaxPool2D(pool_size=2,strides=2))
model.add(Conv2D(filters=32,kernel_size=3,activation="relu"))
model.add(MaxPool2D(pool_size=2,strides=2))
model.add(Flatten())
model.add(Dense(128,activation="relu"))
model.add(Dense(64,activation="relu"))
model.add(Dense(3,activation="softmax"))
# Optimizer and loss options tried during experimentation:
#adam
#sgd
#rmsprop
#____
#categorical_hinge
#cosine_similarity
#categorical_crossentropy
model.compile(optimizer="rmsprop",loss="categorical_crossentropy",metrics=["accuracy"])
model.fit(x=train_data,epochs=1)
# Evaluate on the held-out set and print accuracy as a percentage
# (evaluate returns [loss, accuracy] for the metrics configured above).
sc = model.evaluate(test_data)
print(sc[1]*100)
1230187940 | """
该模块:`visul`包含数据可视化的类和函数。
"""
# Author: Sandiagal <sandiagal2525@gmail.com>,
# License: GPL-3.0
import math
from pickle import load
import os
import cv2
from keras import backend as K
from keras.models import Model
import matplotlib.pyplot as plt
import numpy as np
import PIL.Image as Image
from idiplab_cv.dataset_io import reverse_dict
from idiplab_cv.dataset_io import to_categorical
if not os.path.exists("images"):
os.mkdir("images")
# %%
def _drawline(img, pt1, pt2, color, thickness=1, style="dotted", gap=20):
    """Draw a dotted or dashed line segment from `pt1` to `pt2` onto `img`.

    `gap` is the pixel spacing between sample points along the segment.
    "dotted" draws a filled circle at each sample; any other style draws
    dashes between alternating pairs of consecutive samples.
    """
    x1, y1 = pt1
    x2, y2 = pt2
    dist = ((x1 - x2)**2 + (y1 - y2)**2)**.5
    # Sample evenly spaced points along the segment, `gap` pixels apart.
    samples = []
    for step in np.arange(0, dist, gap):
        frac = step / dist
        point = (int((x1 * (1 - frac) + x2 * frac) + .5),
                 int((y1 * (1 - frac) + y2 * frac) + .5))
        samples.append(point)
    if style == "dotted":
        for point in samples:
            cv2.circle(img, point, thickness, color, -1)
    else:
        # Dashes: draw a line for every second consecutive pair of samples.
        prev = samples[0]
        for count, point in enumerate(samples):
            if count % 2 == 1:
                cv2.line(img, prev, point, color, thickness)
            prev = point
def _drawpoly(img, pts, color, thickness=1, style="dotted",):
    """Draw a closed polygon through `pts` using _drawline for each edge.

    Each vertex is connected to the next, and the last is connected back
    to the first.  Fixed: the previous version rotated the caller's `pts`
    list in place (``pts.append(pts.pop(0))``); this version draws the
    same edges without mutating the argument.
    """
    count = len(pts)
    for i in range(count):
        _drawline(img, pts[i], pts[(i + 1) % count], color, thickness, style)
def _drawrect(img, pt1, pt2, color, thickness=1, style="dotted"):
    """Draw a dotted/dashed axis-aligned rectangle with opposite corners
    `pt1` and `pt2` onto `img`."""
    corners = [pt1, (pt2[0], pt1[1]), pt2, (pt1[0], pt2[1])]
    _drawpoly(img, corners, color, thickness, style)
# %%
def show_grid(imgs, title=None, suptitles=None, rows=None, cols=None, colors=None):
    """Show a roughly square grid of images and save it to images/<title>.png.

    imgs: list of PIL images or uint8-convertible ndarrays.
    suptitles: optional per-image titles; colors: optional per-title colors.
    rows/cols: grid shape; defaults to an approximately square layout.
    Returns the matplotlib pyplot module (figure left open for the caller).
    """
    plt.style.use("classic")
    imgs_num = len(imgs)
    imgs = np.array(imgs)
    if type(imgs[0]) == np.ndarray:
        imgs = [Image.fromarray(img.astype('uint8')) for img in imgs]
    if not rows:
        rows = math.ceil(np.sqrt(imgs_num))
    if not cols:
        cols = int(round(np.sqrt(imgs_num)))
    # NOTE(review): `cols` is passed as nrows and `rows` as ncols here —
    # the names are swapped relative to matplotlib's convention.
    f, axarr = plt.subplots(cols, rows,
                            figsize=(15, 20),
                            gridspec_kw={"wspace": 0.03, "hspace": 0.05})
    # if title:
    #     plt.suptitle(title, fontsize=20)
    for idx, ax in enumerate(f.axes):
        if idx < imgs_num:
            ax.imshow(imgs[idx])
            if suptitles:
                if colors:
                    ax.set_title(suptitles[idx], ha='center',
                                 fontsize=14, color=colors[idx])
                else:
                    ax.set_title(suptitles[idx], fontsize=10)
        ax.axis("off")
    if not title:
        title = "grid"
    plt.savefig("images/"+title+".png", bbox_inches='tight', dpi=200)
    return plt
def overall(imgs, number_to_show=20):
    """Plot a grid preview of about `number_to_show` evenly spaced images.

    imgs: sequence of images accepted by show_grid.
    number_to_show: approximate number of samples to display.
    Returns the pyplot module returned by show_grid.
    """
    # Guard: with fewer images than requested the old step
    # len(imgs)//number_to_show was 0, which made range() raise ValueError.
    step = max(1, len(imgs) // number_to_show)
    img_show = [imgs[i] for i in range(0, len(imgs), step)]
    plt = show_grid(img_show, "A selection from the dataset")
    return plt
def CAM(img_white=None, model=None, feature_layer=None, weight_layer=None, feature_map=None, weights=None, scores_predict=None, idx_predic=None, display=False, img_show=None, label_show=None, class_to_index=None, extend=False):
    """Compute a class-activation map (CAM) and blend it over `img_show`.

    Either pass `img_white` plus `model`/`feature_layer`/`weight_layer`
    so the feature map, classifier weights and scores are computed here,
    or pass precomputed `feature_map`, `weights` and `scores_predict`.
    idx_predic: class index to visualise; defaults to the argmax class.
    When `display` is True, comparison figures are saved under images/.
    Returns (cam, mix): the normalised CAM and the heatmap/image blend.
    """
    width, height, _ = img_show.shape
    # CAM values above `omit` are zeroed in the heatmap. The CAM is
    # normalised to [0, 1], so 1.75 effectively disables suppression.
    omit = 1.75
    if feature_map is None or weights is None or scores_predict is None:
        # Run the model once to obtain the feature maps and class scores.
        getOutput = K.function([model.input], [model.get_layer(
            feature_layer).output, model.output])
        [feature_map, scores_predict] = getOutput(
            [np.expand_dims(img_white, axis=0)])
        weightLayer = model.get_layer(weight_layer)
        weights = weightLayer.get_weights()[0]
    if idx_predic == None:
        idx_predic = np.argmax(scores_predict)
    weight = weights[:, idx_predic]
    feature_map = feature_map[0]
    cam = np.matmul(feature_map, weight)
    cam = (cam - cam.min()) / (cam.max() - cam.min())
    # Corner-based polarity heuristic — presumably keeps the map oriented
    # consistently; note the threshold (<3) differs from the <2 used in
    # CAMs() and in the extended branches below. TODO confirm intent.
    if cam[0, 0]+cam[0, -1]+cam[-1, 0]+cam[-1, -1] < 3:
        cam = 1-cam
    cam = cv2.resize(cam, (height, width))
    heatmap = cv2.applyColorMap(np.uint8(255*cam), cv2.COLORMAP_JET)
    heatmap[np.where(cam > omit)] = 0
    mix = cv2.addWeighted(src1=img_show.astype("uint8"), src2=heatmap,
                          alpha=0.8, beta=0.4, gamma=0)
    if display:
        Image.fromarray(img_show).save('images/img_origin.png')
        Image.fromarray(mix).save('images/CAM.png')
        index_to_class = reverse_dict(class_to_index)
        predic_class = index_to_class[idx_predic]
        predict_score = np.max(scores_predict)
        if extend is False:
            # Simple two-panel figure: original vs. CAM overlay.
            plt.figure(figsize=(11, 8))
            plt.subplot(121)
            plt.axis("off")
            plt.title("Actual: %s" % (label_show))
            plt.imshow(img_show)
            plt.subplot(122)
            plt.axis("off")
            plt.title("Predict: %s %.2f%%" %
                      (predic_class, predict_score * 100))
            plt.imshow(mix)
        else:
            # Extended 2x2 figure: original, top-1 CAM, ground-truth CAM,
            # and top-2 CAM.
            plt.figure()
            plt.subplot(221)
            plt.axis("off")
            plt.title("Original image: %s" % (index_to_class[label_show]))
            plt.imshow(img_show)
            plt.subplot(222)
            plt.axis("off")
            plt.title("Top 1: %s %.2f%%" % (predic_class, predict_score * 100))
            plt.imshow(mix)
            # CAM for the ground-truth class.
            idx_predic3 = label_show
            predict_score3 = scores_predict[0, idx_predic3]
            weight3 = weights[:, idx_predic3]
            cam3 = np.matmul(feature_map, weight3)
            cam3 = (cam3 - cam3.min()) / (cam3.max() - cam3.min())
            if cam3[0, 0]+cam3[0, -1]+cam3[-1, 0]+cam3[-1, -1] < 2:
                cam3 = 1-cam3
            cam3 = cv2.resize(cam3, (height, width))
            heatmap3 = cv2.applyColorMap(np.uint8(255*cam3), cv2.COLORMAP_JET)
            heatmap3[np.where(cam3 > omit)] = 0
            mix3 = cv2.addWeighted(src1=img_show, src2=heatmap3,
                                   alpha=0.8, beta=0.4, gamma=0)
            plt.subplot(223)
            plt.axis("off")
            plt.title("For ground truth: %s %.2f%%" %
                      (index_to_class[label_show], predict_score3 * 100))
            plt.imshow(mix3)
            # CAM for the second-best predicted class.
            idx_predic4 = np.argsort(scores_predict[0, :])[-2]
            predic_class4 = index_to_class[idx_predic4]
            predict_score4 = scores_predict[0, idx_predic4]
            weight4 = weights[:, idx_predic4]
            cam4 = np.matmul(feature_map, weight4)
            cam4 = (cam4 - cam4.min()) / (cam4.max() - cam4.min())
            if cam4[0, 0]+cam4[0, -1]+cam4[-1, 0]+cam4[-1, -1] < 2:
                cam4 = 1-cam4
            cam4 = cv2.resize(cam4, (height, width))
            heatmap4 = cv2.applyColorMap(np.uint8(255*cam4), cv2.COLORMAP_JET)
            heatmap4[np.where(cam4 > omit)] = 0
            mix4 = cv2.addWeighted(src1=img_show, src2=heatmap4,
                                   alpha=0.8, beta=0.4, gamma=0)
            plt.subplot(224)
            plt.axis("off")
            plt.title("Top 2: %s %.2f%%" %
                      (predic_class4, predict_score4 * 100))
            plt.imshow(mix4)
            plt.savefig("images/"+"True.%s(%.1f%%) Top1.%s(%.1f%%) Top2.%s(%.1f%%).jpg" %
                        (
                            # label_show,
                            label_show, predict_score3 * 100,
                            predic_class, predict_score * 100,
                            predic_class4, predict_score4 * 100),
                        bbox_inches='tight',
                        dpi=300
                        )
    return cam, mix
def CAMs(imgs_white, model, feature_layer, weight_layer, idxs_predic=None, display=False, img_show=None):
    '''
    Compute the CAMs for all images in one batch.

    imgs_white: either an image batch (the model is run here), or a dict
    with precomputed "feature_maps" and "weights" entries.
    idxs_predic: per-image class indices; defaults to the argmax class.
    Returns a list of CAMs resized to the model's input resolution.
    '''
    if not isinstance(imgs_white, dict):
        # Run the model to get feature maps, then feed them through the
        # remaining layers to obtain the class scores.
        sample, width, height, _ = imgs_white.shape
        getFeatureMaps = Model(inputs=model.input, outputs=model.get_layer(
            feature_layer).output)
        feature_maps = getFeatureMaps.predict(
            imgs_white, batch_size=32, verbose=1)
        getScoresPredict = K.function([model.get_layer(index=model.layers.index(
            model.get_layer(feature_layer))+1).input], [model.output])
        [scores_predict] = getScoresPredict([feature_maps])
        weightLayer = model.get_layer(weight_layer)
        weights = weightLayer.get_weights()[0]
    else:
        # Precomputed path: feature maps and weights supplied by the caller.
        sample = len(imgs_white["feature_maps"])
        _, width, height, _ = model.input.shape.as_list()
        feature_maps = imgs_white["feature_maps"]
        weights = imgs_white["weights"]
        getScoresPredict = K.function([model.get_layer(index=model.layers.index(
            model.get_layer(feature_layer))+1).input], [model.output])
        [scores_predict] = getScoresPredict([feature_maps])
    if idxs_predic == None:
        idxs_predic = [None]*sample
    cams = []
    for i in range(sample):
        if idxs_predic[i] == None:
            idxs_predic[i] = np.argmax(scores_predict[i])
        cam = feature_maps[i]@ weights[:, idxs_predic[i]]
        cam = (cam - cam.min()) / (cam.max() - cam.min())
        # Corner-based polarity heuristic (see CAM(); threshold <2 here).
        if cam[0, 0]+cam[0, -1]+cam[-1, 0]+cam[-1, -1] < 2:
            cam = 1-cam
        cam = cv2.resize(cam, (height, width))
        cams.append(cam)
        if display:
            # Only the first image's overlay is shown.
            if i == 0:
                heatmap = cv2.applyColorMap(
                    np.uint8(255*cam), cv2.COLORMAP_JET)
                heatmap[np.where(cam > 0.8)] = 0
                mix = cv2.addWeighted(src1=img_show, src2=heatmap,
                                      alpha=0.8, beta=0.4, gamma=0)
                plt.figure(figsize=(11, 8))
                plt.imshow(mix)
    return cams
def cropMask(cam, img_show, display=False):
    """Derive crop boxes from a CAM by thresholding and contour analysis.

    Finds the "highest respond" contour at a tight threshold, grows it to
    an enclosing "anchor box" contour at a looser threshold, and derives a
    "supervised crop" box of the same size anchored at the respond box.
    Returns (xAB, yAB, wAB, hAB, xSC, ySC, ww-wAB, hh-hAB).
    When `display` is True, the annotated image is shown and saved.
    Both inputs are copied, so the caller's arrays are not modified.
    """
    img_show = np.copy(img_show)
    cam = np.copy(cam)
    n, h, = cam.shape
    # Invert and scale to uint8 so low-CAM (salient) areas become bright.
    cam = 255*(1-cam)
    cam = cam.astype("uint8")
    # Tight threshold (70%): pick the largest contour = highest respond box.
    _, thresh = cv2.threshold(cam, 0.7*255, 255, cv2.THRESH_BINARY)
    contours, hierarchy = cv2.findContours(
        thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    max_Index = 0
    for i in range(len(contours)):
        max_Index = i if contours[i].shape[0] > contours[max_Index].shape[0] else max_Index
    cnt = contours[max_Index]
    xHRO, yHRO, wHRO, hHRO = cv2.boundingRect(cnt)
    # Looser threshold (40%): find a contour that fully encloses the
    # highest respond contour — this becomes the anchor box.
    _, thresh = cv2.threshold(cam, 0.4*255, 255, cv2.THRESH_BINARY)
    contoursAB, hierarchy = cv2.findContours(
        thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    for contour in contoursAB:
        if np.min(contour[:, 0, 0]) <= np.min(cnt[:, 0, 0]) and np.max(contour[:, 0, 0]) >= np.max(cnt[:, 0, 0]) and np.min(contour[:, 0, 1]) <= np.min(cnt[:, 0, 1]) and np.max(contour[:, 0, 1]) >= np.max(cnt[:, 0, 1]):
            cnt = contour
    xAB, yAB, wAB, hAB = cv2.boundingRect(cnt)
    #    plt.figure()
    #    plt.imshow(thresh)
    #    return contours, contours1
    #xx = 5
    #yy = 70
    #cv2.rectangle(target, (xx, yy), (xx+224, yy+224), (192,192,192), 5)
    #text = "Random crop"
    # cv2.putText(target, text, (xx, yy), fontFace=cv2.FONT_HERSHEY_SIMPLEX,
    #             fontScale=0.5, color=(192,192,192), thickness=10, lineType=cv2.LINE_AA)
    # cv2.putText(target, text, (xx, yy), fontFace=cv2.FONT_HERSHEY_SIMPLEX,
    #             fontScale=0.5, color=(0, 0, 0), thickness=1, lineType=cv2.LINE_AA)
    # Supervised crop: anchor-box-sized window clamped to the image bounds.
    xSC = max(xHRO+wHRO-wAB, 0)
    ySC = max(yHRO+hHRO-hAB, 0)
    ww = min(xHRO+wAB, h)
    hh = min(yHRO+hAB, n)
    if display:
        # Annotate all three boxes (solid + dotted variants) with labels.
        cv2.rectangle(img_show, (xSC, ySC), (xSC+wAB, ySC+hAB), (0, 255, 0), 5)
        _drawrect(img_show, (xSC, ySC), (ww, hh), (0, 255, 0), 5, "dotted")
        text = "Supervised Crop Box (%dx%d)" % (wAB, hAB)
        cv2.putText(img_show, text, (xSC, ySC), fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                    fontScale=0.4, color=(0, 255, 0), thickness=10, lineType=cv2.LINE_AA)
        cv2.putText(img_show, text, (xSC, ySC), fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                    fontScale=0.4, color=(0, 0, 0), thickness=1, lineType=cv2.LINE_AA)
        cv2.rectangle(img_show, (xAB, yAB), (xAB+wAB, yAB+hAB), (0, 0, 255), 5)
        text = "Anchor Box (%dx%d)" % (wAB, hAB)
        cv2.putText(img_show, text, (xAB, yAB), fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                    fontScale=0.4, color=(0, 0, 255), thickness=10, lineType=cv2.LINE_AA)
        cv2.putText(img_show, text, (xAB, yAB), fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                    fontScale=0.4, color=(255, 255, 255), thickness=1, lineType=cv2.LINE_AA)
        cv2.rectangle(img_show, (xHRO, yHRO),
                      (xHRO+wHRO, yHRO+hHRO), (220, 20, 60), 5)
        text = "Highest Respond Box (%dx%d)" % (wHRO, hHRO)
        cv2.putText(img_show, text, (xHRO, yHRO), fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                    fontScale=0.4, color=(220, 20, 60), thickness=10, lineType=cv2.LINE_AA)
        cv2.putText(img_show, text, (xHRO, yHRO), fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                    fontScale=0.4, color=(0, 0, 0), thickness=1, lineType=cv2.LINE_AA)
        plt.figure(figsize=(10, 8))
        plt.imshow(img_show)
        plt.axis("off")
        Image.fromarray(img_show).save('images/CAM_ABSC.png')
    return xAB, yAB, wAB, hAB, xSC, ySC, ww-wAB, hh-hAB
def worst_samples(imgs_valid, labels_valid, score_predict, class_to_index, top=10, names_valid=None):
    '''
    Show a grid of the `top` samples the classifier predicted worst.

    "Worst" is ranked by the gap between the top predicted score and the
    score assigned to the true class. When `names_valid` is given, the
    offending sample names and score gaps are also printed.
    '''
    index_to_class = reverse_dict(class_to_index)
    labels_valid_onehot = to_categorical(labels_valid, len(class_to_index))
    # Score assigned to the true class of each sample.
    score_valid = np.max(np.array(score_predict)*labels_valid_onehot, axis=1)
    labels_predict = np.argmax(score_predict, axis=1)
    score_predict_max = np.max(score_predict, axis=1)
    # Largest (predicted - true) score gap first.
    worst_index = np.argsort(-score_predict_max+score_valid)[:top]
    asd = score_predict_max-score_valid
    imgs = []
    suptitles = []
    for _, index in enumerate(worst_index):
        imgs.append(imgs_valid[index])
        suptitle = 'Predict: %s (%5.2f%%)\n True : %s (%5.2f%%)' % (
            index_to_class[labels_predict[index]],
            score_predict_max[index]*100,
            index_to_class[labels_valid[index]],
            score_valid[index]*100)
        suptitles.append(suptitle)
        if names_valid is not None:
            name = 'Predict %s True %s' % (
                index_to_class[labels_predict[index]],
                index_to_class[labels_valid[index]])
            print('%5.2f' % (asd[index]*100))
            print(name+'\n'+names_valid[index])
            print()
    plt = show_grid(imgs, 'Worst prediction samples', suptitles=suptitles)
    return plt
def best_worst_samples(imgs_valid, labels_valid, feature_maps, weights, scores_predict, class_to_index):
    '''
    Show examples of model grading results with their corresponding CAMs.

    For each true class, the best- and worst-predicted samples are shown:
    the original image (with true grade and treatment policy) followed by
    its CAM overlay (with predicted grade, treatment suggestion and the
    prediction probability). Redder CAM regions are more likely lesion
    areas. Best examples are titled in blue, worst in red.
    '''
    index_to_class = reverse_dict(class_to_index)
    labels_valid_onehot = to_categorical(labels_valid, len(class_to_index))
    # Score assigned to the true class of each sample.
    score_valid = np.max(np.array(scores_predict)*labels_valid_onehot, axis=1)
    labels_predict = np.argmax(scores_predict, axis=1)
    score_predict_max = np.max(scores_predict, axis=1)
    imgs = []
    suptitles = []
    colors = []
    for i in range(0, len(class_to_index)):
        # i=2
        # Indices of validation samples whose true class is i.
        cur_class = labels_valid == i
        cur_class = np.array([i for i, _ in enumerate(labels_valid)])[
            cur_class]
        cur_score_predict = score_predict_max[cur_class]
        cur_score_valid = score_valid[cur_class]
        # Rank by (predicted - true) score gap: first = worst, last = best.
        worst_index = np.argsort(-cur_score_predict+cur_score_valid)[:1]
        best_index = np.argsort(-cur_score_predict+cur_score_valid)[-1:]
        for _, index in enumerate(cur_class[best_index]):
            imgs.append(imgs_valid[index])
            # Classes 0-1 map to "Follow-up", 2+ to "Surgery".
            suptitle = "True = %s\n%s" % (
                index_to_class[labels_valid[index]],
                "Follow-up" if (labels_valid[index]) < 2 else "Surgery")
            suptitles.append(suptitle)
            colors.append("blue")
            cam, mix = CAM(
                feature_map=np.expand_dims(feature_maps[index], axis=0),
                weights=weights,
                scores_predict=scores_predict[index],
                display=False,
                img_show=imgs_valid[index],
            )
            imgs.append(mix)
            suptitle = "Predict = %s (%4.1f%%)\n%s (%4.1f%%)" % (
                index_to_class[labels_predict[index]],
                score_predict_max[index]*100,
                "Follow-up" if (labels_predict[index]) < 2 else "Surgery",
                (sum(scores_predict[index, :2]) if (labels_predict[index]) < 2 else sum(scores_predict[index, 2:]))*100)
            suptitles.append(suptitle)
            colors.append("blue")
        for _, index in enumerate(cur_class[worst_index]):
            imgs.append(imgs_valid[index])
            suptitle = "True = %s\n%s" % (
                index_to_class[labels_valid[index]],
                "Follow-up" if (labels_valid[index]) < 2 else "Surgery")
            suptitles.append(suptitle)
            colors.append("red")
            cam, mix = CAM(
                feature_map=np.expand_dims(feature_maps[index], axis=0),
                weights=weights,
                scores_predict=scores_predict[index],
                display=False,
                img_show=imgs_valid[index],
            )
            imgs.append(mix)
            suptitle = "Predict = %s (%4.1f%%)\n%s (%4.1f%%)" % (
                index_to_class[labels_predict[index]],
                score_predict_max[index]*100,
                "Follow-up" if (labels_predict[index]) < 2 else "Surgery",
                (sum(scores_predict[index, :2]) if (labels_predict[index]) < 2 else sum(scores_predict[index, 2:]))*100)
            suptitles.append(suptitle)
            colors.append("red")
    plt = show_grid(imgs, suptitles=suptitles, rows=4, cols=6, colors=colors, title="best_worst_samples")
    return plt
| IDIPLAB/IDIPLAB_CV | idiplab_cv/visul.py | visul.py | py | 18,245 | python | en | code | 4 | github-code | 90 |
42642533497 | from __future__ import division
from ignite.metrics.metric import Metric
import numpy as np
import heapq
import torch
def recognition_rate_at_k(probe_x, probe_y, gallery_x, gallery_y, k, measure):
    """Compute the recognition rate at a given level `k`.

    probe_x, gallery_x: feature matrices, one row per sample.
    probe_y, gallery_y: 1-D label arrays for probes / gallery samples.
    k: rank level of the CMC curve.
    measure: callable (probe_x, gallery_x) -> pairwise distance matrix.
    Returns a 1-D array with the per-probe recognition rate at rank k.

    Fixed: `np.float` was removed in NumPy 1.24 and crashed here; the
    builtin `float` is the documented replacement.
    """
    # Label-equality matrix between every probe and every gallery sample.
    label_eq_mat = np.equal(probe_y.reshape(-1, 1), gallery_y.reshape(1, -1)).astype(float)
    # Each probe can match at most k relevant gallery entries at rank k.
    num_relevant = np.minimum(float(k), np.sum(label_eq_mat, axis=1))
    predictions = np.exp(-measure(probe_x, gallery_x))  # Compute similarity.
    # Indices of the k most similar gallery samples for each probe.
    prediction_indices = []
    data_of_row_range = range(len(predictions[0]))
    for row in range(len(predictions)):
        prediction_indices.append(heapq.nlargest(k, data_of_row_range, predictions[row].take))
    # Labels of the retrieved top-k gallery samples, per probe.
    label_mat = []
    for row in range(len(predictions)):
        label_mat.append(gallery_y[prediction_indices[row]])
    label_eq_mat = np.equal(label_mat, probe_y.reshape(-1, 1)).astype(float)
    true_positives_at_k = np.sum(label_eq_mat, axis=1)
    return true_positives_at_k / num_relevant
def cosine_distance(a, b=None):
    """Pairwise cosine distance between the row vectors of `a` and `b`."""
    # Normalise every row of each matrix to unit length, then the dot
    # product gives cosine similarity; distance = 1 - similarity.
    norm_a = np.sqrt(np.square(a).sum(axis=1)).reshape(-1, 1)
    norm_b = np.sqrt(np.square(b).sum(axis=1)).reshape(-1, 1)
    return 1 - np.dot(a / norm_a, (b / norm_b).T)
def cmc_metric_at_k(probe_x, probe_y, gallery_x, gallery_y, k=1):
    """Mean recognition rate at rank `k` over all probes (CMC metric),
    using cosine distance between feature vectors."""
    per_probe_rate = recognition_rate_at_k(
        probe_x, probe_y, gallery_x, gallery_y, k, cosine_distance)
    return np.mean(per_probe_rate)
class CmcMetric(Metric):
    """
    Ignite metric computing the rank-1 CMC (cumulative match characteristic).

    - `update` must receive output of the form `(features, targets)`.
    - Batches are accumulated across a run; on `compute` the first half of
      the accumulated samples acts as probes and the second half as gallery.
    (The previous docstring incorrectly described this as categorical
    accuracy.)
    """
    # Accumulators: torch.Tensor during a run; reset() (re)initialises them.
    _features, _targets = None, None

    def reset(self):
        """Clear the accumulated features and targets."""
        self._features = torch.tensor([], dtype=torch.float)
        self._targets = torch.tensor([], dtype=torch.int)

    def update(self, output):
        """Append one batch of (features, targets) to the accumulators."""
        features, targets = output
        # shape: [batch_size, 128]
        features = features.type_as(self._features)
        targets = targets.type_as(self._targets)
        self._features = torch.cat([self._features, features], dim=0)
        self._targets = torch.cat([self._targets, targets], dim=0)

    def compute(self):
        """Split accumulated samples into probe/gallery halves and score."""
        # change type to ndarray
        features = self._features.numpy()
        targets = self._targets.numpy()
        data_len = len(targets)
        if data_len <= 0:
            return 0
        # np.int was removed in NumPy 1.24; the builtin int is equivalent.
        num_probes = int(data_len / 2)  # num_probes = num_gallery_images
        # Separate the features between the probes and gallery.
        probe_x = features[:num_probes]
        probe_y = targets[:num_probes]
        gallery_x = features[num_probes:data_len]
        gallery_y = targets[num_probes:data_len]
        return cmc_metric_at_k(probe_x, probe_y, gallery_x, gallery_y)
| houweidong/models | training/cmc_metric.py | cmc_metric.py | py | 3,259 | python | en | code | 0 | github-code | 90 |
8868225052 | from Commons import gtk
from Commons import os
from Commons import _
from AF import AudioFile
class TagsEditor(gtk.Dialog):
    """Modal GTK dialog for editing the tags (and filename) of an audio file.

    cfname: full path of the file; treeModel/path locate the row to update;
    columns: column names (index 0 = filename, 1..6 = tag columns);
    colToKey: maps column names to AudioFile tag keys.
    The dialog runs immediately inside __init__ and, on Save, writes the
    tags back to the file, renames it if needed, and updates the tree row.
    """
    def __init__(self, cfname, treeModel, path, columns, colToKey):
        gtk.Dialog.__init__(self)
        # Split the path into directory (cfname[:i]) and file name.
        i = cfname.rfind('/')
        self.set_title(_('Edit Tags'))
        file = AudioFile(cfname[:i], cfname[i+1:])
        self.set_size_request(300, 300)
        vbox = self.get_child()
        # Row for editing the file name itself.
        hbox = gtk.HBox()
        l1 = gtk.Label('File:')
        l1.set_size_request(5,5)
        l1.set_alignment(0.1,0.5)
        # Maps 'file'/column-name -> the gtk.Entry last edited (see changed()).
        self.entries = dict()
        e = gtk.Entry()
        e.set_text(cfname[i+1:])
        e.connect('changed', self.changed, 'file')
        hbox.pack_start(l1)
        hbox.pack_start(e)
        vbox.pack_start(hbox)
        # One labelled entry per tag column (columns 1..6).
        for key in columns[1:7]:
            hbox = gtk.HBox()
            l = gtk.Label(key + ':')
            l.set_size_request(5,5)
            l.set_alignment(0.1,0.5)
            e = gtk.Entry()
            if file.supported:
                text = file.getTagValue(colToKey[key])
            else:
                text = '***Not Supported***'
            e.set_text(text)
            # Tag entries are read-only for unsupported formats.
            e.set_editable(file.supported)
            e.set_sensitive(file.supported)
            e.connect('changed', self.changed, key)
            hbox.pack_start(l)
            hbox.pack_start(e)
            vbox.pack_start(hbox)
        self.add_button(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL)
        self.add_button(gtk.STOCK_SAVE, gtk.RESPONSE_APPLY)
        self.show_all()
        row = treeModel.get_iter(path)
        # Run modally; only entries the user actually changed are saved.
        response = self.run()
        if response == gtk.RESPONSE_CANCEL or response == gtk.RESPONSE_DELETE_EVENT:
            self.destroy()
        elif response == gtk.RESPONSE_APPLY:
            if file.supported:
                for key in self.entries.keys():
                    if key != 'file':
                        val = self.entries[key].get_text()
                        file.writeTagValue(colToKey[key], val)
                        n = columns.index(key)
                        treeModel.set_value(row, n, val)
            # If the file name was edited, rename on disk and update the
            # tree row (last column = full path, column 0 = display name).
            if 'file' in self.entries.keys():
                newname = os.path.join(cfname[:i], self.entries['file'].get_text())
                os.rename(cfname, newname)
                treeModel.set_value(row, len(columns)-1, newname)
                treeModel.set_value(row, 0, self.entries['file'].get_text())
            self.destroy()
    def changed(self, editable, key):
        # Record which entry changed so only edited fields are written back.
        self.entries[key] = editable
| andrebask/cometsound | src/TagsEditorDialog.py | TagsEditorDialog.py | py | 2,598 | python | en | code | 0 | github-code | 90 |
18523439359 | import sys
# Read N 3-D points and choose M of them — presumably maximising
# |sum x| + |sum y| + |sum z| via the classic sign-pattern trick
# (verify against the original problem statement): for each of the 8
# sign patterns, greedily take the M largest signed sums.
N, M = map(int, input().split())
# All 8 sign combinations (±1, ±1, ±1) for the three coordinates.
pm = [(i,j,k) for i in range(-1,2,2) for j in range(-1,2,2) for k in range(-1,2,2)]
lst = []
for _ in range(N):
    x,y,z = map(int, input().split())
    lst.append((x,y,z))
rlt = -sys.maxsize
for a,b,c in pm:
    tmp = []
    for x,y,z in lst:
        tmp.append(a*x+b*y+c*z)
    # Take the M largest signed values for this sign pattern.
    tmp.sort(reverse=True)
    rlt = max(rlt, sum(tmp[:M]))
print(rlt)
30616608738 | """Extension of optical_gater_server for emulating gating with saved brightfield data"""
# Python imports
import sys, os, time, argparse, glob, warnings, platform
import numpy as np
import json
import urllib.request
# Module imports
from loguru import logger
from tqdm.auto import tqdm
# See comment in pyproject.toml for why we have to try both of these:
try:
import skimage.io as tiffio
except:
import tifffile as tiffio
# Local imports
from . import optical_gater_server as server
from . import pixelarray as pa
class FileOpticalGater(server.OpticalGater):
"""Extends the optical gater server for a pre-captured data file.
"""
    def __init__(
        self,
        source=None,
        settings=None,
        ref_frames=None,
        ref_frame_period=None,
        repeats=1,
        force_framerate=False
    ):
        """Function inputs:
            source        str      A path (which may include wildcards) to a tiff file(s)
                                   to be processed as image data.
                                   If NULL, we will look in the settings dictionary
            settings      dict     Parameters affecting operation
                                   (see optical_gating_data/json_format_description.md)
            ref_frames    arraylike
                                   If not Null, this is a sequence of reference frames that
                                   the caller is telling us to use from the start (rather than
                                   optical_gater_server determining a reference sequence from the
                                   supplied input data
            ref_frame_period  float  Noninteger period for supplied ref frames
            repeats        int     Number of times to play through the frames in the source .tif file
            force_framerate  bool  Whether or not to slow down the rate at which new frames
                                   are delivered, such that we emulate real-world speeds
        """
        # Initialise parent
        super(FileOpticalGater, self).__init__(
            settings=settings, ref_frames=ref_frames, ref_frame_period=ref_frame_period)
        self.force_framerate = force_framerate
        # Windows sleep() granularity is too coarse to pace frames reliably.
        if force_framerate and (platform.system() == 'Windows'):
            warnings.warn("force_framerate is unreliable on Windows due to limited granularity of sleep()")
        self.progress_bar = True  # May be updated during run_server
        # How many times to repeat the sequence
        self.repeats_remaining = repeats
        # Load the data
        self.load_data(source)
def load_data(self, filename):
"""Load data file"""
# Load
logger.success("Loading image data...")
self.data = None
# We accumulate the individual files as a list of arrays, and then concatenate them all together
# This copes with the wildcard case where there is more than one image being loaded,
# and this chosen strategy performs much better than np.append when we have lots of individual images.
imageList = []
for fn in tqdm(sorted(glob.glob(filename)), desc='Loading image data'):
logger.debug("Loading image data from file {0}", fn)
imageData = tiffio.imread(fn)
if len(imageData.shape) == 2:
# Cope with loading a single image - convert it to a 1xMxN array
# We have a performance risk here: np.append is inefficient so we can't just append each image individually
# Instead we accumulate a list and then do a single np.array() call at the end.
imageData = imageData[np.newaxis,:,:]
if (((imageData.shape[-1] == 3) or (imageData.shape[-1] == 4))
and (imageData.strides[-1] != 1)):
# skimage.io.imread() seems to automatically reinterpret a 3xMxN array as a colour array,
# and reorder it as MxNx3. We don't want that! I can't find a way to tell imread not to
# do that (as_grayscale does *not* do what I want...). For now I just detect it empirically
# and undo it.
# The test of 'strides' is an empirical one - clearly imread tweaks that to
# reinterpret the original data in a different way to what was intended, but that
# makes it easy to spot
warnings.warn("Looks like imread converted a {0}-timepoint array into a colour array of shape {1}. We will fix that".format(imageData.shape[-1], imageData.shape))
imageData = np.moveaxis(imageData, -1, 0)
imageList.append(imageData)
if len(imageList) > 0:
self.data = np.concatenate(imageList)
else:
# No files found matching the pattern 'filename'
if "source_url" in self.settings["file"]:
if (sys.platform == "win32"):
os.system("color") # Make ascii color codes work
response = input("\033[1;31mFile {0} not found on disk. Do you want to download from the internet? [Y/n]\033[0m\n".format(filename))
if (response.startswith("Y") or response.startswith("y") or (response == "")):
# Download from the URL provided in the settings file
if (len(os.path.dirname(filename))) > 0:
os.makedirs(os.path.dirname(filename), exist_ok=True)
with tqdm(unit='B', unit_scale=True, desc="Downloading") as t:
urllib.request.urlretrieve(self.settings["file"]["source_url"],
filename,
reporthook=tqdm_hook(t))
logger.info("Downloaded file {0}".format(filename))
# Try again
self.data = tiffio.imread(filename)
else:
raise
else:
logger.error("File {0} not found".format(filename))
raise FileNotFoundError("File {0} not found".format(filename))
if "decimate" in self.settings:
# Process just every nth frame.
# Note that I do this after all the data has loaded. While that is temporarily wasteful of memory,
# it's the easiest way to ensure equal decimation even when the data is split across multiple files of unknown length
self.data = self.data[::self.settings["decimate"]]
self.height, self.width = self.data[0].shape
# Initialise frame iterator and time tracker
self.next_frame_index = 0
self.start_time = time.time() # we use this to sanitise our timestamps
self.last_frame_wallclock_time = None
    def run_server(self, show_progress_bar=True):
        """Run the gating server, optionally showing a tqdm progress bar
        sized to the total number of frames that will be served."""
        if show_progress_bar:
            self.progress_bar = tqdm(total=self.data.shape[0]*self.repeats_remaining, desc="Processing frames")
        super().run_server()
    def run_and_analyze_until_stopped(self):
        # Keep pulling frames and feeding them to the analysis pipeline
        # until something sets self.stop (e.g. running out of frames).
        while not self.stop:
            self.analyze_pixelarray(self.next_frame())
    def next_frame(self):
        """Get the next frame from the data source, which can be passed to analyze().

        If force_framerate is True, we will impose a delay to ensure that frames are
        provided at the rate indicated by the settings key "brightfield_framerate".
        That ensures that timings and the timestamps in plots etc are a realistic
        emulation of what would happen on a real system.

        Returns:
            pa.PixelArray: the frame pixels, with a "timestamp" metadata entry (seconds).
        """
        # If emulating real-time acquisition, sleep until this frame is "due".
        if self.force_framerate and (self.last_frame_wallclock_time is not None):
            wait_s = (1 / self.settings["brightfield"]["brightfield_framerate"]) - (
                time.time() - self.last_frame_wallclock_time
            )
            if wait_s > 1e-9:
                # the 1e-9 is a very small time to allow for the calculation
                time.sleep(wait_s - 1e-9)
            elif self.slow_action_occurred is not None:
                # We are behind schedule, but a known-slow action just happened,
                # so this is expected rather than a problem worth warning about.
                logger.success(
                    "File optical gater failed to sustain requested framerate {0}fps for frame {1} (requested negative delay {2}s). " \
                    "But that is no particular surprise, because we just did a {3}".format(
                        self.settings["brightfield"]["brightfield_framerate"],
                        self.next_frame_index,
                        wait_s,
                        self.slow_action_occurred
                    )
                )
            else:
                # Behind schedule with no known excuse - warn the user.
                logger.warning(
                    "File optical gater failed to sustain requested framerate {0}fps for frame {1} (requested negative delay {2}s)".format(
                        self.settings["brightfield"]["brightfield_framerate"],
                        self.next_frame_index,
                        wait_s,
                    )
                )
        if self.progress_bar is not None:
            self.progress_bar.update(1)
        # Reached the final frame: either loop back for another repeat,
        # or flag that we have run out of frames.
        if self.next_frame_index == self.data.shape[0] - 1:
            self.repeats_remaining -= 1
            if self.repeats_remaining <= 0:
                # If this is our last frame we set the stop flag for the user/app to know
                self.stop = 'out-of-frames'
            else:
                # Start again at the first frame in the file
                self.next_frame_index = 0
        if self.force_framerate:
            # We are being asked to follow the specified framerate exactly.
            # We will do the best we can, but there will inevitably be a slight jitter in
            # the actual timings. In the spirit of real-time testing, we use the actual
            # wallclock time as the frame timestamp.
            # (We normalise by the start time, to avoid unnecessarily large numbers)
            this_frame_timestamp = time.time() - self.start_time
        else:
            # We are not being asked to follow the specified framerate exactly,
            # we are just running at whatever speed we can manage.
            # Since the analysis code may be looking at the timestamps,
            # we need to make sure they contain sane numbers
            this_frame_timestamp = self.next_frame_index / float(self.settings["brightfield"]["brightfield_framerate"])
        # NOTE: 'next' shadows the builtin of the same name; left unchanged here.
        next = pa.PixelArray(
            self.data[self.next_frame_index, :, :],
            metadata={
                "timestamp": this_frame_timestamp
            },
        )
        self.next_frame_index += 1
        self.last_frame_wallclock_time = time.time()
        return next
def load_settings(raw_args, desc, add_extra_args=None):
    '''
    Load the settings.json file containing information including
    the path to the .tif file to be processed.

    Params:   raw_args        list      Caller should normally pass sys.argv here
              desc            str       Description to provide as command line help description
              add_extra_args  function  Function describing additional arguments that argparse should expect,
                                        given the specific needs of the caller
    Returns:  dict of settings; the parsed argparse namespace is available under key "parsed_args".
    Raises:   FileNotFoundError if the settings file does not exist and is not downloaded.

    Note that in a settings file, if the key "input_tiff_path" is a relative path then this will be treated
    as relative to the *settings file*, not the current working directory.
    That seems the only sane behaviour, since when writing the settings file we cannot know
    what the current working directory will be when it is used.
    '''
    parser = argparse.ArgumentParser(description=desc,
                                     formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument("settings", help="path to .json file containing settings")
    if (add_extra_args is not None):
        add_extra_args(parser)
    args = parser.parse_args(raw_args)

    # Load the file as a settings file
    settings_file_path = args.settings
    logger.success("Loading settings file {0}...".format(settings_file_path))
    try:
        with open(settings_file_path) as data_file:
            settings = json.load(data_file)
    except FileNotFoundError:
        basename = os.path.basename(settings_file_path)
        if (basename in ["example_data_settings.json", "pi_default_settings.json"]):
            if (sys.platform == "win32"):
                os.system("color")  # Make ascii color codes work
            # Build the URL with an explicit "/" separator. The previous os.path.join
            # would insert "\\" on Windows, producing an invalid URL.
            url = "https://github.com/Glasgow-ICG/open-optical-gating/raw/main/optical_gating_data/" + basename
            response = input("\033[1;31mFile {0} not found on disk. Do you want to download from the internet? [Y/n]\033[0m\n".format(settings_file_path))
            if (response.startswith(("Y", "y")) or (response == "")):
                # Download the standard settings file from github.
                # Only create the parent directory if there is one
                # (os.makedirs("") raises FileNotFoundError).
                dirname = os.path.dirname(settings_file_path)
                if (len(dirname) > 0):
                    os.makedirs(dirname, exist_ok=True)
                urllib.request.urlretrieve(url, settings_file_path)
                with open(settings_file_path) as data_file:
                    settings = json.load(data_file)
            else:
                raise
        else:
            logger.error("File {0} not found".format(settings_file_path))
            raise

    # If a relative path to the data file is specified in the settings file,
    # we will adjust it to be a path relative to the location of the settings file itself.
    # This is the only sane way to behave given that this code could be being run from any working directory
    # (Note that os.path.join correctly handles the case where the second argument is an absolute path)
    if ("file" in settings
        and "input_tiff_path" in settings["file"]):
        settings["file"]["input_tiff_path"] = os.path.join(os.path.dirname(settings_file_path), os.path.expanduser(settings["file"]["input_tiff_path"]))

    # Provide the parsed arguments to the caller, as a way for them to access
    # any additional flags etc that they have specified
    settings["parsed_args"] = args

    return settings
# This next function taken from tqdm example code, to report progress during urlretrieve()
def tqdm_hook(t):
""" Wraps tqdm instance for use with urlretrieve() """
last_b = [0]
def update_to(b=1, bsize=1, tsize=None):
"""
b : int, optional
Number of blocks transferred so far [default: 1].
bsize : int, optional
Size of each block (in tqdm units) [default: 1].
tsize : int, optional
Total size (in tqdm units). If [default: None] or -1,
remains unchanged.
"""
if tsize not in (None, -1):
t.total = tsize
displayed = t.update((b - last_b[0]) * bsize)
last_b[0] = b
return displayed
return update_to
def run(args, desc):
    '''
    Run the optical gater based on a settings.json file which includes
    the path to the .tif file to be processed.

    Params:   args   list   Caller should normally pass sys.argv[1:] here
              desc   str    Description to provide as command line help description
    '''
    def add_extra_args(parser):
        # Optional argument to force realtime playback (see FileOpticalGater constructor). Will default to false if command line option not specified
        parser.add_argument("-r", "--realtime", dest="realtime", action="store_true", help="Replay in realtime (framerate as per settings file)")

    settings = load_settings(args, desc, add_extra_args)

    # Build the gater and process every frame in the file.
    logger.success("Initialising gater...")
    analyser = FileOpticalGater(
        source=settings["file"]["input_tiff_path"],
        settings=settings,
        force_framerate=settings["parsed_args"].realtime
    )
    logger.success("Running server...")
    analyser.run_server()

    # Produce the post-run summary plots for offline inspection.
    logger.success("Plotting summaries...")
    analyser.plot_triggers()
    analyser.plot_prediction()
    analyser.plot_phase_histogram()
    analyser.plot_phase_error_histogram()
    analyser.plot_phase_error_with_time()
    analyser.plot_running()
if __name__ == "__main__":
    # Entry point: process the tiff file specified by the settings json given on the command line.
    run(sys.argv[1:], "Run optical gater on image data contained in tiff file")
| Glasgow-ICG/open-optical-gating | open_optical_gating/cli/file_optical_gater.py | file_optical_gater.py | py | 16,916 | python | en | code | 3 | github-code | 90 |
43146249364 | import pytest
import uuid
from typing import List
from domains.entities.notes_entity import (
NoteEntity,
KeywordEntity
)
from tests.fixtures.notes import (
note_entity_fixture,
note_summary_entity_fixture
)
from apps.notes.exceptions import (
NoteNameLengthLimitError
)
from domains.constants import NOTE_NAME_LENGTH_LIMIT
@pytest.mark.unit
def test_note_entity_consistency(note_entity_fixture):
    """
    ```yaml
    Note:
      id: integer
      authorId: integer
      displayId: string
      name: string
      keywords: Keyword[]
      status: integer
    ```
    """
    note = note_entity_fixture
    assert isinstance(note, NoteEntity)
    for attr_name, expected_type in (
        ("id", int),
        ("authorId", int),
        ("displayId", str),
        ("name", str),
    ):
        assert isinstance(getattr(note, attr_name), expected_type)
    assert note.keywords
    assert isinstance(note.keywords, list)
    assert isinstance(note.status, int)
@pytest.mark.unit
def test_keyword_entity_consistency(note_entity_fixture):
    """
    ```yaml
    Keyword:
      id: integer
      noteId: integer
      posX: integer
      posY: integer
      text: string
      parentId: integer
      status: integer
        - 1: UNSELECT
        - 2: READ
        - 3: EDIT
      timestamp: integer
    ```
    """
    for kw in note_entity_fixture.keywords:
        assert isinstance(kw.id, int)
        assert isinstance(kw.noteId, int)
        assert isinstance(kw.posX, int)
        assert isinstance(kw.posY, int)
        assert isinstance(kw.text, str)
        # parentId is optional: either an integer or absent (None).
        assert kw.parentId is None or isinstance(kw.parentId, int)
        assert isinstance(kw.status, int)
        assert isinstance(kw.timestamp, int)
@pytest.mark.unit
def test_note_summary_consistency(note_summary_entity_fixture):
    """
    ```yaml
    NoteSummary:
      displayId: str
      name: str
    ```
    """
    assert isinstance(note_summary_entity_fixture.displayId, str)
    assert isinstance(note_summary_entity_fixture.name, str)
@pytest.mark.unit
def test_note_name_length_limit():
    """
    Entity(NOTE1): Note.name max_length == 25
    """
    def build_note(name):
        # Helper: construct a NoteEntity differing only in its name.
        return NoteEntity(
            id=1,
            displayId=uuid.uuid4(),
            authorId=1,
            name=name,
            keywords=[],
            status=1,
        )

    # A short name must construct without raising.
    build_note('name')

    # One character over the limit must raise.
    with pytest.raises(NoteNameLengthLimitError):
        build_note('n' * (NOTE_NAME_LENGTH_LIMIT + 1))
| knock-version-1-0/backend-main | src/tests/notes/entity_tests.py | entity_tests.py | py | 2,769 | python | en | code | 0 | github-code | 90 |
40898867343 | import json
from typing import Dict, List, Any
from paramiko import SSHClient
from paramiko.client import AutoAddPolicy
hostname = "10.20.40.224"
port = 22
username = "vedant"
password = "Mind@123"  # SECURITY: hard-coded credentials; consider loading from env/config instead

try:
    # Connect and run mpstat remotely, then emit the parsed per-core stats as JSON.
    client: SSHClient = SSHClient()
    client.set_missing_host_key_policy(AutoAddPolicy())
    client.connect(hostname, port, username, password)
    stdin, stdout, stderr = client.exec_command('mpstat -P ALL')

    if stdout.channel.recv_exit_status() == 0:
        data = stdout.read().decode("utf8")
        cpuList: List[Any] = list()
        listOfLines = data.split("\n")
        # Drop the three mpstat header lines and the trailing empty line.
        del listOfLines[0:3]
        listOfLines.pop(len(listOfLines) - 1)
        finalDisk: Dict[str, List[Any]] = {
            "result": cpuList
        }
        for line in listOfLines:
            # Normalise runs of whitespace, then split into columns.
            fields = " ".join(line.split()).split(" ")
            # Column indices assume mpstat's 12-hour-clock output layout —
            # TODO confirm against the target host's locale.
            cpuList.append({
                "cpu.core": fields[3],
                "cpu.user.percent": float(fields[4]),
                "cpu.system.percent": float(fields[6]),
                "cpu.idle.percent": float(fields[13]),
            })
        # Only print the result when the command succeeded; previously this ran
        # unconditionally and raised NameError when finalDisk was never assigned.
        print(json.dumps(finalDisk))
    else:
        print(f'Error : {stderr.read().decode("utf8")}')

    stdin.close()
    stdout.close()
    stderr.close()
    client.close()
except Exception as e:
    # e.with_traceback() requires a traceback argument; calling it with none
    # raised TypeError and masked the original error.
    print("An error occurred", e)
| Pruthviraj1223/pythonPlugins | ssh/cpu.py | cpu.py | py | 1,437 | python | en | code | 0 | github-code | 90 |
23782550601 | # -*- coding: utf-8 -*-
from __future__ import print_function
from keras.models import Model
from keras.layers import Flatten, Dense, Input
from keras.layers import Convolution2D, MaxPooling2D
from keras import backend as K
import utils
K.set_image_dim_ordering('th')
import warnings
warnings.filterwarnings("ignore")
def VGG16(weights_path=None, input_shape=(3, 224, 224)):
    """Build the VGG16 architecture (Keras 1 API), optionally loading saved weights.

    Layer names ("blockN_convM", "blockN_pool", "fc1", "fc2", "predictions")
    match the canonical VGG16 weight files.
    """
    img_input = Input(shape=input_shape)
    x = img_input

    # (filters, number of conv layers) for convolutional blocks 1..5
    block_specs = [(64, 2), (128, 2), (256, 3), (512, 3), (512, 3)]
    for block_no, (filters, n_convs) in enumerate(block_specs, start=1):
        for conv_no in range(1, n_convs + 1):
            x = Convolution2D(filters, 3, 3, activation='relu', border_mode='same',
                              name='block{0}_conv{1}'.format(block_no, conv_no))(x)
        x = MaxPooling2D((2, 2), strides=(2, 2), name='block{0}_pool'.format(block_no))(x)

    # Classification head
    x = Flatten(name='flatten')(x)
    x = Dense(4096, activation='relu', name='fc1')(x)
    x = Dense(4096, activation='relu', name='fc2')(x)
    x = Dense(1000, activation='softmax', name='predictions')(x)

    model = Model(img_input, x)
    if weights_path:
        model.load_weights(weights_path)
    return model
if __name__ == '__main__':
    # Build VGG16 with the pre-trained weights shipped in utils.DATA_DIR and print a layer summary.
    weights_path = utils.DATA_DIR + utils.WEIGHTS_FILE
    model = VGG16(weights_path)
    print(model.summary())
| noagarcia/keras_rmac | vgg16.py | vgg16.py | py | 2,688 | python | en | code | 84 | github-code | 90 |
# Read n programs (start, end, channel); merge back-to-back programs on the same
# channel into single recording intervals, then find the maximum number of
# simultaneous recordings via a difference array + prefix sum.
n, channels = map(int, input().split())
programs = sorted(list(map(int, input().split())) for _ in range(n))

diff = [0 for _ in range(100005)]
current = [[0, 0] for _ in range(channels)]  # open interval per channel, [0,0] = none
intervals = []
for start, end, ch in programs:
    slot = current[ch - 1]
    if slot[0] == 0:
        # No open interval on this channel yet.
        current[ch - 1] = [start, end]
    elif slot[1] == start:
        # Contiguous with the previous program: extend the open interval.
        slot[1] = end
    else:
        # Gap: close out the previous interval and start a new one.
        intervals.append(slot[:])
        current[ch - 1] = [start, end]
for slot in current:
    if slot != [0, 0]:
        intervals.append(slot)

for start, end in intervals:
    diff[start] += 1
    diff[end + 1] -= 1
running = [0]
for delta in diff:
    running.append(running[-1] + delta)
print(max(running))
32086045363 | """CLI interaction implementation."""
import cmd
import shlex
import Dungeon.logic.DungeonUtils as utils
class Dungeon(cmd.Cmd):
    """Interactive command-line dungeon game built on cmd.Cmd."""

    prompt = '(Dungeon) '
    dungeon_map = [[[] for _ in range(10)] for _ in range(10)]
    player_pos = (0, 0)

    def do_add(self, args):
        """Add new monster."""
        self.dungeon_map = utils.add_to_map(self.dungeon_map, args)

    def do_show(self, args):
        """Print monsters."""
        for monster in utils.get_present_monsters(self.dungeon_map):
            print(monster)

    def do_attack(self, args):
        """Attack monster."""
        x, y = self.player_pos
        cell = self.dungeon_map[x][y]
        # Find the first monster in the current cell with the requested name.
        target = next((m for m in cell if m.name == args), None)
        if target is None:
            print('no {} here'.format(args))
            return
        target.hp -= 10
        if target.hp > 0:
            print('{} lost 10 hp, now has {} hp'.format(target.name, target.hp))
        else:
            print('{} dies'.format(target.name))
            cell.remove(target)

    def do_move(self, args):
        """Move main character."""
        direction = shlex.split(args)[0]
        candidate = utils.move(self.player_pos, direction)
        if not utils.is_pos_valid(candidate):
            print('cannot move')
            return
        self.player_pos = candidate
        x, y = self.player_pos
        print('player at {} {}'.format(x, y))
        if self.dungeon_map[x][y]:
            print('encountered: ' + ', '.join(map(str, self.dungeon_map[x][y])))

    def complete_attack(self, prefix, line, start_index, end_index):
        """Autocomplete attack command."""
        return utils.get_completion_attack(self.dungeon_map, self.player_pos, prefix)

    def complete_move(self, prefix, line, start_index, end_index):
        """Autocomplete move command."""
        return utils.get_completion_move(prefix)
| sanyavertolet/pythonprac | 20220328/1/Dungeon/cli/DungeonCli.py | DungeonCli.py | py | 2,073 | python | en | code | 1 | github-code | 90 |
35014018837 | import json
from os import mkdir
from os.path import expanduser, isfile, isdir
from todo_list.task import Task
class TaskNotFound(Exception):
    """Raised when a task with the requested name is not present in the list."""

    def __init__(self, mes: str = "") -> None:
        super().__init__()
        self.mes = mes  # human-readable detail, echoed by __str__

    def __str__(self) -> str:
        # Fixed return annotation: __str__ must return a str, not None.
        return f"TaskNotFound: {self.mes}"
class ListHandler:
    """Loads, mutates and persists a named to-do list stored as JSON under *folder*.

    Note: paths are built by plain concatenation, so *folder* is expected to end
    with a path separator.
    """

    def __init__(self, name: str, folder: str) -> None:
        self.name = name
        self.folder = folder
        self.open()

    def open(self) -> None:
        """Read the list's JSON file into self.tasks, creating an empty file if absent."""
        path = f"{self.folder}{self.name}.json"
        if isfile(path):
            with open(path, "r") as f:
                self.data = json.load(f)
        else:
            if not isdir(self.folder):
                mkdir(self.folder)
            with open(path, "w") as f:
                f.write('{"tasks":{}}')
            self.data = json.loads('{"tasks":{}}')
        # Stored format: {"tasks": {name: [description, state], ...}}
        self.tasks = []
        for task_name in self.data["tasks"]:
            description, state = self.data["tasks"][task_name]
            self.tasks.append(Task(task_name, description, state))

    def get_tasks(self, state: int = None) -> list:
        """Return all tasks, or only those whose state equals *state*.

        Raises TypeError if *state* is neither None nor an int.
        """
        # Deliberately an exact type check (matches previous behaviour, which
        # also rejected bools).
        if state is not None and type(state) is not int:
            raise TypeError
        if state is None:
            return self.tasks
        return [task for task in self.tasks if task.state == state]

    def write(self) -> None:
        """Serialise the current tasks back to the list's JSON file."""
        task_data = {}
        for task in self.tasks:
            task_data.update(task.get_raw())
        raw_str = json.dumps({"name": self.name, "tasks": task_data})
        with open(f"{self.folder}{self.name}.json", "w") as file:
            file.write(raw_str)

    def add_task(self, name: str, description: str, state: int) -> None:
        """Append a new task (not persisted until write() is called)."""
        self.tasks.append(Task(name, description, state))

    def delete_task(self, name: str) -> None:
        """Remove the first task called *name*; raise TaskNotFound if absent."""
        for index, task in enumerate(self.tasks):
            if task.name == name:
                del self.tasks[index]
                return None
        raise TaskNotFound(f"Task {name} not found")

    def change_state(self, name: str, state: int = 1) -> None:
        """Set the state of every task called *name* (silently no-op if none match)."""
        for task in self.tasks:
            if task.name == name:
                task.state = state
| Jonas-Luetolf/Todo-List | todo_list/listhandler.py | listhandler.py | py | 2,272 | python | en | code | 1 | github-code | 90 |
# Read (midterm, final, retest) triples until the (-1, -1, -1) sentinel,
# printing the grade for each student.
while True:
    parts = input().split()
    m = int(parts[0])
    f = int(parts[1])
    r = int(parts[2])
    if m == -1 and f == -1 and r == -1:
        break
    total = m + f
    if m == -1 or f == -1:
        # Missing either exam is an automatic F.
        print("F")
    elif total >= 80:
        print("A")
    elif total >= 65:
        print("B")
    elif total >= 50:
        print("C")
    elif total >= 30:
        # Borderline students can rescue a C with a retest score of 50 or more.
        print("C" if r >= 50 else "D")
    else:
        print("F")
13005146302 | from typing import List, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..modeling_utils import ModuleUtilsMixin
from .composition import AdapterCompositionBlock, BatchSplit, Parallel, Stack, adjust_tensors_for_parallel
from .configuration import PrefixTuningConfig
from .context import AdapterSetup, ForwardContext
from .layer import AdapterLayerBase
from .modeling import Activation_Function_Class
class PrefixTuning(nn.Module, ModuleUtilsMixin):
    """Prefix tuning module that reparameterizes the prefix through a bottleneck MLP.

    One embedding is learned per prefix position; the MLP expands each embedding
    into key/value states for all ``n_layers`` layers at once.
    """

    def __init__(
        self,
        n_layers: int,
        n_heads: int,
        input_size: int,
        config: PrefixTuningConfig,
    ):
        super().__init__()
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.input_size = input_size
        # Per-head embedding size; assumes input_size is divisible by n_heads.
        self.n_embd_per_head = self.input_size // self.n_heads
        self.config = config

        # One trainable embedding per prefix position.
        self.wte = nn.Embedding(self.config.prefix_length, self.input_size)
        # Bottleneck MLP producing key & value states for every layer in one shot.
        self.control_trans = nn.Sequential(
            nn.Linear(self.input_size, self.config.bottleneck_size),
            Activation_Function_Class(self.config.non_linearity.lower()),
            nn.Linear(self.config.bottleneck_size, self.n_layers * 2 * self.input_size),
        )
        self.dropout = nn.Dropout(self.config.dropout)

    def eject(self):
        # Materialise the prefix weights as one flat vector so the embedding/MLP
        # can be discarded (used when converting to FlatPrefixTuning).
        input_tokens = torch.arange(self.config.prefix_length).long()
        input_tokens = input_tokens.unsqueeze(0).expand(1, -1).to(self.device)
        embs = self.wte(input_tokens)
        key_values = self.control_trans(embs)  # batch_size x prefix_length x n_layers*2*input_size
        key_values = key_values.view(
            self.config.prefix_length * self.n_layers * 2 * self.input_size
        )  # *2 for key and value

        return key_values

    def forward(self, batch_size):
        # Recompute the prefix for the requested batch size; the same prefix
        # positions are shared across the batch via expand().
        input_tokens = torch.arange(self.config.prefix_length).long()
        input_tokens = input_tokens.unsqueeze(0).expand(batch_size, -1).to(self.device)
        embs = self.wte(input_tokens)
        key_values = self.control_trans(embs)  # batch_size x prefix_length x n_layers*2*input_size
        key_values = key_values.view(
            batch_size, self.config.prefix_length, self.n_layers * 2, self.n_heads, self.n_embd_per_head
        )  # *2 for key and value
        key_values = self.dropout(key_values)
        # n_layers * (2 x batch_size x n_heads x prefix_length x n_embd_per_head)
        key_values = key_values.permute(2, 0, 3, 1, 4).split(2)

        return key_values
class FlatPrefixTuning(nn.Module, ModuleUtilsMixin):
    """Prefix tuning module that stores the prefix key/value states directly as a
    single flat trainable parameter (no embedding/MLP reparameterization)."""

    def __init__(
        self,
        n_layers: int,
        n_heads: int,
        input_size: int,
        config: PrefixTuningConfig,
    ):
        super().__init__()
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.input_size = input_size
        # Per-head embedding size; assumes input_size is divisible by n_heads.
        self.n_embd_per_head = self.input_size // self.n_heads
        self.config = config

        # Flat parameter holding all key/value prefix states for all layers.
        self.control_trans = nn.Parameter(torch.randn(self.config.prefix_length * self.n_layers * 2 * self.input_size))

        self.dropout = nn.Dropout(self.config.dropout)

    def forward(self, batch_size):
        # Broadcast the single stored prefix across the batch dimension.
        key_values = (
            self.control_trans.unsqueeze(0)
            .expand(batch_size, -1)
            .view(batch_size, self.config.prefix_length, self.n_layers * 2, self.n_heads, self.n_embd_per_head)
            .to(self.device)
        )  # *2 for key and value
        key_values = self.dropout(key_values)
        # n_layers * (2 x batch_size x n_heads x prefix_length x n_embd_per_head)
        key_values = key_values.permute(2, 0, 3, 1, 4).split(2)

        return key_values
class PrefixTuningGroup(nn.ModuleDict):
    """Holds one prefix tuning module per location key for a single adapter."""

    def __init__(self, module_configs, prefix_tuning_config):
        super().__init__()
        # "flat" prefixes store weights directly; otherwise use the MLP variant.
        module_class = FlatPrefixTuning if prefix_tuning_config["flat"] else PrefixTuning
        for location_key, module_kwargs in module_configs.items():
            self[location_key] = module_class(**module_kwargs, config=prefix_tuning_config)

    def eject(self):
        """Converts all PrefixTuning modules into FlatPrefixTuning modules."""
        for location_key, module in self.items():
            if not isinstance(module, PrefixTuning):
                continue
            flat_config = module.config.replace(flat=True)
            flat = FlatPrefixTuning(module.n_layers, module.n_heads, module.input_size, flat_config)
            # Materialise the reparameterized weights before replacing the module.
            flat.control_trans = nn.Parameter(module.eject())
            self[location_key] = flat

    def forward(self, batch_size):
        states = {}
        for location_key, module in self.items():
            states[location_key] = module(batch_size)
        return states
class PrefixTuningPool(nn.Module):
    """
    The model layer that holds all Prefix Tuning prefixes. While each Transformers layer has its own prefix, this layer
    is shared across all Transformers layers.

    How it works:

        1. A `PrefixTuningShim` module that sets this module as pool module is added to each layer.
        2. On adding a prefix, each shim module where a prefix should be added increments a counter in `prefix_counts`.
        3. Finally, the base model class confirms adding a new prefix by calling `confirm_prefix()`.
        4. This module adds a prefix layer that produces outputs corresponding to the indicated number of layers.

    Notes:

        - The forward call to this layer is executed in the ForwardContext of each model pass.
        - All other methods of this class (except for `confirm_prefix()`) should be called exclusively by
          `PrefixTuningShim`.

    Args:
        config (:class:`~transformers.PretrainedConfig`): The model config.
    """

    def __init__(self, config):
        super().__init__()
        self.config = config
        # prefix name -> location key -> {"count": <layers seen>, **kwargs (e.g. n_heads, input_size)}
        self.prefix_counts = {}
        self.prefix_tunings = nn.ModuleDict()

    def indicate_prefix(self, prefix_name: str, location_key: str, **kwargs):
        # Register one layer's interest in this prefix/location and return the
        # zero-based index of the calling layer within that prefix/location.
        if prefix_name not in self.prefix_counts:
            self.prefix_counts[prefix_name] = {location_key: {"count": 1, **kwargs}}
        elif location_key not in self.prefix_counts[prefix_name]:
            self.prefix_counts[prefix_name][location_key] = {"count": 1, **kwargs}
        else:
            # TODO-AH: Check if kwargs are the same
            self.prefix_counts[prefix_name][location_key]["count"] += 1

        return self.prefix_counts[prefix_name][location_key]["count"] - 1

    def confirm_prefix(self, prefix_name: str):
        """Create Prefix Tuning module based on shim layer infications."""
        prefix_tuning_config = self.config.adapters.match(prefix_name, PrefixTuningConfig)
        if prefix_tuning_config is None:
            # No prefix tuning config matches this name - nothing to create.
            return

        if prefix_name not in self.prefix_counts:
            raise ValueError(f"Prefix {prefix_name} not found in PrefixTuningPool")

        # Turn the per-location counters collected by the shims into module kwargs.
        module_configs = {}
        for location_key, location_config in self.prefix_counts[prefix_name].items():
            module_configs[location_key] = {
                "n_layers": location_config["count"],
                "n_heads": location_config["n_heads"],
                "input_size": location_config["input_size"],
            }
        prefix_tuning = PrefixTuningGroup(module_configs, prefix_tuning_config)
        prefix_tuning.train(self.training)  # make sure training mode is consistent
        self.prefix_tunings[prefix_name] = prefix_tuning
        # Counters are only needed during construction.
        del self.prefix_counts[prefix_name]

    def delete_prefix(self, prefix_name: str):
        if prefix_name in self.prefix_tunings:
            del self.prefix_tunings[prefix_name]

    def enable_prefix(self, prefix_name: str):
        # Unfreeze all parameters of the named prefix for training.
        if prefix_name in self.prefix_tunings:
            for param in self.prefix_tunings[prefix_name].parameters():
                param.requires_grad = True

    def get_prefix(self, prefix_name: str):
        if prefix_name in self.prefix_tunings:
            return self.prefix_tunings[prefix_name]
        else:
            return None

    def forward(self, *args, **kwargs):
        # Determine the active adapter setup, preferring an explicit AdapterSetup context.
        context = AdapterSetup.get_context()
        if context is not None:
            adapter_setup = context.adapter_setup
        else:
            adapter_setup = self.config.adapters.active_setup

        prefix_states = {}
        if adapter_setup is not None:
            # Infer batch size
            input_tensor_names = ["input_ids", "decoder_input_ids", "attention_mask", "inputs_embeds", "pixel_values"]
            batch_size = None
            for name in input_tensor_names:
                if kwargs.get(name, None) is not None:
                    batch_size = kwargs[name].size(0)
                    break
            if batch_size is None:
                if len(args) > 0:
                    batch_size = args[0].size(0)
                else:
                    raise ValueError("Could not infer batch size for prefix tuning from inputs.")

            # Pass to sub-layers
            for name in adapter_setup.flatten():
                if name in self.prefix_tunings:
                    prefix_states[name] = self.prefix_tunings[name](batch_size)

        return prefix_states
class PrefixTuningShim(AdapterLayerBase, nn.Module):
"""
Representation of a Prefix Tuning layer within one Transformer layer. This class implements `AdapterLayerBase` for
compatibility with adapters. It uses `PrefixTuningPool` in the background and `set_pool()` must be called after
initialization.
Args:
location_key (str): The id describing the location of this layer in the model.
Currently, can be "encoder_prefix", "cross_prefix" or None.
config (:class:`~transformers.PretrainedConfig`): The model config.
"""
def __init__(self, location_key: str, config, add_model_type_to_key: bool = False):
super().__init__()
self.config = config
self.location_key = location_key
if add_model_type_to_key:
self.location_key = f"{self.config.model_type}_{self.location_key}"
self.prefixes = {}
self.prefix_gates = nn.ModuleDict()
    def set_pool(self, pool: PrefixTuningPool):
        # Attach the shared pool. NOTE(review): an explicit self.__setattr__ call
        # still goes through nn.Module's attribute handling, so this presumably
        # registers the pool like a normal assignment would — confirm intent.
        self.__setattr__("pool", pool)
    def add_adapter(self, adapter_name: str, layer_idx: int):
        """Register this layer with the pool for *adapter_name*, if a prefix
        tuning config matches; optionally create a gating layer."""
        self.layer_idx = layer_idx
        # only match location keys for which we have config keys
        if self.location_key.startswith("cross") or self.location_key.startswith("encoder"):
            used_location_key = self.location_key
        else:
            used_location_key = None
        prefix_tuning_config = self.config.adapters.match(
            adapter_name,
            config_type=PrefixTuningConfig,
            layer_idx=self.layer_idx,
            location_key=used_location_key,
        )
        if prefix_tuning_config is not None:
            # Remember this layer's index within the shared prefix module.
            prefix_id = self.pool.indicate_prefix(
                adapter_name,
                self.location_key,
                n_heads=self.config.num_attention_heads,
                input_size=self.config.hidden_size,
            )
            self.prefixes[adapter_name] = prefix_id

            if prefix_tuning_config.use_gating:
                # One gate output shared between key and value, or two separate ones.
                gate_outputs = 1 if prefix_tuning_config.shared_gating else 2
                gate = nn.Linear(self.config.hidden_size, gate_outputs)
                gate.weight.data.normal_(mean=0.0, std=0.02)
                self.prefix_gates[adapter_name] = gate
def delete_adapter(self, adapter_name: str):
self.pool.delete_prefix(adapter_name)
if adapter_name in self.prefixes:
del self.prefixes[adapter_name]
if adapter_name in self.prefix_gates:
del self.prefix_gates[adapter_name]
    def add_fusion_layer(self, adapter_names: Union[List, str]):
        """No-op: adapter fusion does not apply to prefix tuning."""
        pass  # not applicable to prefix tuning
    def delete_fusion_layer(self, adapter_names: Union[List, str]):
        """No-op: adapter fusion does not apply to prefix tuning."""
        pass  # not applicable to prefix tuning
def enable_adapters(self, adapter_setup: AdapterCompositionBlock, unfreeze_adapters: bool, unfreeze_fusion: bool):
if unfreeze_adapters:
for prefix_tuning_name in adapter_setup.flatten():
self.pool.enable_prefix(prefix_tuning_name)
if prefix_tuning_name in self.prefix_gates:
for param in self.prefix_gates[prefix_tuning_name].parameters():
param.requires_grad = unfreeze_adapters
def get_adapter(self, adapter_name):
return_dict = nn.ModuleDict()
# Make sure to only return params once
if adapter_name in self.prefixes and self.prefixes[adapter_name] == 0:
prefix_module = self.pool.get_prefix(adapter_name)
if prefix_module is not None:
return_dict["prefix"] = prefix_module[self.location_key]
if adapter_name in self.prefix_gates:
return_dict["gate"] = self.prefix_gates[adapter_name]
if len(return_dict) > 0:
return return_dict
return None
    def single_forward(
        self,
        adapter_name: str,
        key_states,
        value_states,
        residual_input,
        attention_mask=None,
        invert_mask=True,
        idx_range=None,
    ):
        """Prepend one adapter's prefix key/value states to the attention key/value
        states (along the sequence dimension), optionally applying a learned gate,
        and extend the attention mask so prefix positions are attended to.

        Assumes key/value states have the sequence dimension at dim 2, matching the
        (batch x n_heads x prefix_length x n_embd_per_head) prefix states — TODO
        confirm against callers.
        """
        prefix_id = self.prefixes[adapter_name]
        batch_size = key_states.size(0)

        # Retrieve pre-computed prefix states from context
        context = ForwardContext.get_context()
        # batch_size x n_heads x prefix_length x n_embd_per_head
        prefix_keys, prefix_values = context.prefix_states[adapter_name][self.location_key][prefix_id]

        # select index range for batch split
        if idx_range is not None:
            prefix_keys = prefix_keys[idx_range]
            prefix_values = prefix_values[idx_range]

        if adapter_name in self.prefix_gates:
            gate = self.prefix_gates[adapter_name]
            # Gate scores are averaged over the sequence dimension of the residual.
            gate_output = torch.mean(torch.sigmoid(gate(residual_input)), dim=1)
            self._store_gating_score(adapter_name, gate_output)
            # With shared gating the same column is used for both key and value.
            gate_output_key = gate_output[:, 0].view(-1, 1, 1, 1)
            gate_output_value = gate_output[:, -1].view(-1, 1, 1, 1)
            prefix_keys = prefix_keys * gate_output_key
            prefix_values = prefix_values * gate_output_value

        # replicate for Parallel block
        prefix_keys, prefix_values = adjust_tensors_for_parallel(key_states, prefix_keys, prefix_values)

        key_states = torch.cat([prefix_keys, key_states], dim=2)
        value_states = torch.cat([prefix_values, value_states], dim=2)
        if attention_mask is not None:
            if attention_mask.dim() == 2:  # e.g. for DistilBERT, attention_mask has shape (batch_size, seq_len)
                prefix_mask = torch.ones(batch_size, prefix_keys.size(2)).to(attention_mask.device)
            else:
                prefix_mask = torch.ones(batch_size, 1, attention_mask.size(2), prefix_keys.size(2)).to(
                    attention_mask.device
                )
            if invert_mask:
                # In inverted masks 0.0 means "attend"; flip the ones accordingly.
                prefix_mask = 1.0 - prefix_mask
            (prefix_mask,) = adjust_tensors_for_parallel(attention_mask, prefix_mask)
            attention_mask = torch.cat([prefix_mask, attention_mask], dim=-1)

        return key_states, value_states, residual_input, attention_mask
    def _pad_and_concat(self, max_prefix_length, outputs, invert_mask=True):
        """Pads all key & value states to the longest prefix length in the current batch.
        This is required e.g. for stacked prefix tunings.
        """
        all_key_states, all_value_states, all_residual_input, all_attention_mask = [], [], [], []
        for key_states, value_states, residual_input, attention_mask in outputs:
            # pad sizes
            pad_length = max_prefix_length - key_states.shape[-2]
            pad_size = (0, 0, pad_length, 0)
            key_states = F.pad(key_states, pad_size, "constant", self.config.pad_token_id)
            value_states = F.pad(value_states, pad_size, "constant", self.config.pad_token_id)

            # pad attention mask
            if pad_length > 0:
                # Masking the padded tokens only works correctly if attention_mask is set
                # We assume this to be the case at this point
                assert attention_mask is not None, "Attention mask must be set for prefix tuning"
                attention_mask = F.pad(
                    attention_mask,
                    (max_prefix_length - attention_mask.shape[-1], 0),
                    "constant",
                    1.0 if invert_mask else 0.0,
                )

            all_key_states.append(key_states)
            all_value_states.append(value_states)
            all_residual_input.append(residual_input)
            all_attention_mask.append(attention_mask)

        # Concatenate the per-split outputs back along the batch dimension.
        all_key_states = torch.cat(all_key_states, dim=0)
        all_value_states = torch.cat(all_value_states, dim=0)
        all_residual_input = torch.cat(all_residual_input, dim=0)
        all_attention_mask = torch.cat(all_attention_mask, dim=0) if attention_mask is not None else None

        return all_key_states, all_value_states, all_residual_input, all_attention_mask
    def adapter_stack(
        self,
        adapter_setup: Stack,
        key_states,
        value_states,
        residual_input,
        attention_mask=None,
        invert_mask=True,
        idx_range=None,
        lvl=0,
    ):
        """Sequentially applies the children of a ``Stack`` composition block.

        Each child — a nested ``Parallel``/``BatchSplit`` block or a single
        prefix tuning module registered in ``self.prefixes`` — transforms the
        running key/value states and attention mask, and its output is fed to
        the next child. Only one level of composition nesting is allowed.

        Returns:
            Tuple of (key_states, value_states, residual_input, attention_mask).
        """
        for adapter_stack_layer in adapter_setup:
            # Break if setup is too deep
            if isinstance(adapter_stack_layer, AdapterCompositionBlock) and lvl >= 1:
                raise ValueError(
                    "Specified adapter setup is too deep. Cannot have {} at level {}".format(
                        adapter_stack_layer.__class__.__name__, lvl
                    )
                )
            # We have a nested parallel layer -> call parallel method
            elif isinstance(adapter_stack_layer, Parallel):
                key_states, value_states, residual_input, attention_mask = self.adapter_parallel(
                    adapter_stack_layer,
                    key_states,
                    value_states,
                    residual_input,
                    attention_mask,
                    invert_mask=invert_mask,
                    idx_range=idx_range,
                    lvl=lvl + 1,
                )
            # We have a nested batch split block -> call batchsplit method
            elif isinstance(adapter_stack_layer, BatchSplit):
                key_states, value_states, residual_input, attention_mask = self.adapter_batchsplit(
                    adapter_stack_layer,
                    key_states,
                    value_states,
                    residual_input,
                    attention_mask,
                    invert_mask=invert_mask,
                    idx_range=idx_range,
                    lvl=lvl + 1,
                )
            # We have a single prefix tuning module part of this model -> forward pass
            # (note: single_forward's residual output is intentionally discarded)
            elif adapter_stack_layer in self.prefixes:
                key_states, value_states, _, attention_mask = self.single_forward(
                    adapter_stack_layer,
                    key_states,
                    value_states,
                    residual_input,
                    attention_mask,
                    invert_mask,
                    idx_range=idx_range,
                )
            # Nesting other composition blocks is invalid
            elif isinstance(adapter_stack_layer, AdapterCompositionBlock):
                raise ValueError(
                    "Invalid adapter setup. Cannot nest {} in {}".format(
                        adapter_stack_layer.__class__.__name__, adapter_setup.__class__.__name__
                    )
                )
            # As all prefix tuning modules are centrally stored, fail if not found.
            else:
                raise ValueError(f"Unknown prefix tuning name '{adapter_stack_layer}'.")
        return key_states, value_states, residual_input, attention_mask
    def adapter_parallel(
        self,
        adapter_setup: Parallel,
        key_states,
        value_states,
        residual_input,
        attention_mask=None,
        invert_mask=True,
        idx_range=None,
        lvl=0,
    ):
        """
        For parallel execution of the adapters on the same input. This means that the input is repeated N times before
        feeding it to the adapters (where N is the number of adapters).

        On the first Parallel block encountered in a forward pass (tracked via
        ``ForwardContext.adapters_parallelized``) the inputs are replicated here;
        afterwards the (already replicated) batch is split into N equal slices,
        one per child. Child outputs are left-padded to the longest prefix and
        re-concatenated along the batch dimension before returning.
        """
        context = ForwardContext.get_context()
        if not context.adapters_parallelized:
            # First Parallel block in this pass: replicate inputs once per channel.
            orig_batch_size = residual_input.shape[0]
            residual_input = residual_input.repeat(self.config.adapters.active_setup.parallel_channels, 1, 1, 1)
            key_states = key_states.repeat(self.config.adapters.active_setup.parallel_channels, 1, 1, 1)
            value_states = value_states.repeat(self.config.adapters.active_setup.parallel_channels, 1, 1, 1)
            if attention_mask is not None:
                if attention_mask.dim() == 2:  # e.g. for DistilBERT, attention_mask has shape (batch_size, seq_len)
                    attention_mask = attention_mask.repeat(self.config.adapters.active_setup.parallel_channels, 1)
                else:
                    attention_mask = attention_mask.repeat(
                        self.config.adapters.active_setup.parallel_channels, 1, 1, 1
                    )
            context.adapters_parallelized = True
        else:
            # The base model should handle replication of input.
            # Therefore, we assume the (replicated) input batch to be divisible by the number of parallel channels.
            if residual_input.shape[0] % adapter_setup.parallel_channels != 0:
                raise ValueError(
                    "The total input batch size in a Parallel adapter block must be divisible by the number of"
                    " parallel channels."
                )
            orig_batch_size = residual_input.shape[0] // adapter_setup.parallel_channels
        # sequentially feed different parts of the blown-up batch into different adapters
        children_outputs = []
        # track which prefix is longest for padding in the end
        max_prefix_length = 0
        for i, child in enumerate(adapter_setup):
            # construct inputs to child modules: the i-th batch slice of each tensor
            inputs = {
                "key_states": key_states[i * orig_batch_size : (i + 1) * orig_batch_size],
                "value_states": value_states[i * orig_batch_size : (i + 1) * orig_batch_size],
                "residual_input": residual_input[i * orig_batch_size : (i + 1) * orig_batch_size],
                "attention_mask": attention_mask[i * orig_batch_size : (i + 1) * orig_batch_size]
                if attention_mask is not None
                else None,
                "invert_mask": invert_mask,
                "idx_range": idx_range,
            }
            # Case 1: We have a nested stack -> call stack method
            if isinstance(child, Stack):
                child_outputs = self.adapter_stack(
                    child,
                    **inputs,
                    lvl=lvl + 1,
                )
                children_outputs.append(child_outputs)
            # Case 2. We have a nested batchsplit block -> call batchsplit method
            elif isinstance(child, BatchSplit):
                child_outputs = self.adapter_batchsplit(
                    child,
                    **inputs,
                    lvl=lvl + 1,
                )
                children_outputs.append(child_outputs)
            # Case 3: We have a single adapter which is part of this module -> forward pass
            elif child in self.prefixes:
                child_outputs = self.single_forward(
                    child,
                    **inputs,
                )
                children_outputs.append(child_outputs)
            # Case 4: nesting other composition blocks is invalid
            elif isinstance(child, AdapterCompositionBlock):
                raise ValueError(
                    "Invalid adapter setup. Cannot nest {} in {}".format(
                        child.__class__.__name__, adapter_setup.__class__.__name__
                    )
                )
            # As all prefix tuning modules are centrally stored, fail if not found.
            else:
                raise ValueError(f"Unknown prefix tuning name '{child}'.")
            # update max prefix length (prefix length lives on the -2 dim of key states)
            current_prefix_length = child_outputs[0].shape[-2]
            if current_prefix_length > max_prefix_length:
                max_prefix_length = current_prefix_length
        # concatenate all outputs and return
        key_states, value_states, residual_input, attention_mask = self._pad_and_concat(
            max_prefix_length, children_outputs, invert_mask=invert_mask
        )
        return key_states, value_states, residual_input, attention_mask
    def adapter_batchsplit(
        self,
        adapter_setup: BatchSplit,
        key_states,
        value_states,
        residual_input,
        attention_mask=None,
        invert_mask=True,
        idx_range=None,
        lvl=0,
    ):
        """Applies the children of a ``BatchSplit`` block, each to its own batch slice.

        The batch is partitioned according to ``adapter_setup.batch_sizes``; child i
        sees only its contiguous slice of every input tensor. Child outputs are
        left-padded to the longest prefix length and re-concatenated along dim 0.

        Raises:
            IndexError: if the batch sizes do not sum to the actual batch size.
        """
        if not sum(adapter_setup.batch_sizes) == key_states.shape[0]:
            raise IndexError(
                "The given batch has a size of {} which is not compatible with batch_sizes {}".format(
                    key_states.shape[0], adapter_setup.batch_sizes
                )
            )

        children_outputs = []
        # track which prefix is longest for padding in the end
        max_prefix_length = 0
        for i, adapter_block in enumerate(adapter_setup):
            # compute ids of sequences that should be passed to the ith adapter
            # (offset by idx_range.start when this block is itself nested in a split)
            if idx_range is None:
                split_idx_range = range(
                    sum(adapter_setup.batch_sizes[:i]),
                    sum(adapter_setup.batch_sizes[: i + 1]),
                )
            else:
                split_idx_range = range(
                    idx_range.start + sum(adapter_setup.batch_sizes[:i]),
                    idx_range.start + sum(adapter_setup.batch_sizes[: i + 1]),
                )
            inputs = {
                "key_states": key_states[split_idx_range],
                "value_states": value_states[split_idx_range],
                "residual_input": residual_input[split_idx_range],
                "attention_mask": attention_mask[split_idx_range] if attention_mask is not None else None,
                "invert_mask": invert_mask,
                "idx_range": split_idx_range,
            }
            # Case 1: We have a nested stack -> call stack method
            if isinstance(adapter_block, Stack):
                child_outputs = self.adapter_stack(
                    adapter_block,
                    **inputs,
                    lvl=lvl + 1,
                )
                children_outputs.append(child_outputs)
            # Case 2: We have a nested batch split block -> call batchsplit method
            elif isinstance(adapter_block, BatchSplit):
                child_outputs = self.adapter_batchsplit(
                    adapter_block,
                    **inputs,
                    lvl=lvl + 1,
                )
                children_outputs.append(child_outputs)
            # Case 4: We have a single adapter which is part of this module -> forward pass
            elif adapter_block in self.prefixes:
                child_outputs = self.single_forward(
                    adapter_block,
                    **inputs,
                )
                children_outputs.append(child_outputs)
            # Case 5: nesting other composition blocks is invalid
            elif isinstance(adapter_block, AdapterCompositionBlock):
                raise ValueError(
                    "Invalid adapter setup. Cannot nest {} in {}".format(
                        adapter_block.__class__.__name__, adapter_setup.__class__.__name__
                    )
                )
            # As all prefix tuning modules are centrally stored, fail if not found.
            else:
                raise ValueError(f"Unknown prefix tuning name '{adapter_block}'.")
            # update max prefix length (prefix length lives on the -2 dim of key states)
            current_prefix_length = child_outputs[0].shape[-2]
            if current_prefix_length > max_prefix_length:
                max_prefix_length = current_prefix_length
        # concatenate all outputs and return
        key_states, value_states, residual_input, attention_mask = self._pad_and_concat(
            max_prefix_length, children_outputs, invert_mask=invert_mask
        )
        return key_states, value_states, residual_input, attention_mask
def forward(self, key_states, value_states, residual_input, attention_mask=None, invert_mask=True):
adapter_setup = self.get_active_setup(self.prefixes)
if adapter_setup is not None:
if isinstance(adapter_setup, Stack):
key_states, value_states, _, attention_mask = self.adapter_stack(
adapter_setup,
key_states,
value_states,
residual_input,
attention_mask=attention_mask,
invert_mask=invert_mask,
)
elif isinstance(adapter_setup, Parallel):
key_states, value_states, _, attention_mask = self.adapter_parallel(
adapter_setup,
key_states,
value_states,
residual_input,
attention_mask=attention_mask,
invert_mask=invert_mask,
)
elif isinstance(adapter_setup, BatchSplit):
key_states, value_states, _, attention_mask = self.adapter_batchsplit(
adapter_setup,
key_states,
value_states,
residual_input,
attention_mask=attention_mask,
invert_mask=invert_mask,
)
else:
raise ValueError(f"Invalid adapter setup. Cannot use {adapter_setup} with prefix tuning.")
return key_states, value_states, attention_mask
| adapter-hub/adapter-transformers | src/transformers/adapters/prefix_tuning.py | prefix_tuning.py | py | 30,155 | python | en | code | 1,700 | github-code | 90 |
27613156404 | import os
import readline
from ghost.core.badges import Badges
from ghost.core.server import Server
from ghost.core.helper import Helper
from ghost.core.ghost import Ghost
class Console:
    """Interactive command shell for the Ghost Framework."""

    # Default ADB port used when the user gives a bare address.
    DEFAULT_PORT = 5555

    def __init__(self):
        self.badges = Badges()
        self.server = Server()
        self.helper = Helper()
        self.ghost = Ghost()

    def banner(self):
        """Print the startup ASCII art, version and project links."""
        print("""
   ________.__                    __          ,
  /  _____/|  |__   ____  _______/  |_        \\`-,                 , =-
 /   \\  ___|  |  \\ /  _ \\/  ___/\\   __\\  .-._/    \\_____)\\
 \\    \\_\\  \\   Y  (  <_> )___ \\  |  |  ("           / =-
  \\______  /___|  /\\____/____  > |__|    '-;  ,_____.-'    =-
         \\/     \\/           \\/             /__.'
        """)
        print("Ghost Framework " + self.helper.version)
        print("--------------------")
        print("""
[*] Also explore our last projects:
 *  HatSploit (https://github.com/EntySec/HatSploit) - Modular exploitation
 *  HatVenom (https://github.com/EntySec/HatVenom) - Binary exploitation
 *  Shreder (https://github.com/EntySec/Shreder) - SSH bruteforce
 *  RomBuster (https://github.com/EntySec/RomBuster) - Router hacking
 *  CamOver (https://github.com/EntySec/CamOver) - Camera hacking
 *  CamRaptor (https://github.com/EntySec/CamRaptor) - Camera hacking

[i] Please rate my work by star :)
 Our Twitter: @entysec
 CEO Twitter: @enty8080
        """)

    def shell(self):
        """Run the interactive read-eval loop until the user types 'exit'."""
        readline.parse_and_bind('tab: complete')
        while True:
            try:
                ui = input('\033[4mghost\033[0m> ').strip(" ")
                ui = ui.split()
                if not ui:
                    continue
                elif ui[0] == "exit":
                    break
                elif ui[0] == "clear":
                    os.system("clear")
                elif ui[0] == "help":
                    self._print_help()
                elif ui[0] == "connect":
                    self._connect(ui)
                elif ui[0] == "disconnect":
                    self._disconnect(ui)
                else:
                    print(self.badges.E + "Unrecognized command!")
            except (EOFError, KeyboardInterrupt):
                # Ctrl-C / Ctrl-D just redisplays the prompt.
                pass
            except Exception as e:
                print("An error occurred: " + str(e) + "!")

    def _print_help(self):
        """Print the core command reference."""
        print("")
        print("Core Commands")
        print("=============")
        print("")
        print(" Command Description")
        print(" ------- -----------")
        print(" clear Clear terminal window.")
        print(" connect Connect to the specified device.")
        print(" disconnect Disconnect specified device.")
        print(" exit Exit Ghost Framework.")
        print(" help Show available commands.")
        print("")

    def _connect(self, ui):
        """Handle `connect <address>[:<port>]`."""
        if len(ui) < 2:
            print("Usage: connect <address>")
            return
        try:
            target = ui[1].split(':')
            if len(target) < 2:
                self.server.connect(ui[1], self.DEFAULT_PORT)
            else:
                # BUG FIX: the port used to be passed as a string, while the
                # default path passes an int (5555) — normalize to int here.
                self.server.connect(target[0], int(target[1]))
        except SystemExit:
            pass

    def _disconnect(self, ui):
        """Handle `disconnect <address>`."""
        if len(ui) < 2:
            print("Usage: disconnect <address>")
            return
        try:
            self.ghost.disconnect(ui[1].split(':')[0])
        except SystemExit:
            pass
| Farhan-Malik/Ghost-adb | ghost/core/console.py | console.py | py | 3,698 | python | en | code | 0 | github-code | 90 |
12973337052 | ################################
# Timecomplexity: O(N)
# Spacecomplexity: O(1)
################################
def findComplement(num):
    """Return the complement of *num*: every significant bit flipped.

    E.g. 5 (0b101) -> 2 (0b010); 0 -> 0 (matching the original loop-based
    implementation). Assumes a non-negative integer, as in LeetCode 476
    (the old bit-walking loop would never terminate for negative input).
    """
    # XOR with an all-ones mask as wide as num's bit length flips exactly the
    # significant bits — no per-bit loop needed.
    return num ^ ((1 << num.bit_length()) - 1)
return num
if __name__ == "__main__":
    # Quick manual check: the complement of 5 (0b101) is 2 (0b010).
    print(findComplement(5))
| harishdasari1595/Personal_projects | Algorithms and Datastructure/Arrays/one_s_complement.py | one_s_complement.py | py | 601 | python | en | code | 0 | github-code | 90 |
18594915272 | import fileinput
import json
import pprint
import re
import string
import gmplot
import gensim
from tweet_parser.tweet import Tweet
from tweet_parser.tweet_parser_errors import NotATweetError
from textblob import TextBlob
from collections import Counter
def open_tweets(filename):
    """Parse *filename* (one JSON tweet per line) into a list of Tweet objects.

    Lines that fail JSON parsing or Tweet construction are skipped.
    """
    tweets = []
    for line in fileinput.FileInput(filename):
        try:
            tweet_dict = json.loads(line)
            tweet = Tweet(tweet_dict)
        except (json.JSONDecodeError, NotATweetError):
            # BUG FIX: the append below used to run even on failure, which
            # re-appended the previous tweet (or raised NameError if the very
            # first line was malformed). Skip bad lines instead.
            continue
        tweets.append(tweet)
    return tweets
def draw_heatmap(map_center_lat, map_center_long, zoom, plot_coordinates):
    """Render a Google-Maps heatmap of the given coordinates to python_heatmap.html.

    *plot_coordinates* is a dict with 'latitudes' and 'longitudes' lists.
    """
    plotter = gmplot.GoogleMapPlotter(map_center_lat, map_center_long, zoom)
    plotter.heatmap(plot_coordinates['latitudes'], plot_coordinates['longitudes'])
    plotter.draw("python_heatmap.html")
def processTweet(tweet):
    """Normalize a raw tweet and return its tokens with stop words removed.

    Pipeline (order matters): lowercase -> strip HTML entities, @mentions,
    $tickers and URLs -> collapse whitespace -> drop non-BMP characters ->
    glue " st "/" ave " street suffixes onto the preceding word -> tokenize
    and filter stop words.

    Args:
        tweet: raw tweet text.

    Returns:
        List of lowercase tokens.
    """
    # To lowercase
    tweet = tweet.lower()
    # Remove HTML special entities (e.g. &amp;)
    tweet = re.sub(r'\&\w*;', '', tweet)
    # Remove @mentions entirely (the old comment claimed AT_USER substitution,
    # but the replacement has always been the empty string)
    tweet = re.sub(r'@[^\s]+', '', tweet)
    # Remove tickers
    tweet = re.sub(r'\$\w*', '', tweet)
    # Remove hyperlinks
    tweet = re.sub(r'https?:\/\/.*\/\w*', '', tweet)
    # Collapse runs of whitespace (including new line characters)
    tweet = re.sub(r'\s\s+', ' ', tweet)
    # Remove single space remaining at the front of the tweet.
    tweet = tweet.lstrip(' ')
    # Remove characters beyond Basic Multilingual Plane (BMP) of Unicode
    tweet = ''.join(c for c in tweet if c <= '\uFFFF')
    # Bunch st and ave names so street fragments like "5th st" become one token
    tweet = tweet.replace(' st ', 'st ')
    tweet = tweet.replace(' ave ', 'ave ')
    # Remove stop words (text is already lowercase, so no redundant lower())
    stoplist = set('for a of the and to in that has on at'.split(' '))
    return [word for word in tweet.split() if word not in stoplist]
def main():
    """Load archived #MTAFail tweets and run the full analysis pipeline:
    counts, hashtag/mention frequencies, a heatmap, word vectors and
    noun-phrase extraction."""
    # Open list of tweets
    tweets = open_tweets("tweet_archive_mtafail.json")

    tweet_coordinates = {
        "latitudes": [tweet.geo_coordinates.get('latitude') for tweet in tweets if tweet.geo_coordinates is not None],
        "longitudes": [tweet.geo_coordinates.get('longitude') for tweet in tweets if tweet.geo_coordinates is not None]
    }

    # Number of tweets with hashtag "#MTAFail"
    print("Number of tweets with #MTAFail: {}".format(len(tweets)))

    # Other hashtags mentioned (top 5)
    hashtags = [tweet.hashtags for tweet in tweets]
    hashtags_list = [hashtag for hashtag_list in hashtags for hashtag in hashtag_list]
    hashtags_count = Counter(hashtags_list).most_common(5)
    pprint.pprint(hashtags_count)

    # Most mentioned users (top 5)
    user_mentions = [tweet.user_mentions for tweet in tweets]
    user_mentions_list = [mention.get("screen_name") for mention_list in user_mentions for mention in mention_list]
    user_mentions_count = Counter(user_mentions_list).most_common(5)
    pprint.pprint(user_mentions_count)

    # Plot heatmap centered on NYC
    draw_heatmap(40.7128, -74.0060, 11, tweet_coordinates)

    # Process tweets into token lists
    tweet_corpus = [tweet.text for tweet in tweets]
    processed_corpus = [processTweet(tweet) for tweet in tweet_corpus]

    # Word vector generation
    model = gensim.models.Word2Vec(processed_corpus, min_count=3)
    word_vector = model.wv

    # Street-name word vector similarity: tokens containing 'st'/'ave' plus a digit
    streetnames = []
    for tokens in processed_corpus:
        for word in tokens:
            if ('ave' in word or 'st' in word) and bool(re.search(r'\d', word)):
                streetnames.append(word)
    pprint.pprint(Counter(streetnames).most_common(10))

    # BUG FIX: most_common() yields (word, count) tuples; the original passed
    # the whole tuple to similar_by_word() and printed it, and referenced
    # `similar_words` even when the lookup raised KeyError (NameError on the
    # first failure, stale output afterwards).
    for streetname, _count in Counter(streetnames).most_common(10):
        print("Word vector for {}".format(streetname))
        try:
            similar_words = word_vector.similar_by_word(streetname)
        except KeyError:
            # Word not in the vocabulary (min_count filtering); skip it.
            continue
        pprint.pprint(similar_words)

    # Noun phrase analysis (first 10 tweets only)
    tweets_noun_phrases = []
    for tokens in processed_corpus[0:10]:
        noun_phrases = TextBlob(' '.join(tokens)).noun_phrases
        print(noun_phrases)
        for phrase in noun_phrases:
            tweets_noun_phrases.append(phrase)
    pprint.pprint(Counter(tweets_noun_phrases).most_common(20))
# Guard the entry point so importing this module does not trigger the
# full (slow, network/file-dependent) analysis pipeline.
if __name__ == "__main__":
    main()
13892873274 | import random
# Q3 a
class Rectangle:
    """An axis-aligned rectangle with a display color.

    Width and height must be numeric; color is stored as given.
    """

    def __init__(self, width, height, color):
        if not isinstance(width, (int, float)):
            raise TypeError("Width must be number")
        if not isinstance(height, (int, float)):
            raise TypeError("Height must be number")
        self.width = width
        self.height = height
        self.color = color

    def __str__(self):
        return "Rect {} x {} with col {}".format(self.width, self.height, self.color)

    def area(self):
        """Return width * height."""
        return self.height * self.width

    def perimeter(self):
        """Return the total edge length."""
        return self.width * 2 + self.height * 2
# Build ten rectangles with random 5-32 dimensions and a fixed green color.
# (Width is drawn before height, preserving the original RNG call order.)
color = (100, 255, 100)
rect_bucket = [
    Rectangle(random.randrange(5, 33), random.randrange(5, 33), color)
    for _ in range(10)
]
class Person:
    """Empty placeholder class (used to demonstrate instance typing)."""

    def __init__(self):
        pass
#print(type(Person()))
# Q4
class BankAccount:
    """A simple bank account tracking an owner name and a balance in euro."""

    # Class-level counter so every account receives a distinct number.
    # (BUG FIX: acc_number used to be hard-coded to 1000 for *every* account.)
    _next_acc_number = 1000

    def __init__(self, customer, amount=0):
        self.name = customer
        self.balance = amount
        self.acc_number = BankAccount._next_acc_number
        BankAccount._next_acc_number += 1

    def __repr__(self):
        return f"Name:{self.name}, Balance:€{self.balance}, Acc:{self.acc_number}"

    def withdraw(self, amount):
        """Remove *amount* from the balance (no overdraft check at this level)."""
        self.balance -= amount

    def deposit(self, amount):
        """Add *amount* to the balance."""
        self.balance += amount
# Quick smoke check of the base account repr.
a = BankAccount("Ted", 298)
print(a)
class SavingsAccount(BankAccount):
    """A bank account with a minimum opening balance (100), a balance floor
    of 80 on withdrawals, and a minimum deposit of 10."""

    def __init__(self, customer, amount):
        # Opening balance must be at least 100.
        if amount < 100:
            raise ValueError("Amount must be 100 or more")
        super().__init__(customer, amount)

    def withdraw(self, amount):
        """Withdraw only if at least 80 would remain afterwards."""
        if self.balance - amount >= 80:
            self.balance -= amount
        else:
            print("Cannot withdraw funds- min level exceeded")

    def deposit(self, amount):
        """Accept deposits of 10 or more."""
        if amount >= 10:
            self.balance += amount
        else:
            print("Min deposit is 10")
# Exercise ten savings accounts with a random deposit/withdraw round trip.
accounts = []
for a in range(10):
    accounts.append(SavingsAccount("Ted", random.randrange(100, 499)))

for acc in accounts:
    amnt = random.randrange(20)
    print(acc)
    acc.deposit(amnt)
    print(acc)
    acc.withdraw(amnt)
    print(acc)
    print()
# BUG FIX: a dangling no-op statement `acc.withdraw` (attribute access
# without a call) previously ended this loop; it had no effect and was
# removed as likely leftover code.
| seanrattigan/SW_Arch_OOP | exam_code/oop_2021_exam_code.py | oop_2021_exam_code.py | py | 2,287 | python | en | code | 0 | github-code | 90 |
32408780806 | import os
import random
import cv2
badcase_path = r'E:\L2_eval\new_data\badcase\badcase.txt'
data_path = r'G:\test_data\new_data\crop_images'
save_path = r'E:\L2_eval\new_data\images'
record_txt_path = r'E:\L2_eval\new_data\images\1.txt'

# Collect the set of bad-case file names (badcase.txt holds one path per line).
# A set gives O(1) membership checks in the loop below.
badcase_set = set()
with open(badcase_path, 'r') as f:
    for line in f:
        badcase_set.add(os.path.split(line)[1].replace('\n', ''))

# Randomly copy roughly 1 in 5 of the non-badcase images, up to 1000 copies,
# recording each copied source path in record_txt_path.
filenames = os.listdir(data_path)
count = 0
for filename in filenames:
    if filename in badcase_set:
        print(filename)
        continue
    if random.randint(0, 4):
        # randint(0, 4) is truthy 4 times out of 5 -> skip ~80% of images.
        continue
    img_path = os.path.join(data_path, filename)
    image = cv2.imread(img_path)
    if image is None:
        # BUG FIX: cv2.imread returns None (it never raises) for unreadable
        # files; previously this crashed in cv2.imwrite. Skip instead.
        print("Failed to read:", img_path)
        continue
    save_dir = os.path.join(save_path, filename)
    cv2.imwrite(save_dir, image)
    with open(record_txt_path, 'a') as f:
        f.write(img_path + "\n")
    count += 1
    if count == 1000:
        break
| Daming-TF/HandData | scripts/Cleaning_Metric_Evaluation_Tool/test.py | test.py | py | 946 | python | en | code | 1 | github-code | 90 |
45814153703 | # -*- coding: utf-8 -*-
import cv2
import numpy as np
from pyzbar.pyzbar import decode
video = cv2.VideoCapture(0, cv2.CAP_DSHOW)
video.set(3, 640)   # frame width
video.set(4, 480)   # frame height

# One authorized code per line.
with open("pessoas_autorizadas.txt") as arquivo:
    minha_lista = arquivo.read().splitlines()

while True:
    check, frame = video.read()
    if not check:
        # BUG FIX: the read flag was previously ignored; a failed capture
        # produced frame=None and crashed in decode(). Stop cleanly instead.
        print("Camera frame capture failed")
        break
    for barcode in decode(frame):
        leitura = barcode.data.decode("utf-8")
        print(barcode.data, leitura)
        if leitura in minha_lista:
            resposta = "Autorizado"
            cor = (0, 255, 0)
        else:
            resposta = "Não autorizado"
            cor = (0, 0, 255)
        # Outline the detected barcode and label it with the decision.
        pontos = np.array([barcode.polygon], np.int32)
        pontos = pontos.reshape((-1, 1, 2))
        cv2.polylines(frame, [pontos], True, cor, 5)
        pontos2 = barcode.rect
        cv2.putText(frame, resposta, (pontos2[0], pontos2[1]), cv2.FONT_HERSHEY_SIMPLEX,
                    0.9, cor, 2)
    cv2.imshow("Resultado", frame)
    tecla = cv2.waitKey(2)
    # Stop when the user presses "Esc" (key code 27).
    if tecla == 27:
        video.release()  # BUG FIX: the capture device was never released
        cv2.destroyAllWindows()
        break
| veniciocosta/barcode_scaner | scan_autenticate.py | scan_autenticate.py | py | 1,157 | python | pt | code | 0 | github-code | 90 |
35170106156 | import numpy as np
import cv2 as cv
import tensorflow as tf
import os
import matplotlib.pyplot as plt
#convert image to tensor
XDIM = 2048
YDIM = 11*XDIM//8
LINE = 3*XDIM//8
#assumes the images in the folder look like Band1.jpg, Band2.jpg etc
def folder_to_array(folder):
    """Load Band1..Band11 jpgs from *folder* as grayscale arrays resized to (YDIM, XDIM).

    Bands that fail to load are silently skipped, matching the original behavior.
    """
    images = []
    for band in range(1, 12):
        img = cv.imread(os.path.join(folder, "Band%d.jpg" % band))
        if img is None:
            continue
        # Convert to black and white, then resize to the common dimensions.
        gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
        images.append(cv.resize(gray, (YDIM, XDIM)))
    return images
#convert image to tensor that can go into model
def get_tensor_pred(images):
    """Convert a (samples, bands, H, W) array to a channels-last float32 tensor
    suitable for the model's predict().

    (An unused SAMPLES/BANDS/HEIGHT/WIDTH unpacking was removed.)
    """
    images_tensor = tf.convert_to_tensor(images, dtype=tf.float32)
    # Move the band axis last: (samples, bands, H, W) -> (samples, H, W, bands).
    return tf.transpose(images_tensor, [0, 2, 3, 1])
#create mask from prediction
def create_mask_pred(pred_mask):
    """Collapse per-class scores to a single-channel label mask for the first sample."""
    labels = tf.argmax(pred_mask, axis=-1)
    return labels[..., tf.newaxis][0]
#load model
# Mapping from predicted class index to land-cover label.
CLASS_DICT = {0: 'marsh', 1: 'water', 2: 'upland', 3: 'unlabeled'}
# NOTE: the saved model is not robust to re-sizing of its inputs.
def pred_from_saved(
    model_path='saved-models/Depth2RandomFlip89val',
    folder_path="train_jpg",
    startx=0,
    starty=0,
    zoomx=1000,
    zoomy=1000,
    fig_size=5
):
    """Load a saved segmentation model and plot, for each site (GCE/PIE/VCR),
    a row of panels: landsat band | predicted classification | ground truth.

    Args:
        model_path: path of the saved tf.keras model.
        folder_path: directory containing the per-site "*_scaled" jpg folders.
        startx, starty: top-left corner of the crop window (pixels).
        zoomx, zoomy: crop window extent (pixels).
        fig_size: matplotlib figure size in inches.

    Returns:
        (small_image, small_1hot) for the last site processed.
    """
    pie_path = f'{folder_path}/PIE_scaled'
    vcr_path = f'{folder_path}/VCR_scaled'
    gce_path = f'{folder_path}/GCE_scaled'

    # Clamp the crop window to the image bounds.
    startx = min(XDIM, startx)
    # BUG FIX: this previously read `min(XDIM, startx)`, clobbering starty
    # with the x-start and clamping against the wrong axis (the y axis has
    # extent YDIM). Defaults (0, 0) were unaffected.
    starty = min(YDIM, starty)
    endx = min(XDIM, startx + zoomx)
    endy = min(YDIM, starty + zoomy)
    zoomx = min(XDIM, zoomx)
    zoomy = min(YDIM, zoomy)

    WHICH_BAND = 4
    BANDS = 11
    model = tf.keras.models.load_model(model_path)
    images = np.array([folder_to_array(gce_path), folder_to_array(pie_path), folder_to_array(vcr_path)])
    masks = np.load('numpymasks.npy')
    # Blank out a band of rows around LINE (site boundary artifact).
    masks[:, :, LINE - 5:LINE + 5, :] = np.nan
    small_masks = masks[:, 0, startx:endx, starty:endy]
    num_class = len(CLASS_DICT)

    site_dict = {0: 'GCE', 1: 'PIE', 2: 'VCR'}
    site_perm = {0: 1, 1: 2, 2: 0}

    COLOR = 'red'
    plt.rcParams['text.color'] = COLOR
    plt.rcParams['axes.labelcolor'] = COLOR
    plt.rcParams['xtick.color'] = COLOR
    plt.rcParams['ytick.color'] = COLOR

    fig = plt.figure(figsize=(fig_size, fig_size))
    cmap = plt.cm.get_cmap('viridis', num_class)
    for i in range(3):
        small_image = get_tensor_pred(images[site_perm[i], :, startx:endx, starty:endy].reshape([1, BANDS, zoomx, zoomy]))
        small_1hot = model.predict(small_image)
        small_pred = create_mask_pred(small_1hot)

        # Row i, column 1: the raw landsat band.
        ax1 = fig.add_subplot(3, 3, i * 3 + 1)
        ax1.imshow(small_image[0, :, :, WHICH_BAND])
        ax1.set_title("landsat image")
        ax1.set_ylabel(site_dict[site_perm[i]])

        # Column 2: model prediction.
        ax2 = fig.add_subplot(3, 3, i * 3 + 2)
        ax2.set_title("predicted classification")
        im2 = ax2.imshow(small_pred, vmin=0, vmax=num_class - 1, cmap=cmap)

        # Column 3: ground-truth mask.
        ax3 = fig.add_subplot(3, 3, i * 3 + 3)
        ax3.set_title("actual classification")
        im2 = ax3.imshow(small_masks[site_perm[i]], vmin=0, vmax=num_class - 1, cmap=cmap)

    # One shared horizontal colorbar labeled with the class names.
    cbar = plt.colorbar(
        im2,
        ticks=[3 / 8 + i * ((num_class - 1.0) / num_class) for i in range(num_class)],
        orientation='horizontal', ax=fig.get_axes())
    cbar.set_ticklabels([CLASS_DICT[i] for i in range(num_class)])
    plt.show()
    return (small_image, small_1hot)
def main():
    """Entry point: render the prediction panels with default settings."""
    pred_from_saved()

if __name__ == '__main__':
    main()
| colefranks/MarshBoundaries | PredictFromSaved.py | PredictFromSaved.py | py | 3,703 | python | en | code | 0 | github-code | 90 |
73034649898 | import unittest
# Utils libs
import os
import numpy as np
import pandas as pd
from words_n_fun.preprocessing import synonym_malefemale_replacement
# Disable logging
import logging
logging.disable(logging.CRITICAL)
class SynonymTests(unittest.TestCase):
    '''Main class to test all functions in synonym_malefemale_replacement.py'''

    def setUp(self):
        '''Set-up: run each test from the directory containing this file.'''
        # Change directory to the script directory so relative paths resolve
        abspath = os.path.abspath(__file__)
        dname = os.path.dirname(abspath)
        os.chdir(dname)

    def test_remove_gender_synonyms(self):
        '''Testing function synonym_malefemale_replacement.remove_gender_synonyms'''
        docs = ["Chauffeur(se) accompagnateur(trice) pers à mob - 5 ans de expérience.", "Je maîtrise 12 langages informatiques dont le C & j'ai le Permis B", "Coordinateur d'Equipe d'Action Territoriale ", 5, None, "serveur/serveur(se), agriculteur (trice) blabla ouvrier/ ouvrière blabla aide apprenti boucher /aide apprentie bouchere"]
        docs_gender_syn_removed = ['Chauffeur accompagnateur pers à mob - 5 ans de expérience.', "Je maîtrise 12 langages informatiques dont le C & j'ai le Permis B", "Coordinateur d'Equipe d'Action Territoriale ", None, None, 'serveur , agriculteur blabla ouvrier blabla aide apprenti boucher']
        # Check the nominal behaviour (non-strings become NaN, mapped back to None)
        self.assertEqual(list(synonym_malefemale_replacement.remove_gender_synonyms(pd.Series(docs)).replace({np.nan:None})), docs_gender_syn_removed)

    def test_matching_words(self):
        '''Testing function synonym_malefemale_replacement.matching_words'''
        word1 = 'serveur'
        word2 = 'serveuse'
        result = ('serveur', 'serveuse', 'serveur')
        # Check the nominal behaviour
        self.assertEqual(synonym_malefemale_replacement.matching_words(word1,word2), result)

    def test_update_synonyms_set(self):
        '''Testing function synonym_malefemale_replacement.update_synonyms_set'''
        synonyms_set = {}
        match = [('serveur', 'serveuse', ''),('boucher','ere',''),('boucher','bouchere','')]
        numligne = 1
        result = {('serveur', 'serveuse', 'serveur'): 1, ('boucher', 'ere', 'boucher'): 1,('boucher', 'bouchere', 'boucher'): 1}
        # Check the nominal behaviour
        self.assertEqual(synonym_malefemale_replacement.update_synonyms_set(synonyms_set, match, numligne), result)
# Run the tests
if __name__ == '__main__':
    unittest.main()
| OSS-Pole-Emploi/words_n_fun | tests/test_5_synonym_malefemale_replacement.py | test_5_synonym_malefemale_replacement.py | py | 2,559 | python | en | code | 20 | github-code | 90 |
43017290689 | import importlib
import plone.testing.zope
from plone.testing.layer import Layer
from plone.testing.zca import ZCMLSandbox
from plone.testing.zope import WSGI_SERVER
from Testing.makerequest import makerequest
from zope.publisher.browser import TestRequest
# Sandboxed ZCML configuration layer that loads ftesting.zcml for the
# package corresponding to this testing module.
ZCMLLayer = ZCMLSandbox(
    None, 'Products.Formulator:ZCML', __name__, 'ftesting.zcml',
    importlib.import_module(__name__.replace('.testing', '')))
class FormulatorLayer(Layer):
    """plone.testing layer that installs the Formulator products and test
    users into a fresh Zope instance for functional tests."""

    defaultBases = (ZCMLLayer, WSGI_SERVER, )

    # Zope products installed during setUp and removed during tearDown.
    PRODUCTS = (
        'Products.Formulator',
        'zeam.form.base',
    )

    # username -> roles created in the root acl_users folder during setUp.
    USERS = {
        'manager': ['Manager'],
    }

    def login(self, username):
        """Login with the given id."""
        userfolder = self['app'].acl_users
        plone.testing.zope.login(userfolder, username)

    def setUp(self):
        """Install the products and create the test users in the ZODB."""
        with plone.testing.zope.zopeApp(self['zodbDB']) as app:
            for product in self.PRODUCTS:
                plone.testing.zope.installProduct(app, product)
            uf = app.acl_users
            for username, roles in self.USERS.items():
                # _doAddUser(name, password, roles, domains): password == username.
                uf._doAddUser(username, username, roles, [])

    def tearDown(self):
        """Uninstall the products installed in setUp."""
        with plone.testing.zope.zopeApp(self['zodbDB']) as app:
            for product in self.PRODUCTS:
                plone.testing.zope.uninstallProduct(app, product)

    def get_application(self):
        """Return root folder wrapped inside a test request, which is
        the same object you have when you are working on a real
        published request.
        """
        return makerequest(self['app'], environ={'SERVER_NAME': 'localhost'})
# NOTE(review): `default_users` mirrors FormulatorLayer.USERS but is not
# referenced in this module — presumably leftover; confirm before removing.
default_users = {
    'manager': ['Manager'],
}
# Shared functional layer instance used by the test suites.
FunctionalLayer = FormulatorLayer()
__all__ = ['FunctionalLayer', 'TestRequest']
| infrae/Products.Formulator | src/Products/Formulator/testing.py | testing.py | py | 1,759 | python | en | code | 1 | github-code | 90 |
5044763802 | from collections import Counter
def main():
    """Read N and a list A; for each of the N largest distinct values in
    descending order, print how many times it occurs (0 once the distinct
    values run out)."""
    N = int(input())
    A = list(map(int, input().split()))
    # Sort (value, count) pairs by value descending; keys are distinct, so
    # tuple comparison never falls through to the counts.
    counts = [cnt for _, cnt in sorted(Counter(A).items(), reverse=True)]
    for i in range(N):
        print(counts[i] if i < len(counts) else 0)


if __name__ == "__main__":
    main()
| valusun/Compe_Programming | AtCoder/ABC/ABC273/C.py | C.py | py | 314 | python | en | code | 0 | github-code | 90 |
26807918441 | # -*- coding:utf-8 -*-
import sqlite3
def executeSelectOne(sql):
    """Run *sql* against my_db.sqlite3 and return the first result row (or None).

    Note: *sql* is executed verbatim — callers must not interpolate untrusted
    input into it.
    """
    conn = sqlite3.connect('my_db.sqlite3')
    try:
        curs = conn.cursor()
        curs.execute(sql)
        data = curs.fetchone()
    finally:
        # Always release the connection, even if the query raises
        # (previously the connection leaked on error).
        conn.close()
    return data
def executeSelectAll(sql):
    """Run *sql* against my_db.sqlite3 and return all result rows as a list.

    Note: *sql* is executed verbatim — callers must not interpolate untrusted
    input into it.
    """
    conn = sqlite3.connect('my_db.sqlite3')
    try:
        curs = conn.cursor()
        curs.execute(sql)
        data = curs.fetchall()
    finally:
        # Always release the connection, even if the query raises
        # (previously the connection leaked on error).
        conn.close()
    return data
def executeSQL(sql):
    """Execute a write statement against my_db.sqlite3 and commit it.

    Returns:
        True on success, False if the statement violates an integrity
        constraint. Other exceptions propagate to the caller.
    """
    conn = sqlite3.connect('my_db.sqlite3')
    try:
        curs = conn.cursor()
        curs.execute(sql)
        conn.commit()
        return True
    except sqlite3.IntegrityError:
        return False
    finally:
        # Always release the connection; previously it leaked whenever an
        # exception other than IntegrityError was raised.
        conn.close()
| UstymHanyk/python_hmwr | week 6/executeSqlite3.py | executeSqlite3.py | py | 669 | python | en | code | 0 | github-code | 90 |
16948290871 | from typing import List
class Solution:
    """Three equivalent solutions to LeetCode 977: square a sorted array and
    return the squares in non-decreasing order."""

    def sortedSquares(self, nums: List[int]) -> List[int]:
        """Two-pointer O(n): fill the output from the largest square down."""
        size = len(nums)
        out = [0] * size
        lo, hi = 0, size - 1
        for pos in range(size - 1, -1, -1):
            # The largest remaining square is at one of the two ends.
            if abs(nums[lo]) > abs(nums[hi]):
                val = nums[lo]
                lo += 1
            else:
                val = nums[hi]
                hi -= 1
            out[pos] = val * val
        return out

    def sortedSquares1(self, nums: List[int]) -> List[int]:
        """Square via map, then sort in place (O(n log n))."""
        squared = list(map(lambda x: x ** 2, nums))
        squared.sort()
        return squared

    def sortedSquares2(self, nums: List[int]) -> List[int]:
        """One-liner: square with a comprehension and hand to sorted()."""
        return sorted(x * x for x in nums)
# Demo: run each implemented variant on the sample input.
nums = [-4, -1, 0, 3, 10]
s = Solution()
print(s.sortedSquares(nums))
print(s.sortedSquares1(nums))
# BUG FIX: the original called s.sortedSquares3(nums), which does not exist
# (AttributeError at runtime); the defined variants are sortedSquares,
# sortedSquares1 and sortedSquares2.
print(s.sortedSquares2(nums))
| iamsuman/algorithms | iv/Leetcode/easy/977_squares_of_sorted_array.py | 977_squares_of_sorted_array.py | py | 841 | python | en | code | 2 | github-code | 90 |
1213076764 | # @Time : 2019/9/2 13:54
# @Author : Libuda
# @FileName: user_login.py
# @Software: PyCharm
from TestMain.utils import Utils
from TestMain.test_data_config import TestDataConfig
class Login:
    """
    Helper dedicated to login tests: login data is not submitted as a request
    body but wrapped into the request headers instead.
    """

    def __init__(self, file_name=None):
        self.utils = Utils()
        # Fall back to an empty file name when none is provided.
        self.test_data = TestDataConfig(file_name if file_name else "")
        self.test_res = self.test_data.get_all_test()

    def get_headers(self):
        """
        Read every test case and return it wrapped as request headers.
        """
        if not self.test_res:
            return []
        return [self.utils.encapsulate_headers(*case) for case in self.test_res]
if __name__ == "__main__":
log = Login("C:/Users/lenovo/PycharmProjects/自动化测试/测试实践/Excel/test_Login.xls")
print(log.test_res)
res = log.get_headers()
print(res)
| budaLi/Unittest | MyAutoTest/templete/user_login.py | user_login.py | py | 1,100 | python | en | code | 4 | github-code | 90 |
25749801554 | import os
import json
import argparse
import numpy as np
import pandas as pd
from sklearn.metrics import r2_score
from fbprophet import Prophet
from fbprophet.serialize import model_to_json
from azureml.core import Workspace, Dataset
from azureml.core.run import Run
train_dataset_name = 'sales_train1'
test_dataset_name = 'sales_test1'
holiday_dataset_name = 'sales_holidays'
output_folder = 'outputs'
def mean_absolute_percentage_error(y_true, y_pred):
    """Return the mean absolute percentage error between *y_true* and *y_pred*.

    NOTE(review): entries where y_true == 0 divide by zero and poison the
    mean with inf/nan — the test dataset is known to contain Sales == 0;
    confirm whether such rows should be filtered upstream.
    """
    actual, forecast = np.array(y_true), np.array(y_pred)
    return np.mean(np.abs((actual - forecast) / actual)) * 100
def main():
    """Train a Prophet sales model on Azure ML and log its R^2 score.

    Loads the registered train/test/holiday datasets from the run's
    workspace, fits Prophet with hyperparameters taken from the command
    line, scores the forecast over the test period with R^2, logs the
    score to the run, and serializes the fitted model to the outputs
    folder (which Azure ML uploads to run history automatically).
    """
    # run instance from the runtime experiment
    run = Run.get_context()
    # run remotely
    ws = run.experiment.workspace
    # run locally
    #ws = Workspace.from_config(path='../config.json')

    # get train, test, holidays datasets
    train_df = Dataset.get_by_name(
        ws, name=train_dataset_name).to_pandas_dataframe()[['Date', 'Sales']]
    train_df['Date'] = pd.DatetimeIndex(train_df['Date'])
    print('train_df', train_df)

    test_df = Dataset.get_by_name(
        ws, name=test_dataset_name).to_pandas_dataframe()[['Date', 'Sales']]
    test_df['Date'] = pd.DatetimeIndex(test_df['Date'])
    print('test_df', test_df)

    holiday_df = Dataset.get_by_name(
        ws, name=holiday_dataset_name).to_pandas_dataframe()
    holiday_df['ds'] = pd.DatetimeIndex(holiday_df['ds'])

    # Fix this issue: raise KeyError(f"{labels} not found in axis") - KeyError: '[] not found in axis'
    # https://www.jianshu.com/p/cd2d0cb63008
    # https://github.com/facebook/prophet/issues/821#issuecomment-461996281
    holiday_df = holiday_df.reset_index()
    print('holiday_df', holiday_df)

    # from the Prophet documentation every variables should have specific names
    train_df = train_df.rename(columns={
        'Date': 'ds',
        'Sales': 'y'
    })
    print('train_df', train_df)

    test_df = test_df.rename(columns={
        'Date': 'ds',
        'Sales': 'y'
    })
    print('test_df', test_df)

    # add arguments to script
    parser = argparse.ArgumentParser()
    parser.add_argument('--seasonality_mode', type=str, default='additive',
                        help='Model seasonality mode')
    parser.add_argument('--changepoint_prior_scale', type=float, default=0.05,
                        help='How flexible the changepoints are allowed to be')
    parser.add_argument('--holidays_prior_scale', type=float, default=10.0,
                        help='Used to smoothning the effect of holidays')
    parser.add_argument('--n_changepoints', type=int, default=25,
                        help='Number of change happen in the data')
    args = parser.parse_args()
    print('args', args)

    model = Prophet(
        changepoint_prior_scale=args.changepoint_prior_scale,
        holidays_prior_scale=args.holidays_prior_scale,
        n_changepoints=args.n_changepoints,
        seasonality_mode=args.seasonality_mode,
        weekly_seasonality=True,
        daily_seasonality=True,
        yearly_seasonality=True,
        holidays=holiday_df,
        interval_width=0.95
    )
    model.fit(train_df)

    # make future dates the same as test dataset
    test_forecast = model.make_future_dataframe(
        periods=(test_df['ds'].max() - test_df['ds'].min()).days + 1,
        freq='D',
        include_history=False
    )
    print('test_forecast', test_forecast)

    # predict test
    test_predictions = model.predict(test_forecast)
    print('test_predictions', test_predictions)
    test_predictions = test_predictions[['ds', 'yhat']]
    print('test_predictions', test_predictions)

    # mean_absolute_percentage_error(test_df['y'], abs(test_predictions['yhat']))
    score_value = r2_score(test_df['y'], test_predictions['yhat'])
    print('score_value', score_value)
    # BUGFIX: np.float was deprecated in NumPy 1.20 and removed in 1.24,
    # so np.float(...) crashes on modern NumPy; the builtin float is the
    # documented drop-in replacement (np.float was just an alias for it).
    run.log('score_value', float(score_value))

    # files saved to 'outputs' folder will be automatically uploaded to run history
    os.makedirs(output_folder, exist_ok=True)
    with open(f'{output_folder}/model.json', 'w') as fout:
        json.dump(model_to_json(model), fout)  # Save model
# standalone run: python train.py --seasonality_mode=additive --changepoint_prior_scale=0.05 --holidays_prior_scale=10.0 --n_changepoints=25
# Entry point: run the training pipeline only when executed directly.
if __name__ == '__main__':
    main()
| elbertsoftware/ML-Engineering | script/train.py | train.py | py | 4,366 | python | en | code | 1 | github-code | 90 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.